e1000/base: fix link detect flow
[dpdk.git] / drivers/net/e1000/base/e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 0:7 GbE region Read Access */
173                 u32 grwa:8; /* 8:15 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
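
/* Usage sketch (an illustration, not the exact flow used later in this
 * file): the unions above are filled by reading the 16-bit register into
 * .regval and then testing the individual bit-fields, e.g.
 *
 *        union ich8_hws_flash_status hsfsts;
 *
 *        hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *        if (hsfsts.hsf_status.flcinprog)
 *                return -E1000_ERR_NVM;
 *
 * E1000_READ_FLASH_REG16() and ICH_FLASH_HSFSTS are the flash accessors
 * used by the NVM helpers elsewhere in this base code.
 */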
179
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
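        /* The 32-bit PHY ID is assembled from PHY_ID1 (upper word) and
         * PHY_ID2 with the revision bits masked off; an all-ones (0xFFFF)
         * read means the PHY did not respond, so one retry is made.
         */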
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214                 if  (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Unforce SMBus mode in PHY */
238                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
239                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
240                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
241
242                 /* Unforce SMBus mode in MAC */
243                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
244                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
245                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
246         }
247
248         return true;
249 }
250
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
258 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
259 {
260         u32 mac_reg;
261
262         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
263
264         /* Set Phy Config Counter to 50msec */
265         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
266         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
267         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
268         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
269
270         /* Toggle LANPHYPC Value bit */
271         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
272         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
273         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
274         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275         E1000_WRITE_FLUSH(hw);
276         usec_delay(10);
277         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280
281         if (hw->mac.type < e1000_pch_lpt) {
282                 msec_delay(50);
283         } else {
284                 u16 count = 20;
285
286                 do {
287                         msec_delay(5);
288                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
289                            E1000_CTRL_EXT_LPCD) && count--);
290
291                 msec_delay(30);
292         }
293 }
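
/* In the flows below, LANPHYPC is toggled when the PHY is inaccessible
 * over the MAC-PHY interconnect (e1000_init_phy_workarounds_pchlan()) or
 * when forcibly exiting ULP (e1000_disable_ulp_lpt_lp()).
 */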
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
302 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
303 {
304         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
305         s32 ret_val;
306
307         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
308
309         /* Gate automatic PHY configuration by hardware on managed and
310          * non-managed 82579 and newer adapters.
311          */
312         e1000_gate_hw_phy_config_ich8lan(hw, true);
313
314 #ifdef ULP_SUPPORT
315         /* It is not possible to be certain of the current state of ULP
316          * so forcibly disable it.
317          */
318         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
319
320 #endif /* ULP_SUPPORT */
321         ret_val = hw->phy.ops.acquire(hw);
322         if (ret_val) {
323                 DEBUGOUT("Failed to initialize PHY flow\n");
324                 goto out;
325         }
326
327         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
328          * inaccessible and resetting the PHY is not blocked, toggle the
329          * LANPHYPC Value bit to force the interconnect to PCIe mode.
330          */
331         switch (hw->mac.type) {
332         case e1000_pch_lpt:
333                 if (e1000_phy_is_accessible_pchlan(hw))
334                         break;
335
336                 /* Before toggling LANPHYPC, see if PHY is accessible by
337                  * forcing MAC to SMBus mode first.
338                  */
339                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
340                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
341                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
342
343                 /* Wait 50 milliseconds for MAC to finish any retries
344                  * that it might be trying to perform from previous
345                  * attempts to acknowledge any phy read requests.
346                  */
347                 msec_delay(50);
348
349                 /* fall-through */
350         case e1000_pch2lan:
351                 if (e1000_phy_is_accessible_pchlan(hw))
352                         break;
353
354                 /* fall-through */
355         case e1000_pchlan:
356                 if ((hw->mac.type == e1000_pchlan) &&
357                     (fwsm & E1000_ICH_FWSM_FW_VALID))
358                         break;
359
360                 if (hw->phy.ops.check_reset_block(hw)) {
361                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
362                         ret_val = -E1000_ERR_PHY;
363                         break;
364                 }
365
366                 /* Toggle LANPHYPC Value bit */
367                 e1000_toggle_lanphypc_pch_lpt(hw);
368                 if (hw->mac.type >= e1000_pch_lpt) {
369                         if (e1000_phy_is_accessible_pchlan(hw))
370                                 break;
371
372                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
373                          * so ensure that the MAC is also out of SMBus mode
374                          */
375                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
376                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
377                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
378
379                         if (e1000_phy_is_accessible_pchlan(hw))
380                                 break;
381
382                         ret_val = -E1000_ERR_PHY;
383                 }
384                 break;
385         default:
386                 break;
387         }
388
389         hw->phy.ops.release(hw);
390         if (!ret_val) {
391
392                 /* Check to see if able to reset PHY.  Print error if not */
393                 if (hw->phy.ops.check_reset_block(hw)) {
394                         ERROR_REPORT("Reset blocked by ME\n");
395                         goto out;
396                 }
397
398                 /* Reset the PHY before any access to it.  Doing so, ensures
399                  * that the PHY is in a known good state before we read/write
400                  * PHY registers.  The generic reset is sufficient here,
401                  * because we haven't determined the PHY type yet.
402                  */
403                 ret_val = e1000_phy_hw_reset_generic(hw);
404                 if (ret_val)
405                         goto out;
406
407                 /* On a successful reset, possibly need to wait for the PHY
408                  * to quiesce to an accessible state before returning control
409                  * to the calling function.  If the PHY does not quiesce, then
410                  * return E1000_BLK_PHY_RESET, as this is the condition that
411                  * the PHY is in.
412                  */
413                 ret_val = hw->phy.ops.check_reset_block(hw);
414                 if (ret_val)
415                         ERROR_REPORT("ME blocked access to PHY after reset\n");
416         }
417
418 out:
419         /* Ungate automatic PHY configuration on non-managed 82579 */
420         if ((hw->mac.type == e1000_pch2lan) &&
421             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
422                 msec_delay(10);
423                 e1000_gate_hw_phy_config_ich8lan(hw, false);
424         }
425
426         return ret_val;
427 }
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
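        /* Poll for up to 100 ms (1 ms per attempt) until the PHY reports
         * an ID that maps to a known PHY type.
         */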
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
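        /* Worked example (illustrative numbers only): a GFPREG base of 0x00
         * and limit of 0x0F describes 16 4-KB sectors, i.e. 64 KB of GbE
         * flash, which yields two 32 KB banks of 16384 16-bit words each.
         */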
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
675
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg & 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @address: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: value to be read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_write_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
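
/* Typical calling pattern (as used by e1000_set_eee_pchlan() below): the
 * caller holds the SW/FW/HW semaphore around the locked EMI accessors,
 * for example:
 *
 *        u16 data;
 *
 *        ret_val = hw->phy.ops.acquire(hw);
 *        if (ret_val)
 *                return ret_val;
 *        ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data);
 *        hw->phy.ops.release(hw);
 */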
857
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until then, otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
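        /* The EEE link partner ability, PCS status and advertisement words
         * live at different EMI addresses on 82579 versus I217 class PHYs,
         * so select the register set by PHY type first.
         */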
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         if (hw->phy.type == e1000_phy_82579) {
940                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
941                                                     &data);
942                 if (ret_val)
943                         goto release;
944
945                 data &= ~I82579_LPI_100_PLL_SHUT;
946                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
947                                                      data);
948         }
949
950         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
951         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
952         if (ret_val)
953                 goto release;
954
955         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
956 release:
957         hw->phy.ops.release(hw);
958
959         return ret_val;
960 }
961
962 /**
963  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
964  *  @hw:   pointer to the HW structure
965  *  @link: link up bool flag
966  *
967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
968  *  preventing further DMA write requests.  Workaround the issue by disabling
969  *  the de-assertion of the clock request when in 1Gbps mode.
970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
971  *  speeds in order to avoid Tx hangs.
972  **/
973 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
974 {
975         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
976         u32 status = E1000_READ_REG(hw, E1000_STATUS);
977         s32 ret_val = E1000_SUCCESS;
978         u16 reg;
979
980         if (link && (status & E1000_STATUS_SPEED_1000)) {
981                 ret_val = hw->phy.ops.acquire(hw);
982                 if (ret_val)
983                         return ret_val;
984
985                 ret_val =
986                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
987                                                &reg);
988                 if (ret_val)
989                         goto release;
990
991                 ret_val =
992                     e1000_write_kmrn_reg_locked(hw,
993                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
994                                                 reg &
995                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
996                 if (ret_val)
997                         goto release;
998
999                 usec_delay(10);
1000
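                /* Keep the PLL clock request asserted (FEXTNVM6 bit 8) so
                 * the clock request is not de-asserted while K1 is engaged
                 * at 1 Gbps; see the workaround description above.
                 */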
1001                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1002                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1003
1004                 ret_val =
1005                     e1000_write_kmrn_reg_locked(hw,
1006                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1007                                                 reg);
1008 release:
1009                 hw->phy.ops.release(hw);
1010         } else {
1011                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1012                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1013
1014                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1015                               (status & E1000_STATUS_FD)))
1016                         goto update_fextnvm6;
1017
1018                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1019                 if (ret_val)
1020                         return ret_val;
1021
1022                 /* Clear link status transmit timeout */
1023                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1024
1025                 if (status & E1000_STATUS_SPEED_100) {
1026                         /* Set inband Tx timeout to 5x10us for 100Half */
1027                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1028
1029                         /* Do not extend the K1 entry latency for 100Half */
1030                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1031                 } else {
1032                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1033                         reg |= 50 <<
1034                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1035
1036                         /* Extend the K1 entry latency for 10 Mbps */
1037                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1038                 }
1039
1040                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1041                 if (ret_val)
1042                         return ret_val;
1043
1044 update_fextnvm6:
1045                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1046         }
1047
1048         return ret_val;
1049 }
1050
1051 #ifdef ULP_SUPPORT
1052 /**
1053  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1054  *  @hw: pointer to the HW structure
1055  *  @to_sx: boolean indicating a system power state transition to Sx
1056  *
1057  *  When link is down, configure ULP mode to significantly reduce the power
1058  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1059  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1060  *  system, configure the ULP mode by software.
1061  */
1062 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1063 {
1064         u32 mac_reg;
1065         s32 ret_val = E1000_SUCCESS;
1066         u16 phy_reg;
1067
1068         if ((hw->mac.type < e1000_pch_lpt) ||
1069             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1070             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1071             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1072             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1073             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1074                 return 0;
1075
1076         if (!to_sx) {
1077                 int i = 0;
1078                 /* Poll up to 5 seconds for Cable Disconnected indication */
1079                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1080                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1081                         /* Bail if link is re-acquired */
1082                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1083                                 return -E1000_ERR_PHY;
1084                         if (i++ == 100)
1085                                 break;
1086
1087                         msec_delay(50);
1088                 }
1089                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1090                           (E1000_READ_REG(hw, E1000_FEXT) &
1091                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1092                           i * 50);
1093         }
1094
1095         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1096                 /* Request ME configure ULP mode in the PHY */
1097                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1098                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1099                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1100
1101                 goto out;
1102         }
1103
1104         ret_val = hw->phy.ops.acquire(hw);
1105         if (ret_val)
1106                 goto out;
1107
1108         /* During S0 Idle keep the phy in PCI-E mode */
1109         if (hw->dev_spec.ich8lan.smbus_disable)
1110                 goto skip_smbus;
1111
1112         /* Force SMBus mode in PHY */
1113         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1114         if (ret_val)
1115                 goto release;
1116         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1117         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1118
1119         /* Force SMBus mode in MAC */
1120         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1121         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1122         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1123
1124 skip_smbus:
1125         if (!to_sx) {
1126                 /* Change the 'Link Status Change' interrupt to trigger
1127                  * on 'Cable Status Change'
1128                  */
1129                 ret_val = e1000_read_kmrn_reg_locked(hw,
1130                                                      E1000_KMRNCTRLSTA_OP_MODES,
1131                                                      &phy_reg);
1132                 if (ret_val)
1133                         goto release;
1134                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1135                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1136                                             phy_reg);
1137         }
1138
1139         /* Set Inband ULP Exit, Reset to SMBus mode and
1140          * Disable SMBus Release on PERST# in PHY
1141          */
1142         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1143         if (ret_val)
1144                 goto release;
1145         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1146                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1147         if (to_sx) {
1148                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1149                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1150
1151                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1152         } else {
1153                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1154         }
1155         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1156
1157         /* Set Disable SMBus Release on PERST# in MAC */
1158         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1159         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1160         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1161
1162         /* Commit ULP changes in PHY by starting auto ULP configuration */
1163         phy_reg |= I218_ULP_CONFIG1_START;
1164         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1165
1166         if (!to_sx) {
1167                 /* Disable Tx so that the MAC doesn't send any (buffered)
1168                  * packets to the PHY.
1169                  */
1170                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1171                 mac_reg &= ~E1000_TCTL_EN;
1172                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1173         }
1174 release:
1175         hw->phy.ops.release(hw);
1176 out:
1177         if (ret_val)
1178                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1179         else
1180                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1181
1182         return ret_val;
1183 }
1184
1185 /**
1186  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1187  *  @hw: pointer to the HW structure
1188  *  @force: boolean indicating whether or not to force disabling ULP
1189  *
1190  *  Un-configure ULP mode when link is up, the system is transitioned from
1191  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1192  *  system, poll for an indication from ME that ULP has been un-configured.
1193  *  If not on an ME enabled system, un-configure the ULP mode by software.
1194  *
1195  *  During nominal operation, this function is called when link is acquired
1196  *  to disable ULP mode (force=false); otherwise, for example when unloading
1197  *  the driver or during Sx->S0 transitions, this is called with force=true
1198  *  to forcibly disable ULP.
1199  *
1200  *  When the cable is plugged in while the device is in D0, a Cable Status
1201  *  Change interrupt is generated which causes this function to be called
1202  *  to partially disable ULP mode and restart autonegotiation.  This function
1203  *  is then called again due to the resulting Link Status Change interrupt
1204  *  to finish cleaning up after the ULP flow.
1205  */
1206 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1207 {
1208         s32 ret_val = E1000_SUCCESS;
1209         u32 mac_reg;
1210         u16 phy_reg;
1211         int i = 0;
1212
1213         if ((hw->mac.type < e1000_pch_lpt) ||
1214             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1215             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1216             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1217             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1218             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1219                 return 0;
1220
1221         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1222                 if (force) {
1223                         /* Request ME un-configure ULP mode in the PHY */
1224                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1225                         mac_reg &= ~E1000_H2ME_ULP;
1226                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1227                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1228                 }
1229
1230                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1231                 while (E1000_READ_REG(hw, E1000_FWSM) &
1232                        E1000_FWSM_ULP_CFG_DONE) {
1233                         if (i++ == 10) {
1234                                 ret_val = -E1000_ERR_PHY;
1235                                 goto out;
1236                         }
1237
1238                         msec_delay(10);
1239                 }
1240                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1241
1242                 if (force) {
1243                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1244                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1245                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1246                 } else {
1247                         /* Clear H2ME.ULP after ME ULP configuration */
1248                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1249                         mac_reg &= ~E1000_H2ME_ULP;
1250                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1251
1252                         /* Restore link speed advertisements and restart
1253                          * Auto-negotiation
1254                          */
1255                         if (hw->mac.autoneg) {
1256                                 ret_val = e1000_phy_setup_autoneg(hw);
1257                                 if (ret_val)
1258                                         goto out;
1259                         } else {
1260                                 ret_val = e1000_setup_copper_link_generic(hw);
1261                                 if (ret_val)
1262                                         goto out;
1263                         }
1264                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1265                 }
1266
1267                 goto out;
1268         }
1269
1270         ret_val = hw->phy.ops.acquire(hw);
1271         if (ret_val)
1272                 goto out;
1273
1274         /* Revert the change to the 'Link Status Change'
1275          * interrupt to trigger on 'Cable Status Change'
1276          */
1277         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1278                                              &phy_reg);
1279         if (ret_val)
1280                 goto release;
1281         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1282         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1283
1284         if (force)
1285                 /* Toggle LANPHYPC Value bit */
1286                 e1000_toggle_lanphypc_pch_lpt(hw);
1287
1288         /* Unforce SMBus mode in PHY */
1289         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1290         if (ret_val) {
1291                 /* The MAC might be in PCIe mode, so temporarily force to
1292                  * SMBus mode in order to access the PHY.
1293                  */
1294                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1295                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1296                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1297
1298                 msec_delay(50);
1299
1300                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1301                                                        &phy_reg);
1302                 if (ret_val)
1303                         goto release;
1304         }
1305         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1306         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1307
1308         /* Unforce SMBus mode in MAC */
1309         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1310         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1311         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1312
1313         /* When ULP mode was previously entered, K1 was disabled by the
1314          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1315          */
1316         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1317         if (ret_val)
1318                 goto release;
1319         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1320         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1321
1322         /* Clear ULP enabled configuration */
1323         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1324         if (ret_val)
1325                 goto release;
1326         /* CSC interrupt received due to ULP Indication */
1327         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1328                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1329                              I218_ULP_CONFIG1_STICKY_ULP |
1330                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1331                              I218_ULP_CONFIG1_WOL_HOST |
1332                              I218_ULP_CONFIG1_INBAND_EXIT |
1333                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1334                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1335
1336                 /* Commit ULP changes by starting auto ULP configuration */
1337                 phy_reg |= I218_ULP_CONFIG1_START;
1338                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1339
1340                 /* Clear Disable SMBus Release on PERST# in MAC */
1341                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1342                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1343                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1344
1345                 if (!force) {
1346                         hw->phy.ops.release(hw);
1347
1348                         if (hw->mac.autoneg)
1349                                 e1000_phy_setup_autoneg(hw);
1350
1351                         e1000_sw_lcd_config_ich8lan(hw);
1352
1353                         e1000_oem_bits_config_ich8lan(hw, true);
1354
1355                         /* Set ULP state to unknown and return non-zero to
1356                          * indicate no link (yet) and re-enter on the next LSC
1357                          * to finish disabling ULP flow.
1358                          */
1359                         hw->dev_spec.ich8lan.ulp_state =
1360                             e1000_ulp_state_unknown;
1361
1362                         return 1;
1363                 }
1364         }
1365
1366         /* Re-enable Tx */
1367         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1368         mac_reg |= E1000_TCTL_EN;
1369         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1370
1371 release:
1372         hw->phy.ops.release(hw);
1373         if (force) {
1374                 hw->phy.ops.reset(hw);
1375                 msec_delay(50);
1376         }
1377 out:
1378         if (ret_val)
1379                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1380         else
1381                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1382
1383         return ret_val;
1384 }
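
/* Illustrative sketch, not part of the original driver: how the link
 * status change path is expected to drive the ULP enter/exit helpers.
 * Link state is taken from the MAC STATUS register because the PHY may be
 * unreachable while in ULP, and a return value of 1 from the non-forced
 * disable path only means "no link yet, retry on the next LSC".  The
 * function name below is ours; everything it calls exists in this file.
 */
STATIC s32 e1000_ulp_lsc_sketch(struct e1000_hw *hw)
{
        bool link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);

        if (link)
                return e1000_disable_ulp_lpt_lp(hw, false);

        return e1000_enable_ulp_lpt_lp(hw, false);
}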
1385
1386 #endif /* ULP_SUPPORT */
1387 /**
1388  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1389  *  @hw: pointer to the HW structure
1390  *
1391  *  Checks to see if the link status of the hardware has changed.  If a
1392  *  change in link status has been detected, then we read the PHY registers
1393  *  to get the current speed/duplex if link exists.
1394  **/
1395 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1396 {
1397         struct e1000_mac_info *mac = &hw->mac;
1398         s32 ret_val, tipg_reg = 0;
1399         u16 emi_addr, emi_val = 0;
1400         bool link = false;
1401         u16 phy_reg;
1402
1403         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1404
1405         /* We only want to go out to the PHY registers to see if Auto-Neg
1406          * has completed and/or if our link status has changed.  The
1407          * get_link_status flag is set upon receiving a Link Status
1408          * Change or Rx Sequence Error interrupt.
1409          */
1410         if (!mac->get_link_status)
1411                 return E1000_SUCCESS;
1412
1413         if ((hw->mac.type < e1000_pch_lpt) ||
1414             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1415             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1416                 /* First we want to see if the MII Status Register reports
1417                  * link.  If so, then we want to get the current speed/duplex
1418                  * of the PHY.
1419                  */
1420                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1421                 if (ret_val)
1422                         return ret_val;
1423         } else {
1424                 /* Check the MAC's STATUS register to determine link state
1425                  * since the PHY could be inaccessible while in ULP mode.
1426                  */
1427                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1428                 if (link)
1429                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1430                 else
1431                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1432
1433                 if (ret_val)
1434                         return ret_val;
1435         }
1436
1437         if (hw->mac.type == e1000_pchlan) {
1438                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1439                 if (ret_val)
1440                         return ret_val;
1441         }
1442
1443         /* When connected at 10Mbps half-duplex, some parts are excessively
1444          * aggressive resulting in many collisions. To avoid this, increase
1445          * the IPG and reduce Rx latency in the PHY.
1446          */
1447         if (((hw->mac.type == e1000_pch2lan) ||
1448              (hw->mac.type == e1000_pch_lpt)) && link) {
1449                 u16 speed, duplex;
1450
1451                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1452                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1453                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1454
1455                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1456                         tipg_reg |= 0xFF;
1457                         /* Reduce Rx latency in analog PHY */
1458                         emi_val = 0;
1459                 } else {
1460                         /* Roll back the default values */
1461                         tipg_reg |= 0x08;
1462                         emi_val = 1;
1463                 }
1464
1465                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1466
1467                 ret_val = hw->phy.ops.acquire(hw);
1468                 if (ret_val)
1469                         return ret_val;
1470
1471                 if (hw->mac.type == e1000_pch2lan)
1472                         emi_addr = I82579_RX_CONFIG;
1473                 else
1474                         emi_addr = I217_RX_CONFIG;
1475                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1476
1477                 hw->phy.ops.release(hw);
1478
1479                 if (ret_val)
1480                         return ret_val;
1481         }
1482
1483         /* I217 Packet Loss issue:
1484          * ensure that FEXTNVM4 Beacon Duration is set correctly
1485          * on power up.
1486          * Set the Beacon Duration for I217 to 8 usec
1487          */
1488         if (hw->mac.type == e1000_pch_lpt) {
1489                 u32 mac_reg;
1490
1491                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1492                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1493                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1494                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1495         }
1496
1497         /* Work-around I218 hang issue */
1498         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1499             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1500             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1501             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1502                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1503                 if (ret_val)
1504                         return ret_val;
1505         }
1506
1507         /* Clear link partner's EEE ability */
1508         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1509
1510         if (!link)
1511                 return E1000_SUCCESS; /* No link detected */
1512
1513         mac->get_link_status = false;
1514
1515         switch (hw->mac.type) {
1516         case e1000_pch2lan:
1517                 ret_val = e1000_k1_workaround_lv(hw);
1518                 if (ret_val)
1519                         return ret_val;
1520                 /* fall-thru */
1521         case e1000_pchlan:
1522                 if (hw->phy.type == e1000_phy_82578) {
1523                         ret_val = e1000_link_stall_workaround_hv(hw);
1524                         if (ret_val)
1525                                 return ret_val;
1526                 }
1527
1528                 /* Workaround for PCHx parts in half-duplex:
1529                  * Set the number of preambles removed from the packet
1530                  * when it is passed from the PHY to the MAC to prevent
1531                  * the MAC from misinterpreting the packet type.
1532                  */
1533                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1534                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1535
1536                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1537                     E1000_STATUS_FD)
1538                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1539
1540                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1541                 break;
1542         default:
1543                 break;
1544         }
1545
1546         /* Check if there was DownShift, must be checked
1547          * immediately after link-up
1548          */
1549         e1000_check_downshift_generic(hw);
1550
1551         /* Enable/Disable EEE after link up */
1552         if (hw->phy.type > e1000_phy_82579) {
1553                 ret_val = e1000_set_eee_pchlan(hw);
1554                 if (ret_val)
1555                         return ret_val;
1556         }
1557
1558         /* If we are forcing speed/duplex, then we simply return since
1559          * we have already determined whether we have link or not.
1560          */
1561         if (!mac->autoneg)
1562                 return -E1000_ERR_CONFIG;
1563
1564         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1565          * of MAC speed/duplex configuration.  So we only need to
1566          * configure Collision Distance in the MAC.
1567          */
1568         mac->ops.config_collision_dist(hw);
1569
1570         /* Configure Flow Control now that Auto-Neg has completed.
1571          * First, we need to restore the desired flow control
1572          * settings because we may have had to re-autoneg with a
1573          * different link partner.
1574          */
1575         ret_val = e1000_config_fc_after_link_up_generic(hw);
1576         if (ret_val)
1577                 DEBUGOUT("Error configuring flow control\n");
1578
1579         return ret_val;
1580 }
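
/* Illustrative sketch, not part of the original driver: the 10Mbps
 * half-duplex tuning in e1000_check_for_copper_link_ich8lan() reduces to
 * choosing an IPGT value for the TIPG register and a value for the PHY Rx
 * config EMI register.  The helper name is ours; the constants are the ones
 * used above.
 */
STATIC void e1000_pick_ipg_tuning_sketch(u16 speed, u16 duplex,
                                         u32 *ipgt, u16 *emi_val)
{
        if (duplex == HALF_DUPLEX && speed == SPEED_10) {
                *ipgt = 0xFF;   /* stretch the inter-packet gap */
                *emi_val = 0;   /* reduce Rx latency in the analog PHY */
        } else {
                *ipgt = 0x08;   /* default values */
                *emi_val = 1;
        }
}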
1581
1582 /**
1583  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1584  *  @hw: pointer to the HW structure
1585  *
1586  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1587  **/
1588 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1589 {
1590         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1591
1592         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1593         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1594         switch (hw->mac.type) {
1595         case e1000_ich8lan:
1596         case e1000_ich9lan:
1597         case e1000_ich10lan:
1598                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1599                 break;
1600         case e1000_pchlan:
1601         case e1000_pch2lan:
1602         case e1000_pch_lpt:
1603                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1604                 break;
1605         default:
1606                 break;
1607         }
1608 }
1609
1610 /**
1611  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1612  *  @hw: pointer to the HW structure
1613  *
1614  *  Acquires the mutex for performing NVM operations.
1615  **/
1616 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1617 {
1618         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1619
1620         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1621
1622         return E1000_SUCCESS;
1623 }
1624
1625 /**
1626  *  e1000_release_nvm_ich8lan - Release NVM mutex
1627  *  @hw: pointer to the HW structure
1628  *
1629  *  Releases the mutex used while performing NVM operations.
1630  **/
1631 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1632 {
1633         DEBUGFUNC("e1000_release_nvm_ich8lan");
1634
1635         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1636
1637         return;
1638 }
1639
1640 /**
1641  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1642  *  @hw: pointer to the HW structure
1643  *
1644  *  Acquires the software control flag for performing PHY and select
1645  *  MAC CSR accesses.
1646  **/
1647 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1648 {
1649         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1650         s32 ret_val = E1000_SUCCESS;
1651
1652         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1653
1654         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1655
1656         while (timeout) {
1657                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1658                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1659                         break;
1660
1661                 msec_delay_irq(1);
1662                 timeout--;
1663         }
1664
1665         if (!timeout) {
1666                 DEBUGOUT("SW has already locked the resource.\n");
1667                 ret_val = -E1000_ERR_CONFIG;
1668                 goto out;
1669         }
1670
1671         timeout = SW_FLAG_TIMEOUT;
1672
1673         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1674         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1675
1676         while (timeout) {
1677                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1678                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1679                         break;
1680
1681                 msec_delay_irq(1);
1682                 timeout--;
1683         }
1684
1685         if (!timeout) {
1686                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1687                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1688                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1689                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1690                 ret_val = -E1000_ERR_CONFIG;
1691                 goto out;
1692         }
1693
1694 out:
1695         if (ret_val)
1696                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1697
1698         return ret_val;
1699 }
1700
1701 /**
1702  *  e1000_release_swflag_ich8lan - Release software control flag
1703  *  @hw: pointer to the HW structure
1704  *
1705  *  Releases the software control flag for performing PHY and select
1706  *  MAC CSR accesses.
1707  **/
1708 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1709 {
1710         u32 extcnf_ctrl;
1711
1712         DEBUGFUNC("e1000_release_swflag_ich8lan");
1713
1714         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1715
1716         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1717                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1718                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1719         } else {
1720                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1721         }
1722
1723         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1724
1725         return;
1726 }
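
/* Illustrative sketch, not part of the original driver: the expected usage
 * pattern for the software flag pair above (see e1000_rar_set_pch2lan() for
 * a real caller).  Once acquired, the flag must be released on every path.
 * The function name is ours.
 */
STATIC s32 e1000_swflag_usage_sketch(struct e1000_hw *hw)
{
        s32 ret_val;

        ret_val = e1000_acquire_swflag_ich8lan(hw);
        if (ret_val)
                return ret_val; /* FW/HW owns the flag; do not touch the CSRs */

        /* ... access the PHY or the protected MAC CSRs here ... */

        e1000_release_swflag_ich8lan(hw);

        return E1000_SUCCESS;
}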
1727
1728 /**
1729  *  e1000_check_mng_mode_ich8lan - Checks management mode
1730  *  @hw: pointer to the HW structure
1731  *
1732  *  This checks if the adapter has any manageability enabled.
1733  *  This is a function pointer entry point only called by read/write
1734  *  routines for the PHY and NVM parts.
1735  **/
1736 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1737 {
1738         u32 fwsm;
1739
1740         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1741
1742         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1743
1744         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1745                ((fwsm & E1000_FWSM_MODE_MASK) ==
1746                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1747 }
1748
1749 /**
1750  *  e1000_check_mng_mode_pchlan - Checks management mode
1751  *  @hw: pointer to the HW structure
1752  *
1753  *  This checks if the adapter has iAMT enabled.
1754  *  This is a function pointer entry point only called by read/write
1755  *  routines for the PHY and NVM parts.
1756  **/
1757 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1758 {
1759         u32 fwsm;
1760
1761         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1762
1763         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1764
1765         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1766                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1767 }
1768
1769 /**
1770  *  e1000_rar_set_pch2lan - Set receive address register
1771  *  @hw: pointer to the HW structure
1772  *  @addr: pointer to the receive address
1773  *  @index: receive address array register
1774  *
1775  *  Sets the receive address array register at index to the address passed
1776  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1777  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1778  *  Use SHRA[0-3] in place of those reserved for ME.
1779  **/
1780 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1781 {
1782         u32 rar_low, rar_high;
1783
1784         DEBUGFUNC("e1000_rar_set_pch2lan");
1785
1786         /* HW expects these in little endian so we reverse the byte order
1787          * from network order (big endian) to little endian
1788          */
1789         rar_low = ((u32) addr[0] |
1790                    ((u32) addr[1] << 8) |
1791                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1792
1793         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1794
1795         /* If MAC address zero, no need to set the AV bit */
1796         if (rar_low || rar_high)
1797                 rar_high |= E1000_RAH_AV;
1798
1799         if (index == 0) {
1800                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1801                 E1000_WRITE_FLUSH(hw);
1802                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1803                 E1000_WRITE_FLUSH(hw);
1804                 return E1000_SUCCESS;
1805         }
1806
1807         /* RAR[1-6] are owned by manageability.  Skip those and program the
1808          * next address into the SHRA register array.
1809          */
1810         if (index < (u32) (hw->mac.rar_entry_count)) {
1811                 s32 ret_val;
1812
1813                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1814                 if (ret_val)
1815                         goto out;
1816
1817                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1818                 E1000_WRITE_FLUSH(hw);
1819                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1820                 E1000_WRITE_FLUSH(hw);
1821
1822                 e1000_release_swflag_ich8lan(hw);
1823
1824                 /* verify the register updates */
1825                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1826                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1827                         return E1000_SUCCESS;
1828
1829                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1830                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1831         }
1832
1833 out:
1834         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1835         return -E1000_ERR_CONFIG;
1836 }
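
/* Illustrative sketch, not part of the original driver: the byte order used
 * by the RAL/RAH (and SHRAL/SHRAH) writes above.  For the address
 * aa:bb:cc:dd:ee:ff the registers end up holding rar_low = 0xddccbbaa and
 * rar_high = 0x0000ffee, with E1000_RAH_AV set for any non-zero address.
 * The helper name is ours.
 */
STATIC void e1000_pack_rar_sketch(const u8 *addr, u32 *rar_low, u32 *rar_high)
{
        *rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
                    ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
        *rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

        if (*rar_low || *rar_high)
                *rar_high |= E1000_RAH_AV;
}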
1837
1838 /**
1839  *  e1000_rar_set_pch_lpt - Set receive address registers
1840  *  @hw: pointer to the HW structure
1841  *  @addr: pointer to the receive address
1842  *  @index: receive address array register
1843  *
1844  *  Sets the receive address register array at index to the address passed
1845  *  in by addr. For LPT, RAR[0] is the base address register that is to
1846  *  contain the MAC address. SHRA[0-10] are the shared receive address
1847  *  registers that are shared between the Host and manageability engine (ME).
1848  **/
1849 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1850 {
1851         u32 rar_low, rar_high;
1852         u32 wlock_mac;
1853
1854         DEBUGFUNC("e1000_rar_set_pch_lpt");
1855
1856         /* HW expects these in little endian so we reverse the byte order
1857          * from network order (big endian) to little endian
1858          */
1859         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1860                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1861
1862         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1863
1864         /* If MAC address zero, no need to set the AV bit */
1865         if (rar_low || rar_high)
1866                 rar_high |= E1000_RAH_AV;
1867
1868         if (index == 0) {
1869                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1870                 E1000_WRITE_FLUSH(hw);
1871                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1872                 E1000_WRITE_FLUSH(hw);
1873                 return E1000_SUCCESS;
1874         }
1875
1876         /* The manageability engine (ME) can lock certain SHRAR registers that
1877          * it is using - those registers are unavailable for use.
1878          */
1879         if (index < hw->mac.rar_entry_count) {
1880                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1881                             E1000_FWSM_WLOCK_MAC_MASK;
1882                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1883
1884                 /* Check if all SHRAR registers are locked */
1885                 if (wlock_mac == 1)
1886                         goto out;
1887
1888                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1889                         s32 ret_val;
1890
1891                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1892
1893                         if (ret_val)
1894                                 goto out;
1895
1896                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1897                                         rar_low);
1898                         E1000_WRITE_FLUSH(hw);
1899                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1900                                         rar_high);
1901                         E1000_WRITE_FLUSH(hw);
1902
1903                         e1000_release_swflag_ich8lan(hw);
1904
1905                         /* verify the register updates */
1906                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1907                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1908                                 return E1000_SUCCESS;
1909                 }
1910         }
1911
1912 out:
1913         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1914         return -E1000_ERR_CONFIG;
1915 }
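
/* Illustrative sketch, not part of the original driver: how the FWSM
 * WLOCK_MAC field gates SHRAR writes in e1000_rar_set_pch_lpt() above.
 * A value of 0 means ME locked nothing, 1 means every SHRAR is locked, and
 * any other value n means the host may still write receive addresses with
 * index <= n.  The helper name is ours.
 */
STATIC bool e1000_shrar_writable_sketch(struct e1000_hw *hw, u32 index)
{
        u32 wlock_mac = (E1000_READ_REG(hw, E1000_FWSM) &
                         E1000_FWSM_WLOCK_MAC_MASK) >>
                        E1000_FWSM_WLOCK_MAC_SHIFT;

        if (wlock_mac == 1)
                return false;

        return (wlock_mac == 0) || (index <= wlock_mac);
}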
1916
1917 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1918 /**
1919  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1920  *  @hw: pointer to the HW structure
1921  *  @mc_addr_list: array of multicast addresses to program
1922  *  @mc_addr_count: number of multicast addresses to program
1923  *
1924  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1925  *  The caller must have a packed mc_addr_list of multicast addresses.
1926  **/
1927 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1928                                               u8 *mc_addr_list,
1929                                               u32 mc_addr_count)
1930 {
1931         u16 phy_reg = 0;
1932         int i;
1933         s32 ret_val;
1934
1935         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1936
1937         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1938
1939         ret_val = hw->phy.ops.acquire(hw);
1940         if (ret_val)
1941                 return;
1942
1943         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1944         if (ret_val)
1945                 goto release;
1946
1947         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1948                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1949                                            (u16)(hw->mac.mta_shadow[i] &
1950                                                  0xFFFF));
1951                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1952                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1953                                                  0xFFFF));
1954         }
1955
1956         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1957
1958 release:
1959         hw->phy.ops.release(hw);
1960 }
1961
1962 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1963 /**
1964  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1965  *  @hw: pointer to the HW structure
1966  *
1967  *  Checks if firmware is blocking the reset of the PHY.
1968  *  This is a function pointer entry point only called by
1969  *  reset routines.
1970  **/
1971 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1972 {
1973         u32 fwsm;
1974         bool blocked = false;
1975         int i = 0;
1976
1977         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1978
1979         do {
1980                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1981                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1982                         blocked = true;
1983                         msec_delay(10);
1984                         continue;
1985                 }
1986                 blocked = false;
1987         } while (blocked && (i++ < 10));
1988         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1989 }
1990
1991 /**
1992  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1993  *  @hw: pointer to the HW structure
1994  *
1995  *  Assumes semaphore already acquired.
1996  *
1997  **/
1998 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1999 {
2000         u16 phy_data;
2001         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2002         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2003                 E1000_STRAP_SMT_FREQ_SHIFT;
2004         s32 ret_val;
2005
2006         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2007
2008         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2009         if (ret_val)
2010                 return ret_val;
2011
2012         phy_data &= ~HV_SMB_ADDR_MASK;
2013         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2014         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2015
2016         if (hw->phy.type == e1000_phy_i217) {
2017                 /* Restore SMBus frequency */
2018                 if (freq--) {
2019                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2020                         phy_data |= (freq & (1 << 0)) <<
2021                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2022                         phy_data |= (freq & (1 << 1)) <<
2023                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2024                 } else {
2025                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2026                 }
2027         }
2028
2029         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2030 }
2031
2032 /**
2033  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2034  *  @hw:   pointer to the HW structure
2035  *
2036  *  SW should configure the LCD from the NVM extended configuration region
2037  *  as a workaround for certain parts.
2038  **/
2039 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2040 {
2041         struct e1000_phy_info *phy = &hw->phy;
2042         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2043         s32 ret_val = E1000_SUCCESS;
2044         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2045
2046         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2047
2048         /* Initialize the PHY from the NVM on ICH platforms.  This
2049          * is needed due to an issue where the NVM configuration is
2050          * not properly autoloaded after power transitions.
2051          * Therefore, after each PHY reset, we will load the
2052          * configuration data out of the NVM manually.
2053          */
2054         switch (hw->mac.type) {
2055         case e1000_ich8lan:
2056                 if (phy->type != e1000_phy_igp_3)
2057                         return ret_val;
2058
2059                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2060                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2061                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2062                         break;
2063                 }
2064                 /* Fall-thru */
2065         case e1000_pchlan:
2066         case e1000_pch2lan:
2067         case e1000_pch_lpt:
2068                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2069                 break;
2070         default:
2071                 return ret_val;
2072         }
2073
2074         ret_val = hw->phy.ops.acquire(hw);
2075         if (ret_val)
2076                 return ret_val;
2077
2078         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2079         if (!(data & sw_cfg_mask))
2080                 goto release;
2081
2082         /* Make sure HW does not configure LCD from PHY
2083          * extended configuration before SW configuration
2084          */
2085         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2086         if ((hw->mac.type < e1000_pch2lan) &&
2087             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2088                 goto release;
2089
2090         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2091         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2092         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2093         if (!cnf_size)
2094                 goto release;
2095
2096         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2097         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2098
2099         if (((hw->mac.type == e1000_pchlan) &&
2100              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2101             (hw->mac.type > e1000_pchlan)) {
2102                 /* HW configures the SMBus address and LEDs when the
2103                  * OEM and LCD Write Enable bits are set in the NVM.
2104                  * When both NVM bits are cleared, SW will configure
2105                  * them instead.
2106                  */
2107                 ret_val = e1000_write_smbus_addr(hw);
2108                 if (ret_val)
2109                         goto release;
2110
2111                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2112                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2113                                                         (u16)data);
2114                 if (ret_val)
2115                         goto release;
2116         }
2117
2118         /* Configure LCD from extended configuration region. */
2119
2120         /* cnf_base_addr is in DWORD */
2121         word_addr = (u16)(cnf_base_addr << 1);
2122
2123         for (i = 0; i < cnf_size; i++) {
2124                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2125                                            &reg_data);
2126                 if (ret_val)
2127                         goto release;
2128
2129                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2130                                            1, &reg_addr);
2131                 if (ret_val)
2132                         goto release;
2133
2134                 /* Save off the PHY page for future writes. */
2135                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2136                         phy_page = reg_data;
2137                         continue;
2138                 }
2139
2140                 reg_addr &= PHY_REG_MASK;
2141                 reg_addr |= phy_page;
2142
2143                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2144                                                     reg_data);
2145                 if (ret_val)
2146                         goto release;
2147         }
2148
2149 release:
2150         hw->phy.ops.release(hw);
2151         return ret_val;
2152 }
2153
2154 /**
2155  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2156  *  @hw:   pointer to the HW structure
2157  *  @link: link up bool flag
2158  *
2159  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2160  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2161  *  If link is down, the function will restore the default K1 setting located
2162  *  in the NVM.
2163  **/
2164 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2165 {
2166         s32 ret_val = E1000_SUCCESS;
2167         u16 status_reg = 0;
2168         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2169
2170         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2171
2172         if (hw->mac.type != e1000_pchlan)
2173                 return E1000_SUCCESS;
2174
2175         /* Wrap the whole flow with the sw flag */
2176         ret_val = hw->phy.ops.acquire(hw);
2177         if (ret_val)
2178                 return ret_val;
2179
2180         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2181         if (link) {
2182                 if (hw->phy.type == e1000_phy_82578) {
2183                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2184                                                               &status_reg);
2185                         if (ret_val)
2186                                 goto release;
2187
2188                         status_reg &= (BM_CS_STATUS_LINK_UP |
2189                                        BM_CS_STATUS_RESOLVED |
2190                                        BM_CS_STATUS_SPEED_MASK);
2191
2192                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2193                                            BM_CS_STATUS_RESOLVED |
2194                                            BM_CS_STATUS_SPEED_1000))
2195                                 k1_enable = false;
2196                 }
2197
2198                 if (hw->phy.type == e1000_phy_82577) {
2199                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2200                                                               &status_reg);
2201                         if (ret_val)
2202                                 goto release;
2203
2204                         status_reg &= (HV_M_STATUS_LINK_UP |
2205                                        HV_M_STATUS_AUTONEG_COMPLETE |
2206                                        HV_M_STATUS_SPEED_MASK);
2207
2208                         if (status_reg == (HV_M_STATUS_LINK_UP |
2209                                            HV_M_STATUS_AUTONEG_COMPLETE |
2210                                            HV_M_STATUS_SPEED_1000))
2211                                 k1_enable = false;
2212                 }
2213
2214                 /* Link stall fix for link up */
2215                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2216                                                        0x0100);
2217                 if (ret_val)
2218                         goto release;
2219
2220         } else {
2221                 /* Link stall fix for link down */
2222                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2223                                                        0x4100);
2224                 if (ret_val)
2225                         goto release;
2226         }
2227
2228         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2229
2230 release:
2231         hw->phy.ops.release(hw);
2232
2233         return ret_val;
2234 }
2235
2236 /**
2237  *  e1000_configure_k1_ich8lan - Configure K1 power state
2238  *  @hw: pointer to the HW structure
2239  *  @k1_enable: K1 state to configure
2240  *
2241  *  Configure the K1 power state based on the provided parameter.
2242  *  Assumes semaphore already acquired.
2243  *
2244  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2245  **/
2246 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2247 {
2248         s32 ret_val;
2249         u32 ctrl_reg = 0;
2250         u32 ctrl_ext = 0;
2251         u32 reg = 0;
2252         u16 kmrn_reg = 0;
2253
2254         DEBUGFUNC("e1000_configure_k1_ich8lan");
2255
2256         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2257                                              &kmrn_reg);
2258         if (ret_val)
2259                 return ret_val;
2260
2261         if (k1_enable)
2262                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2263         else
2264                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2265
2266         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2267                                               kmrn_reg);
2268         if (ret_val)
2269                 return ret_val;
2270
2271         usec_delay(20);
2272         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2273         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2274
2275         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2276         reg |= E1000_CTRL_FRCSPD;
2277         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2278
2279         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2280         E1000_WRITE_FLUSH(hw);
2281         usec_delay(20);
2282         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2283         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2284         E1000_WRITE_FLUSH(hw);
2285         usec_delay(20);
2286
2287         return E1000_SUCCESS;
2288 }
2289
2290 /**
2291  *  e1000_oem_bits_config_ich8lan - SW-based OEM Bits Configuration
2292  *  @hw:       pointer to the HW structure
2293  *  @d0_state: boolean if entering d0 or d3 device state
2294  *
2295  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2296  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2297  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2298  **/
2299 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2300 {
2301         s32 ret_val = 0;
2302         u32 mac_reg;
2303         u16 oem_reg;
2304
2305         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2306
2307         if (hw->mac.type < e1000_pchlan)
2308                 return ret_val;
2309
2310         ret_val = hw->phy.ops.acquire(hw);
2311         if (ret_val)
2312                 return ret_val;
2313
2314         if (hw->mac.type == e1000_pchlan) {
2315                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2316                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2317                         goto release;
2318         }
2319
2320         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2321         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2322                 goto release;
2323
2324         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2325
2326         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2327         if (ret_val)
2328                 goto release;
2329
2330         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2331
2332         if (d0_state) {
2333                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2334                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2335
2336                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2337                         oem_reg |= HV_OEM_BITS_LPLU;
2338         } else {
2339                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2340                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2341                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2342
2343                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2344                     E1000_PHY_CTRL_NOND0A_LPLU))
2345                         oem_reg |= HV_OEM_BITS_LPLU;
2346         }
2347
2348         /* Set Restart auto-neg to activate the bits */
2349         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2350             !hw->phy.ops.check_reset_block(hw))
2351                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2352
2353         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2354
2355 release:
2356         hw->phy.ops.release(hw);
2357
2358         return ret_val;
2359 }
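
/* Illustrative sketch, not part of the original driver: the PHY_CTRL to
 * HV_OEM_BITS mapping applied above, separated out for clarity.  In D0 only
 * the D0 bits are honoured; outside D0 the non-D0A bits are honoured as
 * well.  The helper name is ours; the register bits are the ones used above.
 */
STATIC u16 e1000_oem_bits_from_phy_ctrl_sketch(u32 phy_ctrl, bool d0_state)
{
        u32 gbe_mask = E1000_PHY_CTRL_GBE_DISABLE;
        u32 lplu_mask = E1000_PHY_CTRL_D0A_LPLU;
        u16 oem_bits = 0;

        if (!d0_state) {
                gbe_mask |= E1000_PHY_CTRL_NOND0A_GBE_DISABLE;
                lplu_mask |= E1000_PHY_CTRL_NOND0A_LPLU;
        }

        if (phy_ctrl & gbe_mask)
                oem_bits |= HV_OEM_BITS_GBE_DIS;
        if (phy_ctrl & lplu_mask)
                oem_bits |= HV_OEM_BITS_LPLU;

        return oem_bits;
}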
2360
2361
2362 /**
2363  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2364  *  @hw:   pointer to the HW structure
2365  **/
2366 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2367 {
2368         s32 ret_val;
2369         u16 data;
2370
2371         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2372
2373         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2374         if (ret_val)
2375                 return ret_val;
2376
2377         data |= HV_KMRN_MDIO_SLOW;
2378
2379         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2380
2381         return ret_val;
2382 }
2383
2384 /**
2385  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2386  *  done after every PHY reset.
2387  **/
2388 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2389 {
2390         s32 ret_val = E1000_SUCCESS;
2391         u16 phy_data;
2392
2393         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2394
2395         if (hw->mac.type != e1000_pchlan)
2396                 return E1000_SUCCESS;
2397
2398         /* Set MDIO slow mode before any other MDIO access */
2399         if (hw->phy.type == e1000_phy_82577) {
2400                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2401                 if (ret_val)
2402                         return ret_val;
2403         }
2404
2405         if (((hw->phy.type == e1000_phy_82577) &&
2406              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2407             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2408                 /* Disable generation of early preamble */
2409                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2410                 if (ret_val)
2411                         return ret_val;
2412
2413                 /* Preamble tuning for SSC */
2414                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2415                                                 0xA204);
2416                 if (ret_val)
2417                         return ret_val;
2418         }
2419
2420         if (hw->phy.type == e1000_phy_82578) {
2421                 /* Return registers to default by doing a soft reset then
2422                  * writing 0x3140 to the control register.
2423                  */
2424                 if (hw->phy.revision < 2) {
2425                         e1000_phy_sw_reset_generic(hw);
2426                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2427                                                         0x3140);
2428                 }
2429         }
2430
2431         /* Select page 0 */
2432         ret_val = hw->phy.ops.acquire(hw);
2433         if (ret_val)
2434                 return ret_val;
2435
2436         hw->phy.addr = 1;
2437         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2438         hw->phy.ops.release(hw);
2439         if (ret_val)
2440                 return ret_val;
2441
2442         /* Configure the K1 Si workaround during phy reset assuming there is
2443          * link so that it disables K1 if link is in 1Gbps.
2444          */
2445         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2446         if (ret_val)
2447                 return ret_val;
2448
2449         /* Workaround for link disconnects on a busy hub in half duplex */
2450         ret_val = hw->phy.ops.acquire(hw);
2451         if (ret_val)
2452                 return ret_val;
2453         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2454         if (ret_val)
2455                 goto release;
2456         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2457                                                phy_data & 0x00FF);
2458         if (ret_val)
2459                 goto release;
2460
2461         /* set MSE higher to enable link to stay up when noise is high */
2462         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2463 release:
2464         hw->phy.ops.release(hw);
2465
2466         return ret_val;
2467 }
2468
2469 /**
2470  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2471  *  @hw:   pointer to the HW structure
2472  **/
2473 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2474 {
2475         u32 mac_reg;
2476         u16 i, phy_reg = 0;
2477         s32 ret_val;
2478
2479         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2480
2481         ret_val = hw->phy.ops.acquire(hw);
2482         if (ret_val)
2483                 return;
2484         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2485         if (ret_val)
2486                 goto release;
2487
2488         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2489         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2490                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2491                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2492                                            (u16)(mac_reg & 0xFFFF));
2493                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2494                                            (u16)((mac_reg >> 16) & 0xFFFF));
2495
2496                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2497                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2498                                            (u16)(mac_reg & 0xFFFF));
2499                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2500                                            (u16)((mac_reg & E1000_RAH_AV)
2501                                                  >> 16));
2502         }
2503
2504         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2505
2506 release:
2507         hw->phy.ops.release(hw);
2508 }
2509
2510 #ifndef CRC32_OS_SUPPORT
2511 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2512 {
2513         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2514         u32 i, j, mask, crc;
2515
2516         DEBUGFUNC("e1000_calc_rx_da_crc");
2517
2518         crc = 0xffffffff;
2519         for (i = 0; i < 6; i++) {
2520                 crc = crc ^ mac[i];
2521                 for (j = 8; j > 0; j--) {
2522                         mask = (crc & 1) * (-1);
2523                         crc = (crc >> 1) ^ (poly & mask);
2524                 }
2525         }
2526         return ~crc;
2527 }
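
/* Illustrative sketch, not part of the original driver: the helper above
 * computes the reflected IEEE 802.3 CRC-32 of a 6-byte destination address,
 * which the jumbo frame workaround below writes into E1000_PCH_RAICC() for
 * every valid receive address.  A minimal standalone call with a made-up
 * example address:
 */
STATIC u32 e1000_rx_da_crc_example_sketch(void)
{
        u8 example_mac[ETH_ADDR_LEN] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

        return e1000_calc_rx_da_crc(example_mac);
}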
2528
2529 #endif /* CRC32_OS_SUPPORT */
2530 /**
2531  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2532  *  with 82579 PHY
2533  *  @hw: pointer to the HW structure
2534  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2535  **/
2536 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2537 {
2538         s32 ret_val = E1000_SUCCESS;
2539         u16 phy_reg, data;
2540         u32 mac_reg;
2541         u16 i;
2542
2543         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2544
2545         if (hw->mac.type < e1000_pch2lan)
2546                 return E1000_SUCCESS;
2547
2548         /* disable Rx path while enabling/disabling workaround */
2549         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2550         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2551                                         phy_reg | (1 << 14));
2552         if (ret_val)
2553                 return ret_val;
2554
2555         if (enable) {
2556                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2557                  * SHRAL/H) and initial CRC values to the MAC
2558                  */
2559                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2560                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2561                         u32 addr_high, addr_low;
2562
2563                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2564                         if (!(addr_high & E1000_RAH_AV))
2565                                 continue;
2566                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2567                         mac_addr[0] = (addr_low & 0xFF);
2568                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2569                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2570                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2571                         mac_addr[4] = (addr_high & 0xFF);
2572                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2573
2574 #ifndef CRC32_OS_SUPPORT
2575                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2576                                         e1000_calc_rx_da_crc(mac_addr));
2577 #else /* CRC32_OS_SUPPORT */
2578                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2579                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2580 #endif /* CRC32_OS_SUPPORT */
2581                 }
2582
2583                 /* Write Rx addresses to the PHY */
2584                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2585
2586                 /* Enable jumbo frame workaround in the MAC */
2587                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2588                 mac_reg &= ~(1 << 14);
2589                 mac_reg |= (7 << 15);
2590                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2591
2592                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2593                 mac_reg |= E1000_RCTL_SECRC;
2594                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2595
2596                 ret_val = e1000_read_kmrn_reg_generic(hw,
2597                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2598                                                 &data);
2599                 if (ret_val)
2600                         return ret_val;
2601                 ret_val = e1000_write_kmrn_reg_generic(hw,
2602                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2603                                                 data | (1 << 0));
2604                 if (ret_val)
2605                         return ret_val;
2606                 ret_val = e1000_read_kmrn_reg_generic(hw,
2607                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2608                                                 &data);
2609                 if (ret_val)
2610                         return ret_val;
2611                 data &= ~(0xF << 8);
2612                 data |= (0xB << 8);
2613                 ret_val = e1000_write_kmrn_reg_generic(hw,
2614                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2615                                                 data);
2616                 if (ret_val)
2617                         return ret_val;
2618
2619                 /* Enable jumbo frame workaround in the PHY */
2620                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2621                 data &= ~(0x7F << 5);
2622                 data |= (0x37 << 5);
2623                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2624                 if (ret_val)
2625                         return ret_val;
2626                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2627                 data &= ~(1 << 13);
2628                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2629                 if (ret_val)
2630                         return ret_val;
2631                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2632                 data &= ~(0x3FF << 2);
2633                 data |= (E1000_TX_PTR_GAP << 2);
2634                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2635                 if (ret_val)
2636                         return ret_val;
2637                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2638                 if (ret_val)
2639                         return ret_val;
2640                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2641                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2642                                                 (1 << 10));
2643                 if (ret_val)
2644                         return ret_val;
2645         } else {
2646                 /* Write MAC register values back to h/w defaults */
2647                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2648                 mac_reg &= ~(0xF << 14);
2649                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2650
2651                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2652                 mac_reg &= ~E1000_RCTL_SECRC;
2653                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2654
2655                 ret_val = e1000_read_kmrn_reg_generic(hw,
2656                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2657                                                 &data);
2658                 if (ret_val)
2659                         return ret_val;
2660                 ret_val = e1000_write_kmrn_reg_generic(hw,
2661                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2662                                                 data & ~(1 << 0));
2663                 if (ret_val)
2664                         return ret_val;
2665                 ret_val = e1000_read_kmrn_reg_generic(hw,
2666                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2667                                                 &data);
2668                 if (ret_val)
2669                         return ret_val;
2670                 data &= ~(0xF << 8);
2671                 data |= (0xB << 8);
2672                 ret_val = e1000_write_kmrn_reg_generic(hw,
2673                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2674                                                 data);
2675                 if (ret_val)
2676                         return ret_val;
2677
2678                 /* Write PHY register values back to h/w defaults */
2679                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2680                 data &= ~(0x7F << 5);
2681                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2682                 if (ret_val)
2683                         return ret_val;
2684                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2685                 data |= (1 << 13);
2686                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2687                 if (ret_val)
2688                         return ret_val;
2689                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2690                 data &= ~(0x3FF << 2);
2691                 data |= (0x8 << 2);
2692                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2693                 if (ret_val)
2694                         return ret_val;
2695                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2696                 if (ret_val)
2697                         return ret_val;
2698                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2699                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2700                                                 ~(1 << 10));
2701                 if (ret_val)
2702                         return ret_val;
2703         }
2704
2705         /* re-enable Rx path after enabling/disabling workaround */
2706         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2707                                      ~(1 << 14));
2708 }
2709
2710 /**
2711  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2712  *  done after every PHY reset.
      *  @hw: pointer to the HW structure
2713  **/
2714 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2715 {
2716         s32 ret_val = E1000_SUCCESS;
2717
2718         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2719
2720         if (hw->mac.type != e1000_pch2lan)
2721                 return E1000_SUCCESS;
2722
2723         /* Set MDIO slow mode before any other MDIO access */
2724         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2725         if (ret_val)
2726                 return ret_val;
2727
2728         ret_val = hw->phy.ops.acquire(hw);
2729         if (ret_val)
2730                 return ret_val;
2731         /* set MSE higher so the link stays up when noise is high */
2732         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2733         if (ret_val)
2734                 goto release;
2735         /* drop the link after the MSE threshold has been reached 5 times */
2736         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2737 release:
2738         hw->phy.ops.release(hw);
2739
2740         return ret_val;
2741 }
2742
2743 /**
2744  *  e1000_k1_workaround_lv - K1 Si workaround
2745  *  @hw:   pointer to the HW structure
2746  *
2747  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2748  *  K1 is disabled entirely at 1000 and 100 speeds.
2749  **/
2750 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2751 {
2752         s32 ret_val = E1000_SUCCESS;
2753         u16 status_reg = 0;
2754
2755         DEBUGFUNC("e1000_k1_workaround_lv");
2756
2757         if (hw->mac.type != e1000_pch2lan)
2758                 return E1000_SUCCESS;
2759
2760         /* Set K1 beacon duration based on 10Mbps speed */
2761         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2762         if (ret_val)
2763                 return ret_val;
2764
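             /* With link up and auto-negotiation complete: at 1000/100
              * speeds K1 is disabled entirely, while at 10Mbps the K1
              * beacon duration is set to 16 usec instead.
              */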
2765         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2766             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2767                 if (status_reg &
2768                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2769                         u16 pm_phy_reg;
2770
2771                         /* LV 1G/100 packet drop issue workaround */
2772                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2773                                                        &pm_phy_reg);
2774                         if (ret_val)
2775                                 return ret_val;
2776                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2777                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2778                                                         pm_phy_reg);
2779                         if (ret_val)
2780                                 return ret_val;
2781                 } else {
2782                         u32 mac_reg;
2783                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2784                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2785                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2786                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2787                 }
2788         }
2789
2790         return ret_val;
2791 }
2792
2793 /**
2794  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2795  *  @hw:   pointer to the HW structure
2796  *  @gate: boolean set to true to gate, false to ungate
2797  *
2798  *  Gate/ungate the automatic PHY configuration via hardware; perform
2799  *  the configuration via software instead.
2800  **/
2801 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2802 {
2803         u32 extcnf_ctrl;
2804
2805         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2806
2807         if (hw->mac.type < e1000_pch2lan)
2808                 return;
2809
2810         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2811
2812         if (gate)
2813                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2814         else
2815                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2816
2817         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2818 }
2819
2820 /**
2821  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2822  *  @hw: pointer to the HW structure
2823  *
2824  *  Check the appropriate indication that the MAC has finished configuring
2825  *  the PHY after a software reset.
2826  **/
2827 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2828 {
2829         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2830
2831         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2832
2833         /* Wait for basic configuration to complete before proceeding */
2834         do {
2835                 data = E1000_READ_REG(hw, E1000_STATUS);
2836                 data &= E1000_STATUS_LAN_INIT_DONE;
2837                 usec_delay(100);
2838         } while ((!data) && --loop);
2839
2840         /* If basic configuration is incomplete before the above loop
2841          * count reaches 0, loading the configuration from NVM will
2842          * leave the PHY in a bad state possibly resulting in no link.
2843          */
2844         if (loop == 0)
2845                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2846
2847         /* Clear the Init Done bit for the next init event */
2848         data = E1000_READ_REG(hw, E1000_STATUS);
2849         data &= ~E1000_STATUS_LAN_INIT_DONE;
2850         E1000_WRITE_REG(hw, E1000_STATUS, data);
2851 }
2852
2853 /**
2854  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2855  *  @hw: pointer to the HW structure
2856  **/
2857 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2858 {
2859         s32 ret_val = E1000_SUCCESS;
2860         u16 reg;
2861
2862         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2863
2864         if (hw->phy.ops.check_reset_block(hw))
2865                 return E1000_SUCCESS;
2866
2867         /* Allow time for h/w to get to quiescent state after reset */
2868         msec_delay(10);
2869
2870         /* Perform any necessary post-reset workarounds */
2871         switch (hw->mac.type) {
2872         case e1000_pchlan:
2873                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2874                 if (ret_val)
2875                         return ret_val;
2876                 break;
2877         case e1000_pch2lan:
2878                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2879                 if (ret_val)
2880                         return ret_val;
2881                 break;
2882         default:
2883                 break;
2884         }
2885
2886         /* Clear the host wakeup bit after lcd reset */
2887         if (hw->mac.type >= e1000_pchlan) {
2888                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2889                 reg &= ~BM_WUC_HOST_WU_BIT;
2890                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2891         }
2892
2893         /* Configure the LCD with the extended configuration region in NVM */
2894         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2895         if (ret_val)
2896                 return ret_val;
2897
2898         /* Configure the LCD with the OEM bits in NVM */
2899         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2900
2901         if (hw->mac.type == e1000_pch2lan) {
2902                 /* Ungate automatic PHY configuration on non-managed 82579 */
2903                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2904                     E1000_ICH_FWSM_FW_VALID)) {
2905                         msec_delay(10);
2906                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2907                 }
2908
2909                 /* Set EEE LPI Update Timer to 200usec */
2910                 ret_val = hw->phy.ops.acquire(hw);
2911                 if (ret_val)
2912                         return ret_val;
2913                 ret_val = e1000_write_emi_reg_locked(hw,
2914                                                      I82579_LPI_UPDATE_TIMER,
2915                                                      0x1387);
2916                 hw->phy.ops.release(hw);
2917         }
2918
2919         return ret_val;
2920 }
2921
2922 /**
2923  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2924  *  @hw: pointer to the HW structure
2925  *
2926  *  Resets the PHY
2927  *  This is a function pointer entry point called by drivers
2928  *  or other shared routines.
2929  **/
2930 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2931 {
2932         s32 ret_val = E1000_SUCCESS;
2933
2934         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2935
2936         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2937         if ((hw->mac.type == e1000_pch2lan) &&
2938             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2939                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2940
2941         ret_val = e1000_phy_hw_reset_generic(hw);
2942         if (ret_val)
2943                 return ret_val;
2944
2945         return e1000_post_phy_reset_ich8lan(hw);
2946 }
2947
2948 /**
2949  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2950  *  @hw: pointer to the HW structure
2951  *  @active: true to enable LPLU, false to disable
2952  *
2953  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
2954  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
2955  *  not set the PHY speed. This function will manually set the LPLU bit and restart
2956  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2957  *  since it configures the same bit.
2958  **/
2959 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2960 {
2961         s32 ret_val;
2962         u16 oem_reg;
2963
2964         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2965
2966         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2967         if (ret_val)
2968                 return ret_val;
2969
2970         if (active)
2971                 oem_reg |= HV_OEM_BITS_LPLU;
2972         else
2973                 oem_reg &= ~HV_OEM_BITS_LPLU;
2974
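             /* Restart auto-negotiation so the new LPLU setting takes
              * effect immediately, but only when PHY resets are not
              * blocked (e.g. by manageability firmware owning the PHY).
              */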
2975         if (!hw->phy.ops.check_reset_block(hw))
2976                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2977
2978         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2979 }
2980
2981 /**
2982  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2983  *  @hw: pointer to the HW structure
2984  *  @active: true to enable LPLU, false to disable
2985  *
2986  *  Sets the LPLU D0 state according to the active flag.  When
2987  *  activating LPLU this function also disables smart speed
2988  *  and vice versa.  LPLU will not be activated unless the
2989  *  device autonegotiation advertisement meets standards of
2990  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2991  *  This is a function pointer entry point only called by
2992  *  PHY setup routines.
2993  **/
2994 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2995 {
2996         struct e1000_phy_info *phy = &hw->phy;
2997         u32 phy_ctrl;
2998         s32 ret_val = E1000_SUCCESS;
2999         u16 data;
3000
3001         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3002
3003         if (phy->type == e1000_phy_ife)
3004                 return E1000_SUCCESS;
3005
3006         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3007
3008         if (active) {
3009                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3010                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3011
3012                 if (phy->type != e1000_phy_igp_3)
3013                         return E1000_SUCCESS;
3014
3015                 /* Call gig speed drop workaround on LPLU before accessing
3016                  * any PHY registers
3017                  */
3018                 if (hw->mac.type == e1000_ich8lan)
3019                         e1000_gig_downshift_workaround_ich8lan(hw);
3020
3021                 /* When LPLU is enabled, we should disable SmartSpeed */
3022                 ret_val = phy->ops.read_reg(hw,
3023                                             IGP01E1000_PHY_PORT_CONFIG,
3024                                             &data);
3025                 if (ret_val)
3026                         return ret_val;
3027                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3028                 ret_val = phy->ops.write_reg(hw,
3029                                              IGP01E1000_PHY_PORT_CONFIG,
3030                                              data);
3031                 if (ret_val)
3032                         return ret_val;
3033         } else {
3034                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3035                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3036
3037                 if (phy->type != e1000_phy_igp_3)
3038                         return E1000_SUCCESS;
3039
3040                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3041                  * during Dx states where the power conservation is most
3042                  * important.  During driver activity we should enable
3043                  * SmartSpeed, so performance is maintained.
3044                  */
3045                 if (phy->smart_speed == e1000_smart_speed_on) {
3046                         ret_val = phy->ops.read_reg(hw,
3047                                                     IGP01E1000_PHY_PORT_CONFIG,
3048                                                     &data);
3049                         if (ret_val)
3050                                 return ret_val;
3051
3052                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3053                         ret_val = phy->ops.write_reg(hw,
3054                                                      IGP01E1000_PHY_PORT_CONFIG,
3055                                                      data);
3056                         if (ret_val)
3057                                 return ret_val;
3058                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3059                         ret_val = phy->ops.read_reg(hw,
3060                                                     IGP01E1000_PHY_PORT_CONFIG,
3061                                                     &data);
3062                         if (ret_val)
3063                                 return ret_val;
3064
3065                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3066                         ret_val = phy->ops.write_reg(hw,
3067                                                      IGP01E1000_PHY_PORT_CONFIG,
3068                                                      data);
3069                         if (ret_val)
3070                                 return ret_val;
3071                 }
3072         }
3073
3074         return E1000_SUCCESS;
3075 }
3076
3077 /**
3078  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3079  *  @hw: pointer to the HW structure
3080  *  @active: true to enable LPLU, false to disable
3081  *
3082  *  Sets the LPLU D3 state according to the active flag.  When
3083  *  activating LPLU this function also disables smart speed
3084  *  and vice versa.  LPLU will not be activated unless the
3085  *  device autonegotiation advertisement meets standards of
3086  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3087  *  This is a function pointer entry point only called by
3088  *  PHY setup routines.
3089  **/
3090 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3091 {
3092         struct e1000_phy_info *phy = &hw->phy;
3093         u32 phy_ctrl;
3094         s32 ret_val = E1000_SUCCESS;
3095         u16 data;
3096
3097         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3098
3099         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3100
3101         if (!active) {
3102                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3103                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3104
3105                 if (phy->type != e1000_phy_igp_3)
3106                         return E1000_SUCCESS;
3107
3108                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3109                  * during Dx states where the power conservation is most
3110                  * important.  During driver activity we should enable
3111                  * SmartSpeed, so performance is maintained.
3112                  */
3113                 if (phy->smart_speed == e1000_smart_speed_on) {
3114                         ret_val = phy->ops.read_reg(hw,
3115                                                     IGP01E1000_PHY_PORT_CONFIG,
3116                                                     &data);
3117                         if (ret_val)
3118                                 return ret_val;
3119
3120                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3121                         ret_val = phy->ops.write_reg(hw,
3122                                                      IGP01E1000_PHY_PORT_CONFIG,
3123                                                      data);
3124                         if (ret_val)
3125                                 return ret_val;
3126                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3127                         ret_val = phy->ops.read_reg(hw,
3128                                                     IGP01E1000_PHY_PORT_CONFIG,
3129                                                     &data);
3130                         if (ret_val)
3131                                 return ret_val;
3132
3133                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3134                         ret_val = phy->ops.write_reg(hw,
3135                                                      IGP01E1000_PHY_PORT_CONFIG,
3136                                                      data);
3137                         if (ret_val)
3138                                 return ret_val;
3139                 }
3140         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3141                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3142                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3143                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3144                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3145
3146                 if (phy->type != e1000_phy_igp_3)
3147                         return E1000_SUCCESS;
3148
3149                 /* Call gig speed drop workaround on LPLU before accessing
3150                  * any PHY registers
3151                  */
3152                 if (hw->mac.type == e1000_ich8lan)
3153                         e1000_gig_downshift_workaround_ich8lan(hw);
3154
3155                 /* When LPLU is enabled, we should disable SmartSpeed */
3156                 ret_val = phy->ops.read_reg(hw,
3157                                             IGP01E1000_PHY_PORT_CONFIG,
3158                                             &data);
3159                 if (ret_val)
3160                         return ret_val;
3161
3162                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3163                 ret_val = phy->ops.write_reg(hw,
3164                                              IGP01E1000_PHY_PORT_CONFIG,
3165                                              data);
3166         }
3167
3168         return ret_val;
3169 }
3170
3171 /**
3172  *  e1000_valid_nvm_bank_detect_ich8lan - find the valid NVM bank (0 or 1)
3173  *  @hw: pointer to the HW structure
3174  *  @bank:  pointer to the variable that returns the active bank
3175  *
3176  *  Reads signature byte from the NVM using the flash access registers.
3177  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3178  **/
3179 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3180 {
3181         u32 eecd;
3182         struct e1000_nvm_info *nvm = &hw->nvm;
3183         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3184         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
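             /* E1000_ICH_NVM_SIG_WORD is word 0x13; "* 2" converts the word
              * offset to a byte offset and "+ 1" selects the high byte,
              * which holds the signature bits 15:14 checked below.
              */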
3185         u8 sig_byte = 0;
3186         s32 ret_val;
3187
3188         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3189
3190         switch (hw->mac.type) {
3191         case e1000_ich8lan:
3192         case e1000_ich9lan:
3193                 eecd = E1000_READ_REG(hw, E1000_EECD);
3194                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3195                     E1000_EECD_SEC1VAL_VALID_MASK) {
3196                         if (eecd & E1000_EECD_SEC1VAL)
3197                                 *bank = 1;
3198                         else
3199                                 *bank = 0;
3200
3201                         return E1000_SUCCESS;
3202                 }
3203                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3204                 /* fall-thru */
3205         default:
3206                 /* set bank to 0 in case flash read fails */
3207                 *bank = 0;
3208
3209                 /* Check bank 0 */
3210                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3211                                                         &sig_byte);
3212                 if (ret_val)
3213                         return ret_val;
3214                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3215                     E1000_ICH_NVM_SIG_VALUE) {
3216                         *bank = 0;
3217                         return E1000_SUCCESS;
3218                 }
3219
3220                 /* Check bank 1 */
3221                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3222                                                         bank1_offset,
3223                                                         &sig_byte);
3224                 if (ret_val)
3225                         return ret_val;
3226                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3227                     E1000_ICH_NVM_SIG_VALUE) {
3228                         *bank = 1;
3229                         return E1000_SUCCESS;
3230                 }
3231
3232                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3233                 return -E1000_ERR_NVM;
3234         }
3235 }
3236
3237 /**
3238  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3239  *  @hw: pointer to the HW structure
3240  *  @offset: The offset (in words) of the word(s) to read.
3241  *  @words: Size of data to read in words
3242  *  @data: Pointer to the word(s) to read at offset.
3243  *
3244  *  Reads a word(s) from the NVM using the flash access registers.
3245  **/
3246 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3247                                   u16 *data)
3248 {
3249         struct e1000_nvm_info *nvm = &hw->nvm;
3250         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3251         u32 act_offset;
3252         s32 ret_val = E1000_SUCCESS;
3253         u32 bank = 0;
3254         u16 i, word;
3255
3256         DEBUGFUNC("e1000_read_nvm_ich8lan");
3257
3258         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3259             (words == 0)) {
3260                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3261                 ret_val = -E1000_ERR_NVM;
3262                 goto out;
3263         }
3264
3265         nvm->ops.acquire(hw);
3266
3267         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3268         if (ret_val != E1000_SUCCESS) {
3269                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3270                 bank = 0;
3271         }
3272
3273         act_offset = (bank) ? nvm->flash_bank_size : 0;
3274         act_offset += offset;
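             /* act_offset is a word offset: bank 1 begins flash_bank_size
              * words after bank 0, so the requested word lives at
              * (bank * flash_bank_size) + offset within the NVM region.
              */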
3275
3276         ret_val = E1000_SUCCESS;
3277         for (i = 0; i < words; i++) {
3278                 if (dev_spec->shadow_ram[offset+i].modified) {
3279                         data[i] = dev_spec->shadow_ram[offset+i].value;
3280                 } else {
3281                         ret_val = e1000_read_flash_word_ich8lan(hw,
3282                                                                 act_offset + i,
3283                                                                 &word);
3284                         if (ret_val)
3285                                 break;
3286                         data[i] = word;
3287                 }
3288         }
3289
3290         nvm->ops.release(hw);
3291
3292 out:
3293         if (ret_val)
3294                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3295
3296         return ret_val;
3297 }
3298
3299 /**
3300  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3301  *  @hw: pointer to the HW structure
3302  *
3303  *  This function does initial flash setup so that a new read/write/erase cycle
3304  *  can be started.
3305  **/
3306 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3307 {
3308         union ich8_hws_flash_status hsfsts;
3309         s32 ret_val = -E1000_ERR_NVM;
3310
3311         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3312
3313         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3314
3315         /* Check if the flash descriptor is valid */
3316         if (!hsfsts.hsf_status.fldesvalid) {
3317                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3318                 return -E1000_ERR_NVM;
3319         }
3320
3321         /* Clear FCERR and DAEL in hw status by writing 1 */
3322         hsfsts.hsf_status.flcerr = 1;
3323         hsfsts.hsf_status.dael = 1;
3324         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3325
3326         /* Either we should have a hardware SPI cycle in progress
3327          * bit to check against, in order to start a new cycle or
3328          * FDONE bit should be changed in the hardware so that it
3329          * is 1 after hardware reset, which can then be used as an
3330          * indication whether a cycle is in progress or has been
3331          * completed.
3332          */
3333
3334         if (!hsfsts.hsf_status.flcinprog) {
3335                 /* There is no cycle running at present,
3336                  * so we can start a cycle.
3337                  * Begin by setting Flash Cycle Done.
3338                  */
3339                 hsfsts.hsf_status.flcdone = 1;
3340                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3341                 ret_val = E1000_SUCCESS;
3342         } else {
3343                 s32 i;
3344
3345                 /* Otherwise poll for some time so the current
3346                  * cycle has a chance to end before giving up.
3347                  */
3348                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3349                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3350                                                               ICH_FLASH_HSFSTS);
3351                         if (!hsfsts.hsf_status.flcinprog) {
3352                                 ret_val = E1000_SUCCESS;
3353                                 break;
3354                         }
3355                         usec_delay(1);
3356                 }
3357                 if (ret_val == E1000_SUCCESS) {
3358                         /* The previous cycle completed before the timeout,
3359                          * now set the Flash Cycle Done.
3360                          */
3361                         hsfsts.hsf_status.flcdone = 1;
3362                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3363                                                 hsfsts.regval);
3364                 } else {
3365                         DEBUGOUT("Flash controller busy, cannot get access\n");
3366                 }
3367         }
3368
3369         return ret_val;
3370 }
3371
3372 /**
3373  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3374  *  @hw: pointer to the HW structure
3375  *  @timeout: maximum time to wait for completion
3376  *
3377  *  This function starts a flash cycle and waits for its completion.
3378  **/
3379 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3380 {
3381         union ich8_hws_flash_ctrl hsflctl;
3382         union ich8_hws_flash_status hsfsts;
3383         u32 i = 0;
3384
3385         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3386
3387         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3388         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3389         hsflctl.hsf_ctrl.flcgo = 1;
3390
3391         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3392
3393         /* wait till FDONE bit is set to 1 */
3394         do {
3395                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3396                 if (hsfsts.hsf_status.flcdone)
3397                         break;
3398                 usec_delay(1);
3399         } while (i++ < timeout);
3400
3401         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3402                 return E1000_SUCCESS;
3403
3404         return -E1000_ERR_NVM;
3405 }
3406
3407 /**
3408  *  e1000_read_flash_word_ich8lan - Read word from flash
3409  *  @hw: pointer to the HW structure
3410  *  @offset: offset to data location
3411  *  @data: pointer to the location for storing the data
3412  *
3413  *  Reads the flash word at offset into data.  Offset is converted
3414  *  to bytes before read.
3415  **/
3416 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3417                                          u16 *data)
3418 {
3419         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3420
3421         if (!data)
3422                 return -E1000_ERR_NVM;
3423
3424         /* Must convert offset into bytes. */
3425         offset <<= 1;
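             /* NVM words are 16 bits wide, so word N starts at byte offset
              * 2 * N (e.g. signature word 0x13 is read from byte 0x26).
              */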
3426
3427         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3428 }
3429
3430 /**
3431  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3432  *  @hw: pointer to the HW structure
3433  *  @offset: The offset of the byte to read.
3434  *  @data: Pointer to a byte to store the value read.
3435  *
3436  *  Reads a single byte from the NVM using the flash access registers.
3437  **/
3438 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3439                                          u8 *data)
3440 {
3441         s32 ret_val;
3442         u16 word = 0;
3443
3444         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3445
3446         if (ret_val)
3447                 return ret_val;
3448
3449         *data = (u8)word;
3450
3451         return E1000_SUCCESS;
3452 }
3453
3454 /**
3455  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3456  *  @hw: pointer to the HW structure
3457  *  @offset: The offset (in bytes) of the byte or word to read.
3458  *  @size: Size of data to read, 1=byte 2=word
3459  *  @data: Pointer to the word to store the value read.
3460  *
3461  *  Reads a byte or word from the NVM using the flash access registers.
3462  **/
3463 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3464                                          u8 size, u16 *data)
3465 {
3466         union ich8_hws_flash_status hsfsts;
3467         union ich8_hws_flash_ctrl hsflctl;
3468         u32 flash_linear_addr;
3469         u32 flash_data = 0;
3470         s32 ret_val = -E1000_ERR_NVM;
3471         u8 count = 0;
3472
3473         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3474
3475         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3476                 return -E1000_ERR_NVM;
3477         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3478                              hw->nvm.flash_base_addr);
3479
3480         do {
3481                 usec_delay(1);
3482                 /* Steps */
3483                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3484                 if (ret_val != E1000_SUCCESS)
3485                         break;
3486                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3487
3488                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3489                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3490                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3491                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3492
3493                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3494
3495                 ret_val =
3496                     e1000_flash_cycle_ich8lan(hw,
3497                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3498
3499                 /* Check if FCERR is set.  If it is, clear it and retry
3500                  * the whole sequence a few more times; otherwise read
3501                  * the result out of the Flash Data0 register, least
3502                  * significant byte first
3503                  */
3504                 if (ret_val == E1000_SUCCESS) {
3505                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3506                         if (size == 1)
3507                                 *data = (u8)(flash_data & 0x000000FF);
3508                         else if (size == 2)
3509                                 *data = (u16)(flash_data & 0x0000FFFF);
3510                         break;
3511                 } else {
3512                         /* If we've gotten here, then things are probably
3513                          * completely hosed, but if the error condition is
3514                          * detected, it won't hurt to give it another try...
3515                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3516                          */
3517                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3518                                                               ICH_FLASH_HSFSTS);
3519                         if (hsfsts.hsf_status.flcerr) {
3520                                 /* Repeat for some time before giving up. */
3521                                 continue;
3522                         } else if (!hsfsts.hsf_status.flcdone) {
3523                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3524                                 break;
3525                         }
3526                 }
3527         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3528
3529         return ret_val;
3530 }
3531
3532 /**
3533  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3534  *  @hw: pointer to the HW structure
3535  *  @offset: The offset (in words) of the word(s) to write.
3536  *  @words: Size of data to write in words
3537  *  @data: Pointer to the word(s) to write at offset.
3538  *
3539  *  Writes the word(s) to the shadow RAM copy of the NVM; the data is
      *  committed to the flash by e1000_update_nvm_checksum_ich8lan().
3540  **/
3541 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3542                                    u16 *data)
3543 {
3544         struct e1000_nvm_info *nvm = &hw->nvm;
3545         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3546         u16 i;
3547
3548         DEBUGFUNC("e1000_write_nvm_ich8lan");
3549
3550         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3551             (words == 0)) {
3552                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3553                 return -E1000_ERR_NVM;
3554         }
3555
3556         nvm->ops.acquire(hw);
3557
3558         for (i = 0; i < words; i++) {
3559                 dev_spec->shadow_ram[offset+i].modified = true;
3560                 dev_spec->shadow_ram[offset+i].value = data[i];
3561         }
3562
3563         nvm->ops.release(hw);
3564
3565         return E1000_SUCCESS;
3566 }
3567
3568 /**
3569  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3570  *  @hw: pointer to the HW structure
3571  *
3572  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3573  *  which writes the checksum to the shadow ram.  The changes in the shadow
3574  *  ram are then committed to the EEPROM by processing each bank at a time
3575  *  checking for the modified bit and writing only the pending changes.
3576  *  After a successful commit, the shadow ram is cleared and is ready for
3577  *  future writes.
3578  **/
3579 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3580 {
3581         struct e1000_nvm_info *nvm = &hw->nvm;
3582         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3583         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3584         s32 ret_val;
3585         u16 data;
3586
3587         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3588
3589         ret_val = e1000_update_nvm_checksum_generic(hw);
3590         if (ret_val)
3591                 goto out;
3592
3593         if (nvm->type != e1000_nvm_flash_sw)
3594                 goto out;
3595
3596         nvm->ops.acquire(hw);
3597
3598         /* We're writing to the opposite bank so if we're on bank 1,
3599          * write to bank 0 etc.  We also need to erase the segment that
3600          * is going to be written
3601          */
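             /* Commit sequence: erase the inactive bank, copy each word
              * from the old bank (or from the shadow RAM if modified),
              * mark the new bank's signature valid, then invalidate the
              * old bank's signature.
              */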
3602         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3603         if (ret_val != E1000_SUCCESS) {
3604                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3605                 bank = 0;
3606         }
3607
3608         if (bank == 0) {
3609                 new_bank_offset = nvm->flash_bank_size;
3610                 old_bank_offset = 0;
3611                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3612                 if (ret_val)
3613                         goto release;
3614         } else {
3615                 old_bank_offset = nvm->flash_bank_size;
3616                 new_bank_offset = 0;
3617                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3618                 if (ret_val)
3619                         goto release;
3620         }
3621
3622         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3623                 /* Determine whether to write the value stored
3624                  * in the other NVM bank or a modified value stored
3625                  * in the shadow RAM
3626                  */
3627                 if (dev_spec->shadow_ram[i].modified) {
3628                         data = dev_spec->shadow_ram[i].value;
3629                 } else {
3630                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3631                                                                 old_bank_offset,
3632                                                                 &data);
3633                         if (ret_val)
3634                                 break;
3635                 }
3636
3637                 /* If the word is 0x13, then make sure the signature bits
3638                  * (15:14) are 11b until the commit has completed.
3639                  * This will allow us to write 10b which indicates the
3640                  * signature is valid.  We want to do this after the write
3641                  * has completed so that we don't mark the segment valid
3642                  * while the write is still in progress
3643                  */
3644                 if (i == E1000_ICH_NVM_SIG_WORD)
3645                         data |= E1000_ICH_NVM_SIG_MASK;
3646
3647                 /* Convert offset to bytes. */
3648                 act_offset = (i + new_bank_offset) << 1;
3649
3650                 usec_delay(100);
3651                 /* Write the bytes to the new bank. */
3652                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3653                                                                act_offset,
3654                                                                (u8)data);
3655                 if (ret_val)
3656                         break;
3657
3658                 usec_delay(100);
3659                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3660                                                           act_offset + 1,
3661                                                           (u8)(data >> 8));
3662                 if (ret_val)
3663                         break;
3664         }
3665
3666         /* Don't bother writing the segment valid bits if sector
3667          * programming failed.
3668          */
3669         if (ret_val) {
3670                 DEBUGOUT("Flash commit failed.\n");
3671                 goto release;
3672         }
3673
3674         /* Finally validate the new segment by setting bits 15:14
3675          * to 10b in word 0x13.  This can be done without an erase
3676          * as well, since these bits start out as 11b and we only
3677          * need to change bit 14 to 0b
3678          */
3679         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3680         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3681         if (ret_val)
3682                 goto release;
3683
3684         data &= 0xBFFF;
3685         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3686                                                        act_offset * 2 + 1,
3687                                                        (u8)(data >> 8));
3688         if (ret_val)
3689                 goto release;
3690
3691         /* And invalidate the previously valid segment by setting
3692          * its signature word (0x13) high byte to 0.  This can be
3693          * done without an erase because a flash erase sets all bits
3694          * to 1's; 1's can be written to 0's without an erase
3695          */
3696         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3697         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3698         if (ret_val)
3699                 goto release;
3700
3701         /* Great!  Everything worked, we can now clear the cached entries. */
3702         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3703                 dev_spec->shadow_ram[i].modified = false;
3704                 dev_spec->shadow_ram[i].value = 0xFFFF;
3705         }
3706
3707 release:
3708         nvm->ops.release(hw);
3709
3710         /* Reload the EEPROM, or else modifications will not appear
3711          * until after the next adapter reset.
3712          */
3713         if (!ret_val) {
3714                 nvm->ops.reload(hw);
3715                 msec_delay(10);
3716         }
3717
3718 out:
3719         if (ret_val)
3720                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3721
3722         return ret_val;
3723 }
3724
3725 /**
3726  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3727  *  @hw: pointer to the HW structure
3728  *
3729  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3730  *  If the bit is 0, the EEPROM has been modified, but the checksum was not
3731  *  calculated, in which case we need to calculate the checksum and set bit 6.
3732  **/
3733 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3734 {
3735         s32 ret_val;
3736         u16 data;
3737         u16 word;
3738         u16 valid_csum_mask;
3739
3740         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3741
3742         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3743          * the checksum needs to be fixed.  This bit is an indication that
3744          * the NVM was prepared by OEM software and did not calculate
3745          * the checksum...a likely scenario.
3746          */
3747         switch (hw->mac.type) {
3748         case e1000_pch_lpt:
3749                 word = NVM_COMPAT;
3750                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3751                 break;
3752         default:
3753                 word = NVM_FUTURE_INIT_WORD1;
3754                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3755                 break;
3756         }
3757
3758         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3759         if (ret_val)
3760                 return ret_val;
3761
3762         if (!(data & valid_csum_mask)) {
3763                 data |= valid_csum_mask;
3764                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3765                 if (ret_val)
3766                         return ret_val;
3767                 ret_val = hw->nvm.ops.update(hw);
3768                 if (ret_val)
3769                         return ret_val;
3770         }
3771
3772         return e1000_validate_nvm_checksum_generic(hw);
3773 }
3774
3775 /**
3776  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3777  *  @hw: pointer to the HW structure
3778  *  @offset: The offset (in bytes) of the byte/word to read.
3779  *  @size: Size of data to read, 1=byte 2=word
3780  *  @data: The byte(s) to write to the NVM.
3781  *
3782  *  Writes one/two bytes to the NVM using the flash access registers.
3783  **/
3784 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3785                                           u8 size, u16 data)
3786 {
3787         union ich8_hws_flash_status hsfsts;
3788         union ich8_hws_flash_ctrl hsflctl;
3789         u32 flash_linear_addr;
3790         u32 flash_data = 0;
3791         s32 ret_val;
3792         u8 count = 0;
3793
3794         DEBUGFUNC("e1000_write_ich8_data");
3795
3796         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3797                 return -E1000_ERR_NVM;
3798
3799         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3800                              hw->nvm.flash_base_addr);
3801
3802         do {
3803                 usec_delay(1);
3804                 /* Steps */
3805                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3806                 if (ret_val != E1000_SUCCESS)
3807                         break;
3808                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3809
3810                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3811                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3812                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3813                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3814
3815                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3816
3817                 if (size == 1)
3818                         flash_data = (u32)data & 0x00FF;
3819                 else
3820                         flash_data = (u32)data;
3821
3822                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3823
3824                 /* Check if FCERR is set.  If it is, clear it and retry
3825                  * the whole sequence a few more times; otherwise we are done
3826                  */
3827                 ret_val =
3828                     e1000_flash_cycle_ich8lan(hw,
3829                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3830                 if (ret_val == E1000_SUCCESS)
3831                         break;
3832
3833                 /* If we're here, then things are most likely
3834                  * completely hosed, but if the error condition
3835                  * is detected, it won't hurt to give it another
3836                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3837                  */
3838                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3839                 if (hsfsts.hsf_status.flcerr)
3840                         /* Repeat for some time before giving up. */
3841                         continue;
3842                 if (!hsfsts.hsf_status.flcdone) {
3843                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3844                         break;
3845                 }
3846         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3847
3848         return ret_val;
3849 }
3850
3851 /**
3852  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3853  *  @hw: pointer to the HW structure
3854  *  @offset: The index of the byte to read.
3855  *  @data: The byte to write to the NVM.
3856  *
3857  *  Writes a single byte to the NVM using the flash access registers.
3858  **/
3859 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3860                                           u8 data)
3861 {
3862         u16 word = (u16)data;
3863
3864         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3865
3866         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3867 }
3868
3869 /**
3870  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3871  *  @hw: pointer to the HW structure
3872  *  @offset: The offset of the byte to write.
3873  *  @byte: The byte to write to the NVM.
3874  *
3875  *  Writes a single byte to the NVM using the flash access registers.
3876  *  Goes through a retry algorithm before giving up.
3877  **/
3878 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3879                                                 u32 offset, u8 byte)
3880 {
3881         s32 ret_val;
3882         u16 program_retries;
3883
3884         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3885
3886         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3887         if (!ret_val)
3888                 return ret_val;
3889
3890         for (program_retries = 0; program_retries < 100; program_retries++) {
3891                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3892                 usec_delay(100);
3893                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3894                 if (ret_val == E1000_SUCCESS)
3895                         break;
3896         }
3897         if (program_retries == 100)
3898                 return -E1000_ERR_NVM;
3899
3900         return E1000_SUCCESS;
3901 }
3902
3903 /**
3904  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3905  *  @hw: pointer to the HW structure
3906  *  @bank: 0 for first bank, 1 for second bank, etc.
3907  *
3908  *  Erases the bank specified.  Banks are 0 based; bank N begins at
3909  *  flash_base_addr + N * flash_bank_size (in bytes).
3910  **/
3911 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3912 {
3913         struct e1000_nvm_info *nvm = &hw->nvm;
3914         union ich8_hws_flash_status hsfsts;
3915         union ich8_hws_flash_ctrl hsflctl;
3916         u32 flash_linear_addr;
3917         /* bank size is in 16bit words - adjust to bytes */
3918         u32 flash_bank_size = nvm->flash_bank_size * 2;
3919         s32 ret_val;
3920         s32 count = 0;
3921         s32 j, iteration, sector_size;
3922
3923         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3924
3925         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3926
3927         /* Determine HW Sector size: Read BERASE bits of hw flash status
3928          * register
3929          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3930          *     consecutive sectors.  The start index for the nth Hw sector
3931          *     can be calculated as = bank * 4096 + n * 256
3932          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3933          *     The start index for the nth Hw sector can be calculated
3934          *     as = bank * 4096
3935          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3936          *     (ich9 only, otherwise error condition)
3937          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3938          */
3939         switch (hsfsts.hsf_status.berasesz) {
3940         case 0:
3941                 /* Hw sector size 256 */
3942                 sector_size = ICH_FLASH_SEG_SIZE_256;
3943                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3944                 break;
3945         case 1:
3946                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3947                 iteration = 1;
3948                 break;
3949         case 2:
3950                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3951                 iteration = 1;
3952                 break;
3953         case 3:
3954                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3955                 iteration = 1;
3956                 break;
3957         default:
3958                 return -E1000_ERR_NVM;
3959         }
3960
3961         /* Start with the base address, then add the sector offset. */
3962         flash_linear_addr = hw->nvm.flash_base_addr;
3963         flash_linear_addr += (bank) ? flash_bank_size : 0;
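             /* flash_bank_size was converted to bytes above, so this points
              * at the first byte of the selected bank in the flash linear
              * address space.
              */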
3964
3965         for (j = 0; j < iteration; j++) {
3966                 do {
3967                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3968
3969                         /* Steps */
3970                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3971                         if (ret_val)
3972                                 return ret_val;
3973
3974                         /* Write a value 11 (block Erase) in Flash
3975                          * Cycle field in hw flash control
3976                          */
3977                         hsflctl.regval =
3978                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3979
3980                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3981                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3982                                                 hsflctl.regval);
3983
3984                         /* Write the last 24 bits of an index within the
3985                          * block into Flash Linear address field in Flash
3986                          * Address.
3987                          */
3988                         flash_linear_addr += (j * sector_size);
3989                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3990                                               flash_linear_addr);
3991
3992                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3993                         if (ret_val == E1000_SUCCESS)
3994                                 break;
3995
3996                         /* Check if FCERR is set to 1.  If 1,
3997                          * clear it and try the whole sequence
3998                          * a few more times else Done
3999                          */
4000                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4001                                                       ICH_FLASH_HSFSTS);
4002                         if (hsfsts.hsf_status.flcerr)
4003                                 /* repeat for some time before giving up */
4004                                 continue;
4005                         else if (!hsfsts.hsf_status.flcdone)
4006                                 return ret_val;
4007                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4008         }
4009
4010         return E1000_SUCCESS;
4011 }
4012
4013 /**
4014  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4015  *  @hw: pointer to the HW structure
4016  *  @data: Pointer to the LED settings
4017  *
4018  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4019  *  setting is all 0's or F's, set the LED default to a valid LED default
4020  *  setting.
4021  **/
4022 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4023 {
4024         s32 ret_val;
4025
4026         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4027
4028         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4029         if (ret_val) {
4030                 DEBUGOUT("NVM Read Error\n");
4031                 return ret_val;
4032         }
4033
4034         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4035                 *data = ID_LED_DEFAULT_ICH8LAN;
4036
4037         return E1000_SUCCESS;
4038 }
4039
4040 /**
4041  *  e1000_id_led_init_pchlan - store LED configurations
4042  *  @hw: pointer to the HW structure
4043  *
4044  *  PCH does not control LEDs via the LEDCTL register; rather, it uses
4045  *  the PHY LED configuration register.
4046  *
4047  *  PCH also does not have an "always on" or "always off" mode, which
4048  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4049  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4050  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4051  *  link based on logic in e1000_led_[on|off]_pchlan().
4052  **/
4053 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4054 {
4055         struct e1000_mac_info *mac = &hw->mac;
4056         s32 ret_val;
4057         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4058         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4059         u16 data, i, temp, shift;
4060
4061         DEBUGFUNC("e1000_id_led_init_pchlan");
4062
4063         /* Get default ID LED modes */
4064         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4065         if (ret_val)
4066                 return ret_val;
4067
4068         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4069         mac->ledctl_mode1 = mac->ledctl_default;
4070         mac->ledctl_mode2 = mac->ledctl_default;
4071
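     /* Each 4-bit field of the NVM ID LED word selects a mode for one LED;
      * in the PHY LED configuration register each LED uses a 5-bit field,
      * hence the 5-bit shift below.
      */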
4072         for (i = 0; i < 4; i++) {
4073                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4074                 shift = (i * 5);
4075                 switch (temp) {
4076                 case ID_LED_ON1_DEF2:
4077                 case ID_LED_ON1_ON2:
4078                 case ID_LED_ON1_OFF2:
4079                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4080                         mac->ledctl_mode1 |= (ledctl_on << shift);
4081                         break;
4082                 case ID_LED_OFF1_DEF2:
4083                 case ID_LED_OFF1_ON2:
4084                 case ID_LED_OFF1_OFF2:
4085                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4086                         mac->ledctl_mode1 |= (ledctl_off << shift);
4087                         break;
4088                 default:
4089                         /* Do nothing */
4090                         break;
4091                 }
4092                 switch (temp) {
4093                 case ID_LED_DEF1_ON2:
4094                 case ID_LED_ON1_ON2:
4095                 case ID_LED_OFF1_ON2:
4096                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4097                         mac->ledctl_mode2 |= (ledctl_on << shift);
4098                         break;
4099                 case ID_LED_DEF1_OFF2:
4100                 case ID_LED_ON1_OFF2:
4101                 case ID_LED_OFF1_OFF2:
4102                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4103                         mac->ledctl_mode2 |= (ledctl_off << shift);
4104                         break;
4105                 default:
4106                         /* Do nothing */
4107                         break;
4108                 }
4109         }
4110
4111         return E1000_SUCCESS;
4112 }
4113
4114 /**
4115  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4116  *  @hw: pointer to the HW structure
4117  *
4118  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4119  *  register, so the bus width is hard coded.
4120  **/
4121 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4122 {
4123         struct e1000_bus_info *bus = &hw->bus;
4124         s32 ret_val;
4125
4126         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4127
4128         ret_val = e1000_get_bus_info_pcie_generic(hw);
4129
4130         /* ICH devices are "PCI Express"-ish.  They have
4131          * a configuration space, but do not contain
4132          * PCI Express Capability registers, so bus width
4133          * must be hardcoded.
4134          */
4135         if (bus->width == e1000_bus_width_unknown)
4136                 bus->width = e1000_bus_width_pcie_x1;
4137
4138         return ret_val;
4139 }
4140
4141 /**
4142  *  e1000_reset_hw_ich8lan - Reset the hardware
4143  *  @hw: pointer to the HW structure
4144  *
4145  *  Does a full reset of the hardware which includes a reset of the PHY and
4146  *  MAC.
4147  **/
4148 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4149 {
4150         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4151         u16 kum_cfg;
4152         u32 ctrl, reg;
4153         s32 ret_val;
4154
4155         DEBUGFUNC("e1000_reset_hw_ich8lan");
4156
4157         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4158          * on the last TLP read/write transaction when MAC is reset.
4159          */
4160         ret_val = e1000_disable_pcie_master_generic(hw);
4161         if (ret_val)
4162                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4163
4164         DEBUGOUT("Masking off all interrupts\n");
4165         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4166
4167         /* Disable the Transmit and Receive units.  Then delay to allow
4168          * any pending transactions to complete before we hit the MAC
4169          * with the global reset.
4170          */
4171         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4172         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4173         E1000_WRITE_FLUSH(hw);
4174
4175         msec_delay(10);
4176
4177         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4178         if (hw->mac.type == e1000_ich8lan) {
4179                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4180                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4181                 /* Set Packet Buffer Size to 16k. */
4182                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4183         }
4184
4185         if (hw->mac.type == e1000_pchlan) {
4186                 /* Save the NVM K1 bit setting */
4187                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4188                 if (ret_val)
4189                         return ret_val;
4190
4191                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4192                         dev_spec->nvm_k1_enabled = true;
4193                 else
4194                         dev_spec->nvm_k1_enabled = false;
4195         }
4196
4197         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4198
4199         if (!hw->phy.ops.check_reset_block(hw)) {
4200                 /* Full-chip reset requires MAC and PHY reset at the same
4201                  * time to make sure the interface between MAC and the
4202                  * external PHY is reset.
4203                  */
4204                 ctrl |= E1000_CTRL_PHY_RST;
4205
4206                 /* Gate automatic PHY configuration by hardware on
4207                  * non-managed 82579
4208                  */
4209                 if ((hw->mac.type == e1000_pch2lan) &&
4210                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4211                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4212         }
4213         ret_val = e1000_acquire_swflag_ich8lan(hw);
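     /* Proceed with the global reset whether or not the swflag was
      * acquired; the software mutex is released further down only if the
      * acquire above succeeded.
      */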
4214         DEBUGOUT("Issuing a global reset to ich8lan\n");
4215         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4216         /* cannot issue a flush here because it hangs the hardware */
4217         msec_delay(20);
4218
4219         /* Set Phy Config Counter to 50msec */
4220         if (hw->mac.type == e1000_pch2lan) {
4221                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4222                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4223                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4224                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4225         }
4226
4227         if (!ret_val)
4228                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4229
4230         if (ctrl & E1000_CTRL_PHY_RST) {
4231                 ret_val = hw->phy.ops.get_cfg_done(hw);
4232                 if (ret_val)
4233                         return ret_val;
4234
4235                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4236                 if (ret_val)
4237                         return ret_val;
4238         }
4239
4240         /* For PCH, this write will make sure that any noise
4241          * will be detected as a CRC error and be dropped rather than show up
4242          * as a bad packet to the DMA engine.
4243          */
4244         if (hw->mac.type == e1000_pchlan)
4245                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4246
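     /* Mask all interrupts again and read ICR to clear any interrupts left
      * pending by the reset.
      */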
4247         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4248         E1000_READ_REG(hw, E1000_ICR);
4249
4250         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4251         reg |= E1000_KABGTXD_BGSQLBIAS;
4252         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4253
4254         return E1000_SUCCESS;
4255 }
4256
4257 /**
4258  *  e1000_init_hw_ich8lan - Initialize the hardware
4259  *  @hw: pointer to the HW structure
4260  *
4261  *  Prepares the hardware for transmit and receive by doing the following:
4262  *   - initialize hardware bits
4263  *   - initialize LED identification
4264  *   - setup receive address registers
4265  *   - setup flow control
4266  *   - setup transmit descriptors
4267  *   - clear statistics
4268  **/
4269 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4270 {
4271         struct e1000_mac_info *mac = &hw->mac;
4272         u32 ctrl_ext, txdctl, snoop;
4273         s32 ret_val;
4274         u16 i;
4275
4276         DEBUGFUNC("e1000_init_hw_ich8lan");
4277
4278         e1000_initialize_hw_bits_ich8lan(hw);
4279
4280         /* Initialize identification LED */
4281         ret_val = mac->ops.id_led_init(hw);
4282         /* An error is not fatal and we should not stop init due to this */
4283         if (ret_val)
4284                 DEBUGOUT("Error initializing identification LED\n");
4285
4286         /* Setup the receive address. */
4287         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4288
4289         /* Zero out the Multicast HASH table */
4290         DEBUGOUT("Zeroing the MTA\n");
4291         for (i = 0; i < mac->mta_reg_count; i++)
4292                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4293
4294         /* The 82578 Rx buffer will stall if wakeup is enabled in the host and
4295          * the ME.  Disable wakeup by clearing the host wakeup bit.
4296          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4297          */
4298         if (hw->phy.type == e1000_phy_82578) {
4299                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4300                 i &= ~BM_WUC_HOST_WU_BIT;
4301                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4302                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4303                 if (ret_val)
4304                         return ret_val;
4305         }
4306
4307         /* Setup link and flow control */
4308         ret_val = mac->ops.setup_link(hw);
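     /* Any error from setup_link is carried in ret_val and returned at the
      * end of the function, after the remaining initialization has run.
      */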
4309
4310         /* Set the transmit descriptor write-back policy for both queues */
4311         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4312         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4313                   E1000_TXDCTL_FULL_TX_DESC_WB);
4314         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4315                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4316         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4317         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4318         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4319                   E1000_TXDCTL_FULL_TX_DESC_WB);
4320         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4321                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4322         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4323
4324         /* ICH8 has opposite polarity of no_snoop bits.
4325          * By default, we should use snoop behavior.
4326          */
4327         if (mac->type == e1000_ich8lan)
4328                 snoop = PCIE_ICH8_SNOOP_ALL;
4329         else
4330                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4331         e1000_set_pcie_no_snoop_generic(hw, snoop);
4332
4333         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4334         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4335         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4336
4337         /* Clear all of the statistics registers (clear on read).  It is
4338          * important that we do this after we have tried to establish link
4339          * because the symbol error count will increment wildly if there
4340          * is no link.
4341          */
4342         e1000_clear_hw_cntrs_ich8lan(hw);
4343
4344         return ret_val;
4345 }
4346
4347 /**
4348  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4349  *  @hw: pointer to the HW structure
4350  *
4351  *  Sets/Clears the hardware bits necessary for correctly setting up the
4352  *  hardware for transmit and receive.
4353  **/
4354 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4355 {
4356         u32 reg;
4357
4358         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4359
4360         /* Extended Device Control */
4361         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4362         reg |= (1 << 22);
4363         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4364         if (hw->mac.type >= e1000_pchlan)
4365                 reg |= E1000_CTRL_EXT_PHYPDEN;
4366         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4367
4368         /* Transmit Descriptor Control 0 */
4369         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4370         reg |= (1 << 22);
4371         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4372
4373         /* Transmit Descriptor Control 1 */
4374         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4375         reg |= (1 << 22);
4376         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4377
4378         /* Transmit Arbitration Control 0 */
4379         reg = E1000_READ_REG(hw, E1000_TARC(0));
4380         if (hw->mac.type == e1000_ich8lan)
4381                 reg |= (1 << 28) | (1 << 29);
4382         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4383         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4384
4385         /* Transmit Arbitration Control 1 */
4386         reg = E1000_READ_REG(hw, E1000_TARC(1));
4387         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4388                 reg &= ~(1 << 28);
4389         else
4390                 reg |= (1 << 28);
4391         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4392         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4393
4394         /* Device Status */
4395         if (hw->mac.type == e1000_ich8lan) {
4396                 reg = E1000_READ_REG(hw, E1000_STATUS);
4397                 reg &= ~(1 << 31);
4398                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4399         }
4400
4401         /* Work around a descriptor data corruption issue during NFS v2 UDP
4402          * traffic by disabling the NFS filtering capability.
4403          */
4404         reg = E1000_READ_REG(hw, E1000_RFCTL);
4405         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4406
4407         /* Disable IPv6 extension header parsing because some malformed
4408          * IPv6 headers can hang the Rx.
4409          */
4410         if (hw->mac.type == e1000_ich8lan)
4411                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4412         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4413
4414         /* Enable ECC on Lynxpoint */
4415         if (hw->mac.type == e1000_pch_lpt) {
4416                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4417                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4418                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4419
4420                 reg = E1000_READ_REG(hw, E1000_CTRL);
4421                 reg |= E1000_CTRL_MEHE;
4422                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4423         }
4424
4425         return;
4426 }
4427
4428 /**
4429  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4430  *  @hw: pointer to the HW structure
4431  *
4432  *  Determines which flow control settings to use, then configures flow
4433  *  control.  Calls the appropriate media-specific link configuration
4434  *  function.  Assuming the adapter has a valid link partner, a valid link
4435  *  should be established.  Assumes the hardware has previously been reset
4436  *  and the transmitter and receiver are not enabled.
4437  **/
4438 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4439 {
4440         s32 ret_val;
4441
4442         DEBUGFUNC("e1000_setup_link_ich8lan");
4443
4444         if (hw->phy.ops.check_reset_block(hw))
4445                 return E1000_SUCCESS;
4446
4447         /* ICH parts do not have a word in the NVM to determine
4448          * the default flow control setting, so we explicitly
4449          * set it to full.
4450          */
4451         if (hw->fc.requested_mode == e1000_fc_default)
4452                 hw->fc.requested_mode = e1000_fc_full;
4453
4454         /* Save off the requested flow control mode for use later.  Depending
4455          * on the link partner's capabilities, we may or may not use this mode.
4456          */
4457         hw->fc.current_mode = hw->fc.requested_mode;
4458
4459         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4460                 hw->fc.current_mode);
4461
4462         /* Continue to configure the copper link. */
4463         ret_val = hw->mac.ops.setup_physical_interface(hw);
4464         if (ret_val)
4465                 return ret_val;
4466
4467         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4468         if ((hw->phy.type == e1000_phy_82578) ||
4469             (hw->phy.type == e1000_phy_82579) ||
4470             (hw->phy.type == e1000_phy_i217) ||
4471             (hw->phy.type == e1000_phy_82577)) {
4472                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4473
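                /* For these PHY types the pause timer value is also written
                 * directly to the PHY (port control page, register 27).
                 */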
4474                 ret_val = hw->phy.ops.write_reg(hw,
4475                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4476                                              hw->fc.pause_time);
4477                 if (ret_val)
4478                         return ret_val;
4479         }
4480
4481         return e1000_set_fc_watermarks_generic(hw);
4482 }
4483
4484 /**
4485  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4486  *  @hw: pointer to the HW structure
4487  *
4488  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4489  *  when polling the PHY, then calls the generic setup_copper_link to finish
4490  *  configuring the copper link.
4491  **/
4492 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4493 {
4494         u32 ctrl;
4495         s32 ret_val;
4496         u16 reg_data;
4497
4498         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4499
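     /* Force the link up at the MAC and clear the speed/duplex force bits
      * so that speed and duplex are taken from auto-negotiation.
      */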
4500         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4501         ctrl |= E1000_CTRL_SLU;
4502         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4503         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4504
4505         /* Set the mac to wait the maximum time between each iteration
4506          * and increase the max iterations when polling the phy;
4507          * this fixes erroneous timeouts at 10Mbps.
4508          */
4509         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4510                                                0xFFFF);
4511         if (ret_val)
4512                 return ret_val;
4513         ret_val = e1000_read_kmrn_reg_generic(hw,
4514                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4515                                               &reg_data);
4516         if (ret_val)
4517                 return ret_val;
4518         reg_data |= 0x3F;
4519         ret_val = e1000_write_kmrn_reg_generic(hw,
4520                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4521                                                reg_data);
4522         if (ret_val)
4523                 return ret_val;
4524
4525         switch (hw->phy.type) {
4526         case e1000_phy_igp_3:
4527                 ret_val = e1000_copper_link_setup_igp(hw);
4528                 if (ret_val)
4529                         return ret_val;
4530                 break;
4531         case e1000_phy_bm:
4532         case e1000_phy_82578:
4533                 ret_val = e1000_copper_link_setup_m88(hw);
4534                 if (ret_val)
4535                         return ret_val;
4536                 break;
4537         case e1000_phy_82577:
4538         case e1000_phy_82579:
4539                 ret_val = e1000_copper_link_setup_82577(hw);
4540                 if (ret_val)
4541                         return ret_val;
4542                 break;
4543         case e1000_phy_ife:
4544                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4545                                                &reg_data);
4546                 if (ret_val)
4547                         return ret_val;
4548
4549                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4550
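                /* hw->phy.mdix: 1 forces MDI, 2 forces MDI-X, any other
                 * value (including 0) selects automatic crossover.
                 */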
4551                 switch (hw->phy.mdix) {
4552                 case 1:
4553                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4554                         break;
4555                 case 2:
4556                         reg_data |= IFE_PMC_FORCE_MDIX;
4557                         break;
4558                 case 0:
4559                 default:
4560                         reg_data |= IFE_PMC_AUTO_MDIX;
4561                         break;
4562                 }
4563                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4564                                                 reg_data);
4565                 if (ret_val)
4566                         return ret_val;
4567                 break;
4568         default:
4569                 break;
4570         }
4571
4572         return e1000_setup_copper_link_generic(hw);
4573 }
4574
4575 /**
4576  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4577  *  @hw: pointer to the HW structure
4578  *
4579  *  Calls the PHY specific link setup function and then calls the
4580  *  generic setup_copper_link to finish configuring the link for
4581  *  Lynxpoint PCH devices
4582  **/
4583 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4584 {
4585         u32 ctrl;
4586         s32 ret_val;
4587
4588         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4589
4590         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4591         ctrl |= E1000_CTRL_SLU;
4592         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4593         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4594
4595         ret_val = e1000_copper_link_setup_82577(hw);
4596         if (ret_val)
4597                 return ret_val;
4598
4599         return e1000_setup_copper_link_generic(hw);
4600 }
4601
4602 /**
4603  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4604  *  @hw: pointer to the HW structure
4605  *  @speed: pointer to store current link speed
4606  *  @duplex: pointer to store the current link duplex
4607  *
4608  *  Calls the generic get_speed_and_duplex to retrieve the current link
4609  *  information and then calls the Kumeran lock loss workaround for links at
4610  *  gigabit speeds.
4611  **/
4612 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4613                                           u16 *duplex)
4614 {
4615         s32 ret_val;
4616
4617         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4618
4619         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4620         if (ret_val)
4621                 return ret_val;
4622
4623         if ((hw->mac.type == e1000_ich8lan) &&
4624             (hw->phy.type == e1000_phy_igp_3) &&
4625             (*speed == SPEED_1000)) {
4626                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4627         }
4628
4629         return ret_val;
4630 }
4631
4632 /**
4633  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4634  *  @hw: pointer to the HW structure
4635  *
4636  *  Work-around for 82566 Kumeran PCS lock loss:
4637  *  On link status change (i.e. PCI reset, speed change), when link is up and
4638  *  speed is gigabit:
4639  *    0) if workaround is optionally disabled do nothing
4640  *    1) wait 1ms for Kumeran link to come up
4641  *    2) check Kumeran Diagnostic register PCS lock loss bit
4642  *    3) if not set the link is locked (all is good), otherwise...
4643  *    4) reset the PHY
4644  *    5) repeat up to 10 times
4645  *  Note: this is only called for IGP3 copper when speed is 1 Gb/s.
4646  **/
4647 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4648 {
4649         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4650         u32 phy_ctrl;
4651         s32 ret_val;
4652         u16 i, data;
4653         bool link;
4654
4655         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4656
4657         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4658                 return E1000_SUCCESS;
4659
4660         /* Make sure link is up before proceeding.  If not, just return;
4661          * attempting this while the link is negotiating fouled up link
4662          * stability.
4663          */
4664         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4665         if (!link)
4666                 return E1000_SUCCESS;
4667
4668         for (i = 0; i < 10; i++) {
4669                 /* read once to clear */
4670                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4671                 if (ret_val)
4672                         return ret_val;
4673                 /* and again to get new status */
4674                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4675                 if (ret_val)
4676                         return ret_val;
4677
4678                 /* check for PCS lock */
4679                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4680                         return E1000_SUCCESS;
4681
4682                 /* Issue PHY reset */
4683                 hw->phy.ops.reset(hw);
4684                 msec_delay_irq(5);
4685         }
4686         /* Disable GigE link negotiation */
4687         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4688         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4689                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4690         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4691
4692         /* Call gig speed drop workaround on Gig disable before accessing
4693          * any PHY registers
4694          */
4695         e1000_gig_downshift_workaround_ich8lan(hw);
4696
4697         /* unable to acquire PCS lock */
4698         return -E1000_ERR_PHY;
4699 }
4700
4701 /**
4702  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4703  *  @hw: pointer to the HW structure
4704  *  @state: boolean value used to set the current Kumeran workaround state
4705  *
4706  *  If ICH8, set the current Kumeran workaround state (enabled = true,
4707  *  disabled = false).
4708  **/
4709 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4710                                                  bool state)
4711 {
4712         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4713
4714         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4715
4716         if (hw->mac.type != e1000_ich8lan) {
4717                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4718                 return;
4719         }
4720
4721         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4722
4723         return;
4724 }
4725
4726 /**
4727  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4728  *  @hw: pointer to the HW structure
4729  *
4730  *  Workaround for 82566 power-down on D3 entry:
4731  *    1) disable gigabit link
4732  *    2) write VR power-down enable
4733  *    3) read it back
4734  *  Continue if successful, else issue LCD reset and repeat
4735  **/
4736 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4737 {
4738         u32 reg;
4739         u16 data;
4740         u8  retry = 0;
4741
4742         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4743
4744         if (hw->phy.type != e1000_phy_igp_3)
4745                 return;
4746
4747         /* Try the workaround twice (if needed) */
4748         do {
4749                 /* Disable link */
4750                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4751                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4752                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4753                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4754
4755                 /* Call gig speed drop workaround on Gig disable before
4756                  * accessing any PHY registers
4757                  */
4758                 if (hw->mac.type == e1000_ich8lan)
4759                         e1000_gig_downshift_workaround_ich8lan(hw);
4760
4761                 /* Write VR power-down enable */
4762                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4763                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4764                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4765                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4766
4767                 /* Read it back and test */
4768                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4769                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4770                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4771                         break;
4772
4773                 /* Issue PHY reset and repeat at most one more time */
4774                 reg = E1000_READ_REG(hw, E1000_CTRL);
4775                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4776                 retry++;
4777         } while (retry);
4778 }
4779
4780 /**
4781  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4782  *  @hw: pointer to the HW structure
4783  *
4784  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4785  *  LPLU, Gig disable, MDIC PHY reset):
4786  *    1) Set Kumeran Near-end loopback
4787  *    2) Clear Kumeran Near-end loopback
4788  *  Should only be called for ICH8[m] devices with any 1G Phy.
4789  **/
4790 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4791 {
4792         s32 ret_val;
4793         u16 reg_data;
4794
4795         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4796
4797         if ((hw->mac.type != e1000_ich8lan) ||
4798             (hw->phy.type == e1000_phy_ife))
4799                 return;
4800
4801         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4802                                               &reg_data);
4803         if (ret_val)
4804                 return;
4805         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4806         ret_val = e1000_write_kmrn_reg_generic(hw,
4807                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4808                                                reg_data);
4809         if (ret_val)
4810                 return;
4811         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4812         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4813                                      reg_data);
4814 }
4815
4816 /**
4817  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4818  *  @hw: pointer to the HW structure
4819  *
4820  *  During S0 to Sx transition, it is possible the link remains at gig
4821  *  instead of negotiating to a lower speed.  Before going to Sx, set
4822  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4823  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4824  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4825  *  needs to be written.
4826  *  Parts that support (and are linked to a partner which supports) EEE in
4827  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4828  *  than 10Mbps w/o EEE.
4829  **/
4830 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4831 {
4832         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4833         u32 phy_ctrl;
4834         s32 ret_val;
4835
4836         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4837
4838         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4839         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4840
4841         if (hw->phy.type == e1000_phy_i217) {
4842                 u16 phy_reg, device_id = hw->device_id;
4843
4844                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4845                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4846                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4847                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4848                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4849
4850                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4851                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4852                 }
4853
4854                 ret_val = hw->phy.ops.acquire(hw);
4855                 if (ret_val)
4856                         goto out;
4857
4858                 if (!dev_spec->eee_disable) {
4859                         u16 eee_advert;
4860
4861                         ret_val =
4862                             e1000_read_emi_reg_locked(hw,
4863                                                       I217_EEE_ADVERTISEMENT,
4864                                                       &eee_advert);
4865                         if (ret_val)
4866                                 goto release;
4867
4868                         /* Disable LPLU if both link partners support 100BaseT
4869                          * EEE and 100Full is advertised on both ends of the
4870                          * link, and enable Auto Enable LPI since there will
4871                          * be no driver to enable LPI while in Sx.
4872                          */
4873                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4874                             (dev_spec->eee_lp_ability &
4875                              I82579_EEE_100_SUPPORTED) &&
4876                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4877                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4878                                               E1000_PHY_CTRL_NOND0A_LPLU);
4879
4880                                 /* Set Auto Enable LPI after link up */
4881                                 hw->phy.ops.read_reg_locked(hw,
4882                                                             I217_LPI_GPIO_CTRL,
4883                                                             &phy_reg);
4884                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4885                                 hw->phy.ops.write_reg_locked(hw,
4886                                                              I217_LPI_GPIO_CTRL,
4887                                                              phy_reg);
4888                         }
4889                 }
4890
4891                 /* For i217 Intel Rapid Start Technology support,
4892                  * when the system is going into Sx and no manageability engine
4893                  * is present, the driver must configure proxy to reset only on
4894                  * power good.  LPI (Low Power Idle) state must also reset only
4895                  * on power good, as well as the MTA (Multicast table array).
4896                  * The SMBus release must also be disabled on LCD reset.
4897                  */
4898                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4899                       E1000_ICH_FWSM_FW_VALID)) {
4900                         /* Enable proxy to reset only on power good. */
4901                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4902                                                     &phy_reg);
4903                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4904                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4905                                                      phy_reg);
4906
4907                         /* Set the enable LPI (EEE) bit to reset only on
4908                          * power good.
4909                          */
4910                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4911                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4912                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4913
4914                         /* Disable the SMB release on LCD reset. */
4915                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4916                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4917                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4918                 }
4919
4920                 /* Enable MTA to reset for Intel Rapid Start Technology
4921                  * Support
4922                  */
4923                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4924                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4925                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4926
4927 release:
4928                 hw->phy.ops.release(hw);
4929         }
4930 out:
4931         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4932
4933         if (hw->mac.type == e1000_ich8lan)
4934                 e1000_gig_downshift_workaround_ich8lan(hw);
4935
4936         if (hw->mac.type >= e1000_pchlan) {
4937                 e1000_oem_bits_config_ich8lan(hw, false);
4938
4939                 /* Reset PHY to activate OEM bits on 82577/8 */
4940                 if (hw->mac.type == e1000_pchlan)
4941                         e1000_phy_hw_reset_generic(hw);
4942
4943                 ret_val = hw->phy.ops.acquire(hw);
4944                 if (ret_val)
4945                         return;
4946                 e1000_write_smbus_addr(hw);
4947                 hw->phy.ops.release(hw);
4948         }
4949
4950         return;
4951 }
4952
4953 /**
4954  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4955  *  @hw: pointer to the HW structure
4956  *
4957  *  During Sx to S0 transitions on non-managed devices or managed devices
4958  *  on which PHY resets are not blocked, if the PHY registers cannot be
4959  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4960  *  the PHY.
4961  *  On i217, setup Intel Rapid Start Technology.
4962  **/
4963 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4964 {
4965         s32 ret_val;
4966
4967         DEBUGFUNC("e1000_resume_workarounds_pchlan");
4968         if (hw->mac.type < e1000_pch2lan)
4969                 return E1000_SUCCESS;
4970
4971         ret_val = e1000_init_phy_workarounds_pchlan(hw);
4972         if (ret_val) {
4973                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4974                 return ret_val;
4975         }
4976
4977         /* For i217 Intel Rapid Start Technology support, when the system
4978          * is transitioning from Sx and no manageability engine is present,
4979          * configure SMBus to restore on reset, disable proxy, and enable
4980          * the reset on MTA (Multicast table array).
4981          */
4982         if (hw->phy.type == e1000_phy_i217) {
4983                 u16 phy_reg;
4984
4985                 ret_val = hw->phy.ops.acquire(hw);
4986                 if (ret_val) {
4987                         DEBUGOUT("Failed to setup iRST\n");
4988                         return ret_val;
4989                 }
4990
4991                 /* Clear Auto Enable LPI after link up */
4992                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
4993                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4994                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
4995
4996                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4997                     E1000_ICH_FWSM_FW_VALID)) {
4998                         /* Restore clear on SMB if no manageability engine
4999                          * is present
5000                          */
5001                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5002                                                               &phy_reg);
5003                         if (ret_val)
5004                                 goto release;
5005                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5006                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5007
5008                         /* Disable Proxy */
5009                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5010                 }
5011                 /* Enable reset on MTA */
5012                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5013                                                       &phy_reg);
5014                 if (ret_val)
5015                         goto release;
5016                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5017                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5018 release:
5019                 if (ret_val)
5020                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5021                 hw->phy.ops.release(hw);
5022                 return ret_val;
5023         }
5024         return E1000_SUCCESS;
5025 }
5026
5027 /**
5028  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5029  *  @hw: pointer to the HW structure
5030  *
5031  *  Return the LED back to the default configuration.
5032  **/
5033 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5034 {
5035         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5036
5037         if (hw->phy.type == e1000_phy_ife)
5038                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5039                                              0);
5040
5041         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5042         return E1000_SUCCESS;
5043 }
5044
5045 /**
5046  *  e1000_led_on_ich8lan - Turn LEDs on
5047  *  @hw: pointer to the HW structure
5048  *
5049  *  Turn on the LEDs.
5050  **/
5051 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5052 {
5053         DEBUGFUNC("e1000_led_on_ich8lan");
5054
5055         if (hw->phy.type == e1000_phy_ife)
5056                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5057                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5058
5059         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5060         return E1000_SUCCESS;
5061 }
5062
5063 /**
5064  *  e1000_led_off_ich8lan - Turn LEDs off
5065  *  @hw: pointer to the HW structure
5066  *
5067  *  Turn off the LEDs.
5068  **/
5069 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5070 {
5071         DEBUGFUNC("e1000_led_off_ich8lan");
5072
5073         if (hw->phy.type == e1000_phy_ife)
5074                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5075                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5076
5077         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5078         return E1000_SUCCESS;
5079 }
5080
5081 /**
5082  *  e1000_setup_led_pchlan - Configures SW controllable LED
5083  *  @hw: pointer to the HW structure
5084  *
5085  *  This prepares the SW controllable LED for use.
5086  **/
5087 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5088 {
5089         DEBUGFUNC("e1000_setup_led_pchlan");
5090
5091         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5092                                      (u16)hw->mac.ledctl_mode1);
5093 }
5094
5095 /**
5096  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5097  *  @hw: pointer to the HW structure
5098  *
5099  *  Return the LED back to the default configuration.
5100  **/
5101 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5102 {
5103         DEBUGFUNC("e1000_cleanup_led_pchlan");
5104
5105         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5106                                      (u16)hw->mac.ledctl_default);
5107 }
5108
5109 /**
5110  *  e1000_led_on_pchlan - Turn LEDs on
5111  *  @hw: pointer to the HW structure
5112  *
5113  *  Turn on the LEDs.
5114  **/
5115 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5116 {
5117         u16 data = (u16)hw->mac.ledctl_mode2;
5118         u32 i, led;
5119
5120         DEBUGFUNC("e1000_led_on_pchlan");
5121
5122         /* If no link, then turn LED on by setting the invert bit
5123          * for each LED whose mode is "link_up" in ledctl_mode2.
5124          */
5125         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
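             /* With link down, a "link_up" LED is normally de-asserted, so
              * flipping its invert bit drives it to the requested ID state.
              */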
5126                 for (i = 0; i < 3; i++) {
5127                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5128                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5129                             E1000_LEDCTL_MODE_LINK_UP)
5130                                 continue;
5131                         if (led & E1000_PHY_LED0_IVRT)
5132                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5133                         else
5134                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5135                 }
5136         }
5137
5138         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5139 }
5140
5141 /**
5142  *  e1000_led_off_pchlan - Turn LEDs off
5143  *  @hw: pointer to the HW structure
5144  *
5145  *  Turn off the LEDs.
5146  **/
5147 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5148 {
5149         u16 data = (u16)hw->mac.ledctl_mode1;
5150         u32 i, led;
5151
5152         DEBUGFUNC("e1000_led_off_pchlan");
5153
5154         /* If no link, then turn LED off by clearing the invert bit
5155          * for each LED whose mode is "link_up" in ledctl_mode1.
5156          */
5157         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
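             /* Same invert-bit adjustment as e1000_led_on_pchlan(), but
              * based on ledctl_mode1 so the LEDs show their "off" state.
              */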
5158                 for (i = 0; i < 3; i++) {
5159                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5160                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5161                             E1000_LEDCTL_MODE_LINK_UP)
5162                                 continue;
5163                         if (led & E1000_PHY_LED0_IVRT)
5164                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5165                         else
5166                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5167                 }
5168         }
5169
5170         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5171 }
5172
5173 /**
5174  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5175  *  @hw: pointer to the HW structure
5176  *
5177  *  Read appropriate register for the config done bit for completion status
5178  *  and configure the PHY through s/w for EEPROM-less parts.
5179  *
5180  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5181  *  config done bit, so only an error is logged and execution continues.  If we were
5182  *  to return with error, EEPROM-less silicon would not be able to be reset
5183  *  or change link.
5184  **/
5185 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5186 {
5187         s32 ret_val = E1000_SUCCESS;
5188         u32 bank = 0;
5189         u32 status;
5190
5191         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5192
5193         e1000_get_cfg_done_generic(hw);
5194
5195         /* Wait for indication from h/w that it has completed basic config */
5196         if (hw->mac.type >= e1000_ich10lan) {
5197                 e1000_lan_init_done_ich8lan(hw);
5198         } else {
5199                 ret_val = e1000_get_auto_rd_done_generic(hw);
5200                 if (ret_val) {
5201                         /* When auto config read does not complete, do not
5202                          * return with an error. This can happen in situations
5203                          * where there is no EEPROM and an error would prevent getting link.
5204                          */
5205                         DEBUGOUT("Auto Read Done did not complete\n");
5206                         ret_val = E1000_SUCCESS;
5207                 }
5208         }
5209
5210         /* Clear PHY Reset Asserted bit */
5211         status = E1000_READ_REG(hw, E1000_STATUS);
5212         if (status & E1000_STATUS_PHYRA)
5213                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5214         else
5215                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5216
5217         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5218         if (hw->mac.type <= e1000_ich9lan) {
5219                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5220                     (hw->phy.type == e1000_phy_igp_3)) {
5221                         e1000_phy_init_script_igp3(hw);
5222                 }
5223         } else {
5224                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5225                         /* Maybe we should do a basic PHY config */
5226                         DEBUGOUT("EEPROM not present\n");
5227                         ret_val = -E1000_ERR_CONFIG;
5228                 }
5229         }
5230
5231         return ret_val;
5232 }
5233
5234 /**
5235  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5236  * @hw: pointer to the HW structure
5237  *
5238  * In the case of a PHY power down to save power, or to turn off link during a
5239  * driver unload, or when wake on LAN is not enabled, remove the link.
5240  **/
5241 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5242 {
5243         /* Power down unless manageability or a reset block is active */
5244         if (!(hw->mac.ops.check_mng_mode(hw) ||
5245               hw->phy.ops.check_reset_block(hw)))
5246                 e1000_power_down_phy_copper(hw);
5247
5248         return;
5249 }
5250
5251 /**
5252  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5253  *  @hw: pointer to the HW structure
5254  *
5255  *  Clears hardware counters specific to the silicon family and calls
5256  *  clear_hw_cntrs_generic to clear all general purpose counters.
5257  **/
5258 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5259 {
5260         u16 phy_data;
5261         s32 ret_val;
5262
5263         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5264
5265         e1000_clear_hw_cntrs_base_generic(hw);
5266
5267         E1000_READ_REG(hw, E1000_ALGNERRC);
5268         E1000_READ_REG(hw, E1000_RXERRC);
5269         E1000_READ_REG(hw, E1000_TNCRS);
5270         E1000_READ_REG(hw, E1000_CEXTERR);
5271         E1000_READ_REG(hw, E1000_TSCTC);
5272         E1000_READ_REG(hw, E1000_TSCTFC);
5273
5274         E1000_READ_REG(hw, E1000_MGTPRC);
5275         E1000_READ_REG(hw, E1000_MGTPDC);
5276         E1000_READ_REG(hw, E1000_MGTPTC);
5277
5278         E1000_READ_REG(hw, E1000_IAC);
5279         E1000_READ_REG(hw, E1000_ICRXOC);
5280
5281         /* Clear PHY statistics registers */
5282         if ((hw->phy.type == e1000_phy_82578) ||
5283             (hw->phy.type == e1000_phy_82579) ||
5284             (hw->phy.type == e1000_phy_i217) ||
5285             (hw->phy.type == e1000_phy_82577)) {
5286                 ret_val = hw->phy.ops.acquire(hw);
5287                 if (ret_val)
5288                         return;
5289                 ret_val = hw->phy.ops.set_page(hw,
5290                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5291                 if (ret_val)
5292                         goto release;
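             /* Reading these PHY statistics registers clears them; the
              * values read back are discarded.
              */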
5293                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5294                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5295                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5296                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5297                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5298                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5299                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5300                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5301                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5302                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5303                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5304                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5305                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5306                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5307 release:
5308                 hw->phy.ops.release(hw);
5309         }
5310 }
5311