e1000/base: fix jumbo frame CRC failures
drivers/net/e1000/base/e1000_ich8lan.c (dpdk.git)
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 0:7 GbE region Read Access */
173                 u32 grwa:8; /* 8:15 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
179
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214                 if  (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Unforce SMBus mode in PHY */
238                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
239                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
240                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
241
242                 /* Unforce SMBus mode in MAC */
243                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
244                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
245                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
246         }
247
248         return true;
249 }
250
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
258 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
259 {
260         u32 mac_reg;
261
262         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
263
264         /* Set Phy Config Counter to 50msec */
265         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
266         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
267         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
268         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
269
270         /* Toggle LANPHYPC Value bit */
271         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
272         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
273         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
274         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275         E1000_WRITE_FLUSH(hw);
276         usec_delay(10);
277         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280
281         if (hw->mac.type < e1000_pch_lpt) {
282                 msec_delay(50);
283         } else {
284                 u16 count = 20;
285
286                 do {
287                         msec_delay(5);
288                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
289                            E1000_CTRL_EXT_LPCD) && count--);
290
291                 msec_delay(30);
292         }
293 }
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
302 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
303 {
304         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
305         s32 ret_val;
306
307         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
308
309         /* Gate automatic PHY configuration by hardware on managed and
310          * non-managed 82579 and newer adapters.
311          */
312         e1000_gate_hw_phy_config_ich8lan(hw, true);
313
314 #ifdef ULP_SUPPORT
315         /* It is not possible to be certain of the current state of ULP
316          * so forcibly disable it.
317          */
318         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
319
320 #endif /* ULP_SUPPORT */
321         ret_val = hw->phy.ops.acquire(hw);
322         if (ret_val) {
323                 DEBUGOUT("Failed to initialize PHY flow\n");
324                 goto out;
325         }
326
327         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
328          * inaccessible and resetting the PHY is not blocked, toggle the
329          * LANPHYPC Value bit to force the interconnect to PCIe mode.
330          */
331         switch (hw->mac.type) {
332         case e1000_pch_lpt:
333                 if (e1000_phy_is_accessible_pchlan(hw))
334                         break;
335
336                 /* Before toggling LANPHYPC, see if PHY is accessible by
337                  * forcing MAC to SMBus mode first.
338                  */
339                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
340                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
341                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
342
343                 /* Wait 50 milliseconds for MAC to finish any retries
344                  * that it might be trying to perform from previous
345                  * attempts to acknowledge any phy read requests.
346                  */
347                 msec_delay(50);
348
349                 /* fall-through */
350         case e1000_pch2lan:
351                 if (e1000_phy_is_accessible_pchlan(hw))
352                         break;
353
354                 /* fall-through */
355         case e1000_pchlan:
356                 if ((hw->mac.type == e1000_pchlan) &&
357                     (fwsm & E1000_ICH_FWSM_FW_VALID))
358                         break;
359
360                 if (hw->phy.ops.check_reset_block(hw)) {
361                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
362                         ret_val = -E1000_ERR_PHY;
363                         break;
364                 }
365
366                 /* Toggle LANPHYPC Value bit */
367                 e1000_toggle_lanphypc_pch_lpt(hw);
368                 if (hw->mac.type >= e1000_pch_lpt) {
369                         if (e1000_phy_is_accessible_pchlan(hw))
370                                 break;
371
372                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
373                          * so ensure that the MAC is also out of SMBus mode
374                          */
375                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
376                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
377                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
378
379                         if (e1000_phy_is_accessible_pchlan(hw))
380                                 break;
381
382                         ret_val = -E1000_ERR_PHY;
383                 }
384                 break;
385         default:
386                 break;
387         }
388
389         hw->phy.ops.release(hw);
390         if (!ret_val) {
391
392                 /* Check to see if able to reset PHY.  Print error if not */
393                 if (hw->phy.ops.check_reset_block(hw)) {
394                         ERROR_REPORT("Reset blocked by ME\n");
395                         goto out;
396                 }
397
398                 /* Reset the PHY before any access to it.  Doing so, ensures
399                  * that the PHY is in a known good state before we read/write
400                  * PHY registers.  The generic reset is sufficient here,
401                  * because we haven't determined the PHY type yet.
402                  */
403                 ret_val = e1000_phy_hw_reset_generic(hw);
404                 if (ret_val)
405                         goto out;
406
407                 /* On a successful reset, possibly need to wait for the PHY
408                  * to quiesce to an accessible state before returning control
409                  * to the calling function.  If the PHY does not quiesce, then
410                  * return E1000_BLK_PHY_RESET, as this is the condition that
411                  * the PHY is in.
412                  */
413                 ret_val = hw->phy.ops.check_reset_block(hw);
414                 if (ret_val)
415                         ERROR_REPORT("ME blocked access to PHY after reset\n");
416         }
417
418 out:
419         /* Ungate automatic PHY configuration on non-managed 82579 */
420         if ((hw->mac.type == e1000_pch2lan) &&
421             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
422                 msec_delay(10);
423                 e1000_gate_hw_phy_config_ich8lan(hw, false);
424         }
425
426         return ret_val;
427 }
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
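
/* Worked example for the bank-size math above, using a hypothetical
 * GFPREG value of 0x00200001 (FLASH_GFPREG_BASE_MASK is 0x1FFF and
 * FLASH_SECTOR_ADDR_SHIFT is 12 in e1000_ich8lan.h):
 *
 *	sector_base_addr = 0x00200001 & 0x1FFF               = 0x0001
 *	sector_end_addr  = ((0x00200001 >> 16) & 0x1FFF) + 1  = 0x0021
 *	flash_base_addr  = 0x0001 << 12                       = 0x1000 bytes
 *	flash_bank_size  = (0x0021 - 0x0001) << 12            = 0x20000 bytes
 *	                   / 2 banks                          = 0x10000 bytes
 *	                   / sizeof(u16)                      = 0x8000 words
 *
 * i.e. for this example, two NVM banks of 64 KiB (32768 16-bit words) each.
 */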
675
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
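
/* Wiring sketch (assumes the standard base-code init flow): the
 * _init_*_params_ich8lan() routines above are not called directly by a
 * driver.  e1000_init_function_pointers_ich8lan() elsewhere in this file
 * registers them as the family's init_params hooks, and
 * e1000_setup_init_funcs() in e1000_api.c then invokes them, roughly:
 *
 *	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
 *	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
 *	hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
 *	...
 *	ret_val = hw->mac.ops.init_params(hw);
 */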
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @address: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: value to be read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_write_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
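
/* Call-pattern sketch (this is how e1000_set_eee_pchlan() below uses the
 * EMI accessors): take the SW/FW/HW semaphore, issue the locked EMI
 * reads/writes, then release it:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *	hw->phy.ops.release(hw);
 */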
857
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until such time otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         if (hw->phy.type == e1000_phy_82579) {
940                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
941                                                     &data);
942                 if (ret_val)
943                         goto release;
944
945                 data &= ~I82579_LPI_100_PLL_SHUT;
946                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
947                                                      data);
948         }
949
950         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
951         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
952         if (ret_val)
953                 goto release;
954
955         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
956 release:
957         hw->phy.ops.release(hw);
958
959         return ret_val;
960 }
961
962 /**
963  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
964  *  @hw:   pointer to the HW structure
965  *  @link: link up bool flag
966  *
967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
968  *  preventing further DMA write requests.  Work around the issue by disabling
969  *  the de-assertion of the clock request when in 1Gbps mode.
970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
971  *  speeds in order to avoid Tx hangs.
972  **/
973 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
974 {
975         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
976         u32 status = E1000_READ_REG(hw, E1000_STATUS);
977         s32 ret_val = E1000_SUCCESS;
978         u16 reg;
979
980         if (link && (status & E1000_STATUS_SPEED_1000)) {
981                 ret_val = hw->phy.ops.acquire(hw);
982                 if (ret_val)
983                         return ret_val;
984
985                 ret_val =
986                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
987                                                &reg);
988                 if (ret_val)
989                         goto release;
990
991                 ret_val =
992                     e1000_write_kmrn_reg_locked(hw,
993                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
994                                                 reg &
995                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
996                 if (ret_val)
997                         goto release;
998
999                 usec_delay(10);
1000
1001                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1002                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1003
1004                 ret_val =
1005                     e1000_write_kmrn_reg_locked(hw,
1006                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1007                                                 reg);
1008 release:
1009                 hw->phy.ops.release(hw);
1010         } else {
1011                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1012                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1013
1014                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1015                               (status & E1000_STATUS_FD)))
1016                         goto update_fextnvm6;
1017
1018                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1019                 if (ret_val)
1020                         return ret_val;
1021
1022                 /* Clear link status transmit timeout */
1023                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1024
1025                 if (status & E1000_STATUS_SPEED_100) {
1026                         /* Set inband Tx timeout to 5x10us for 100Half */
1027                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1028
1029                         /* Do not extend the K1 entry latency for 100Half */
1030                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1031                 } else {
1032                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1033                         reg |= 50 <<
1034                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1035
1036                         /* Extend the K1 entry latency for 10 Mbps */
1037                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1038                 }
1039
1040                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1041                 if (ret_val)
1042                         return ret_val;
1043
1044 update_fextnvm6:
1045                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1046         }
1047
1048         return ret_val;
1049 }
1050
1051 #ifdef ULP_SUPPORT
1052 /**
1053  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1054  *  @hw: pointer to the HW structure
1055  *  @to_sx: boolean indicating a system power state transition to Sx
1056  *
1057  *  When link is down, configure ULP mode to significantly reduce the power
1058  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1059  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1060  *  system, configure the ULP mode by software.
1061  */
1062 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1063 {
1064         u32 mac_reg;
1065         s32 ret_val = E1000_SUCCESS;
1066         u16 phy_reg;
1067
1068         if ((hw->mac.type < e1000_pch_lpt) ||
1069             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1070             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1071             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1072             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1073             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1074                 return 0;
1075
1076         if (!to_sx) {
1077                 int i = 0;
1078                 /* Poll up to 5 seconds for Cable Disconnected indication */
1079                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1080                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1081                         /* Bail if link is re-acquired */
1082                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1083                                 return -E1000_ERR_PHY;
1084                         if (i++ == 100)
1085                                 break;
1086
1087                         msec_delay(50);
1088                 }
1089                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1090                           (E1000_READ_REG(hw, E1000_FEXT) &
1091                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1092                           i * 50);
1093         }
1094
1095         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1096                 /* Request ME configure ULP mode in the PHY */
1097                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1098                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1099                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1100
1101                 goto out;
1102         }
1103
1104         ret_val = hw->phy.ops.acquire(hw);
1105         if (ret_val)
1106                 goto out;
1107
1108         /* During S0 Idle keep the phy in PCI-E mode */
1109         if (hw->dev_spec.ich8lan.smbus_disable)
1110                 goto skip_smbus;
1111
1112         /* Force SMBus mode in PHY */
1113         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1114         if (ret_val)
1115                 goto release;
1116         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1117         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1118
1119         /* Force SMBus mode in MAC */
1120         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1121         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1122         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1123
1124 skip_smbus:
1125         if (!to_sx) {
1126                 /* Change the 'Link Status Change' interrupt to trigger
1127                  * on 'Cable Status Change'
1128                  */
1129                 ret_val = e1000_read_kmrn_reg_locked(hw,
1130                                                      E1000_KMRNCTRLSTA_OP_MODES,
1131                                                      &phy_reg);
1132                 if (ret_val)
1133                         goto release;
1134                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1135                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1136                                             phy_reg);
1137         }
1138
1139         /* Set Inband ULP Exit, Reset to SMBus mode and
1140          * Disable SMBus Release on PERST# in PHY
1141          */
1142         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1143         if (ret_val)
1144                 goto release;
1145         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1146                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1147         if (to_sx) {
1148                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1149                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1150
1151                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1152         } else {
1153                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1154         }
1155         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1156
1157         /* Set Disable SMBus Release on PERST# in MAC */
1158         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1159         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1160         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1161
1162         /* Commit ULP changes in PHY by starting auto ULP configuration */
1163         phy_reg |= I218_ULP_CONFIG1_START;
1164         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1165
1166         if (!to_sx) {
1167                 /* Disable Tx so that the MAC doesn't send any (buffered)
1168                  * packets to the PHY.
1169                  */
1170                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1171                 mac_reg &= ~E1000_TCTL_EN;
1172                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1173         }
1174 release:
1175         hw->phy.ops.release(hw);
1176 out:
1177         if (ret_val)
1178                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1179         else
1180                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1181
1182         return ret_val;
1183 }
1184
1185 /**
1186  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1187  *  @hw: pointer to the HW structure
1188  *  @force: boolean indicating whether or not to force disabling ULP
1189  *
1190  *  Un-configure ULP mode when link is up, the system is transitioned from
1191  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1192  *  system, poll for an indication from ME that ULP has been un-configured.
1193  *  If not on an ME enabled system, un-configure the ULP mode by software.
1194  *
1195  *  During nominal operation, this function is called when link is acquired
1196  *  to disable ULP mode (force=false); otherwise, for example when unloading
1197  *  the driver or during Sx->S0 transitions, this is called with force=true
1198  *  to forcibly disable ULP.
1199  *
1200  *  When the cable is plugged in while the device is in D0, a Cable Status
1201  *  Change interrupt is generated which causes this function to be called
1202  *  to partially disable ULP mode and restart autonegotiation.  This function
1203  *  is then called again due to the resulting Link Status Change interrupt
1204  *  to finish cleaning up after the ULP flow.
1205  */
1206 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1207 {
1208         s32 ret_val = E1000_SUCCESS;
1209         u32 mac_reg;
1210         u16 phy_reg;
1211         int i = 0;
1212
1213         if ((hw->mac.type < e1000_pch_lpt) ||
1214             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1215             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1216             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1217             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1218             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1219                 return 0;
1220
1221         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1222                 if (force) {
1223                         /* Request ME un-configure ULP mode in the PHY */
1224                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1225                         mac_reg &= ~E1000_H2ME_ULP;
1226                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1227                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1228                 }
1229
1230                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1231                 while (E1000_READ_REG(hw, E1000_FWSM) &
1232                        E1000_FWSM_ULP_CFG_DONE) {
1233                         if (i++ == 10) {
1234                                 ret_val = -E1000_ERR_PHY;
1235                                 goto out;
1236                         }
1237
1238                         msec_delay(10);
1239                 }
1240                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1241
1242                 if (force) {
1243                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1244                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1245                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1246                 } else {
1247                         /* Clear H2ME.ULP after ME ULP configuration */
1248                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1249                         mac_reg &= ~E1000_H2ME_ULP;
1250                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1251
1252                         /* Restore link speed advertisements and restart
1253                          * Auto-negotiation
1254                          */
1255                         ret_val = e1000_phy_setup_autoneg(hw);
1256                         if (ret_val)
1257                                 goto out;
1258
1259                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1260                 }
1261
1262                 goto out;
1263         }
1264
1265         ret_val = hw->phy.ops.acquire(hw);
1266         if (ret_val)
1267                 goto out;
1268
1269         /* Revert the change that made the 'Link Status Change'
1270          * interrupt trigger on 'Cable Status Change'
1271          */
1272         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1273                                              &phy_reg);
1274         if (ret_val)
1275                 goto release;
1276         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1277         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1278
1279         if (force)
1280                 /* Toggle LANPHYPC Value bit */
1281                 e1000_toggle_lanphypc_pch_lpt(hw);
1282
1283         /* Unforce SMBus mode in PHY */
1284         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1285         if (ret_val) {
1286                 /* The MAC might be in PCIe mode, so temporarily force to
1287                  * SMBus mode in order to access the PHY.
1288                  */
1289                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1290                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1291                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1292
1293                 msec_delay(50);
1294
1295                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1296                                                        &phy_reg);
1297                 if (ret_val)
1298                         goto release;
1299         }
1300         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1301         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1302
1303         /* Unforce SMBus mode in MAC */
1304         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1305         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1306         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1307
1308         /* When ULP mode was previously entered, K1 was disabled by the
1309          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1310          */
1311         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1312         if (ret_val)
1313                 goto release;
1314         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1315         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1316
1317         /* Clear ULP enabled configuration */
1318         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1319         if (ret_val)
1320                 goto release;
1321         /* CSC interrupt received due to ULP Indication */
1322         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1323                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1324                              I218_ULP_CONFIG1_STICKY_ULP |
1325                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1326                              I218_ULP_CONFIG1_WOL_HOST |
1327                              I218_ULP_CONFIG1_INBAND_EXIT |
1328                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1329                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1330
1331                 /* Commit ULP changes by starting auto ULP configuration */
1332                 phy_reg |= I218_ULP_CONFIG1_START;
1333                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1334
1335                 /* Clear Disable SMBus Release on PERST# in MAC */
1336                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1337                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1338                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1339
1340                 if (!force) {
1341                         hw->phy.ops.release(hw);
1342
1343                         if (hw->mac.autoneg)
1344                                 e1000_phy_setup_autoneg(hw);
1345
1346                         e1000_sw_lcd_config_ich8lan(hw);
1347
1348                         e1000_oem_bits_config_ich8lan(hw, true);
1349
1350                         /* Set ULP state to unknown and return non-zero to
1351                          * indicate no link (yet), then re-enter on the next
1352                          * LSC interrupt to finish disabling the ULP flow.
1353                          */
1354                         hw->dev_spec.ich8lan.ulp_state =
1355                             e1000_ulp_state_unknown;
1356
1357                         return 1;
1358                 }
1359         }
1360
1361         /* Re-enable Tx */
1362         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1363         mac_reg |= E1000_TCTL_EN;
1364         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1365
1366 release:
1367         hw->phy.ops.release(hw);
1368         if (force) {
1369                 hw->phy.ops.reset(hw);
1370                 msec_delay(50);
1371         }
1372 out:
1373         if (ret_val)
1374                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1375         else
1376                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1377
1378         return ret_val;
1379 }
1380
1381 #endif /* ULP_SUPPORT */
1382 /**
1383  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1384  *  @hw: pointer to the HW structure
1385  *
1386  *  Checks to see if the link status of the hardware has changed.  If a
1387  *  change in link status has been detected, then we read the PHY registers
1388  *  to get the current speed/duplex if link exists.
1389  **/
1390 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1391 {
1392         struct e1000_mac_info *mac = &hw->mac;
1393         s32 ret_val;
1394         bool link = false;
1395         u16 phy_reg;
1396
1397         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1398
1399         /* We only want to go out to the PHY registers to see if Auto-Neg
1400          * has completed and/or if our link status has changed.  The
1401          * get_link_status flag is set upon receiving a Link Status
1402          * Change or Rx Sequence Error interrupt.
1403          */
1404         if (!mac->get_link_status)
1405                 return E1000_SUCCESS;
1406
1407         if ((hw->mac.type < e1000_pch_lpt) ||
1408             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1409             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1410                 /* First we want to see if the MII Status Register reports
1411                  * link.  If so, then we want to get the current speed/duplex
1412                  * of the PHY.
1413                  */
1414                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1415                 if (ret_val)
1416                         return ret_val;
1417         } else {
1418                 /* Check the MAC's STATUS register to determine link state
1419                  * since the PHY could be inaccessible while in ULP mode.
1420                  */
1421                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1422                 if (link)
1423                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1424                 else
1425                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1426
1427                 if (ret_val)
1428                         return ret_val;
1429         }
1430
1431         if (hw->mac.type == e1000_pchlan) {
1432                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1433                 if (ret_val)
1434                         return ret_val;
1435         }
1436
1437         /* When connected at 10Mbps half-duplex, some parts are excessively
1438          * aggressive resulting in many collisions. To avoid this, increase
1439          * the IPG and reduce Rx latency in the PHY.
1440          */
1441         if (((hw->mac.type == e1000_pch2lan) ||
1442              (hw->mac.type == e1000_pch_lpt)) && link) {
1443                 u32 reg;
1444                 reg = E1000_READ_REG(hw, E1000_STATUS);
1445                 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1446                         u16 emi_addr;
1447
1448                         reg = E1000_READ_REG(hw, E1000_TIPG);
1449                         reg &= ~E1000_TIPG_IPGT_MASK;
1450                         reg |= 0xFF;
1451                         E1000_WRITE_REG(hw, E1000_TIPG, reg);
1452
1453                         /* Reduce Rx latency in analog PHY */
1454                         ret_val = hw->phy.ops.acquire(hw);
1455                         if (ret_val)
1456                                 return ret_val;
1457
1458                         if (hw->mac.type == e1000_pch2lan)
1459                                 emi_addr = I82579_RX_CONFIG;
1460                         else
1461                                 emi_addr = I217_RX_CONFIG;
1462                         ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1463
1464                         hw->phy.ops.release(hw);
1465
1466                         if (ret_val)
1467                                 return ret_val;
1468                 }
1469         }
1470
1471         /* Work-around I218 hang issue */
1472         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1473             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1474             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1475             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1476                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1477                 if (ret_val)
1478                         return ret_val;
1479         }
1480
1481         /* Clear link partner's EEE ability */
1482         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1483
1484         if (!link)
1485                 return E1000_SUCCESS; /* No link detected */
1486
1487         mac->get_link_status = false;
1488
1489         switch (hw->mac.type) {
1490         case e1000_pch2lan:
1491                 ret_val = e1000_k1_workaround_lv(hw);
1492                 if (ret_val)
1493                         return ret_val;
1494                 /* fall-thru */
1495         case e1000_pchlan:
1496                 if (hw->phy.type == e1000_phy_82578) {
1497                         ret_val = e1000_link_stall_workaround_hv(hw);
1498                         if (ret_val)
1499                                 return ret_val;
1500                 }
1501
1502                 /* Workaround for PCHx parts in half-duplex:
1503                  * Set the number of preambles removed from the packet
1504                  * when it is passed from the PHY to the MAC to prevent
1505                  * the MAC from misinterpreting the packet type.
1506                  */
1507                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1508                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1509
1510                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1511                     E1000_STATUS_FD)
1512                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1513
1514                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1515                 break;
1516         default:
1517                 break;
1518         }
1519
1520         /* Check if there was a DownShift; this must be checked
1521          * immediately after link-up
1522          */
1523         e1000_check_downshift_generic(hw);
1524
1525         /* Enable/Disable EEE after link up */
1526         if (hw->phy.type > e1000_phy_82579) {
1527                 ret_val = e1000_set_eee_pchlan(hw);
1528                 if (ret_val)
1529                         return ret_val;
1530         }
1531
1532         /* If we are forcing speed/duplex, then we simply return since
1533          * we have already determined whether we have link or not.
1534          */
1535         if (!mac->autoneg)
1536                 return -E1000_ERR_CONFIG;
1537
1538         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1539          * of MAC speed/duplex configuration.  So we only need to
1540          * configure Collision Distance in the MAC.
1541          */
1542         mac->ops.config_collision_dist(hw);
1543
1544         /* Configure Flow Control now that Auto-Neg has completed.
1545          * First, we need to restore the desired flow control
1546          * settings because we may have had to re-autoneg with a
1547          * different link partner.
1548          */
1549         ret_val = e1000_config_fc_after_link_up_generic(hw);
1550         if (ret_val)
1551                 DEBUGOUT("Error configuring flow control\n");
1552
1553         return ret_val;
1554 }
1555
1556 /**
1557  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1558  *  @hw: pointer to the HW structure
1559  *
1560  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1561  **/
1562 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1563 {
1564         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1565
1566         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1567         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1568         switch (hw->mac.type) {
1569         case e1000_ich8lan:
1570         case e1000_ich9lan:
1571         case e1000_ich10lan:
1572                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1573                 break;
1574         case e1000_pchlan:
1575         case e1000_pch2lan:
1576         case e1000_pch_lpt:
1577                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1578                 break;
1579         default:
1580                 break;
1581         }
1582 }
1583
1584 /**
1585  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1586  *  @hw: pointer to the HW structure
1587  *
1588  *  Acquires the mutex for performing NVM operations.
1589  **/
1590 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1591 {
1592         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1593
1594         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1595
1596         return E1000_SUCCESS;
1597 }
1598
1599 /**
1600  *  e1000_release_nvm_ich8lan - Release NVM mutex
1601  *  @hw: pointer to the HW structure
1602  *
1603  *  Releases the mutex used while performing NVM operations.
1604  **/
1605 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1606 {
1607         DEBUGFUNC("e1000_release_nvm_ich8lan");
1608
1609         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1610
1611         return;
1612 }
1613
1614 /**
1615  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1616  *  @hw: pointer to the HW structure
1617  *
1618  *  Acquires the software control flag for performing PHY and select
1619  *  MAC CSR accesses.
1620  **/
1621 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1622 {
1623         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1624         s32 ret_val = E1000_SUCCESS;
1625
1626         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1627
1628         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1629
1630         while (timeout) {
1631                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1632                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1633                         break;
1634
1635                 msec_delay_irq(1);
1636                 timeout--;
1637         }
1638
1639         if (!timeout) {
1640                 DEBUGOUT("SW has already locked the resource.\n");
1641                 ret_val = -E1000_ERR_CONFIG;
1642                 goto out;
1643         }
1644
1645         timeout = SW_FLAG_TIMEOUT;
1646
1647         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1648         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1649
1650         while (timeout) {
1651                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1652                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1653                         break;
1654
1655                 msec_delay_irq(1);
1656                 timeout--;
1657         }
1658
1659         if (!timeout) {
1660                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1661                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1662                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1663                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1664                 ret_val = -E1000_ERR_CONFIG;
1665                 goto out;
1666         }
1667
1668 out:
1669         if (ret_val)
1670                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1671
1672         return ret_val;
1673 }
1674
1675 /**
1676  *  e1000_release_swflag_ich8lan - Release software control flag
1677  *  @hw: pointer to the HW structure
1678  *
1679  *  Releases the software control flag for performing PHY and select
1680  *  MAC CSR accesses.
1681  **/
1682 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1683 {
1684         u32 extcnf_ctrl;
1685
1686         DEBUGFUNC("e1000_release_swflag_ich8lan");
1687
1688         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1689
1690         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1691                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1692                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1693         } else {
1694                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1695         }
1696
1697         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1698
1699         return;
1700 }
1701
1702 /**
1703  *  e1000_check_mng_mode_ich8lan - Checks management mode
1704  *  @hw: pointer to the HW structure
1705  *
1706  *  This checks if the adapter has any manageability enabled.
1707  *  This is a function pointer entry point only called by read/write
1708  *  routines for the PHY and NVM parts.
1709  **/
1710 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1711 {
1712         u32 fwsm;
1713
1714         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1715
1716         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1717
1718         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1719                ((fwsm & E1000_FWSM_MODE_MASK) ==
1720                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1721 }
1722
1723 /**
1724  *  e1000_check_mng_mode_pchlan - Checks management mode
1725  *  @hw: pointer to the HW structure
1726  *
1727  *  This checks if the adapter has iAMT enabled.
1728  *  This is a function pointer entry point only called by read/write
1729  *  routines for the PHY and NVM parts.
1730  **/
1731 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1732 {
1733         u32 fwsm;
1734
1735         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1736
1737         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1738
1739         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1740                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1741 }
1742
1743 /**
1744  *  e1000_rar_set_pch2lan - Set receive address register
1745  *  @hw: pointer to the HW structure
1746  *  @addr: pointer to the receive address
1747  *  @index: receive address array register
1748  *
1749  *  Sets the receive address array register at index to the address passed
1750  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1751  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1752  *  Use SHRA[0-3] in place of those reserved for ME.
1753  **/
1754 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1755 {
1756         u32 rar_low, rar_high;
1757
1758         DEBUGFUNC("e1000_rar_set_pch2lan");
1759
1760         /* HW expects these in little endian so we reverse the byte order
1761          * from network order (big endian) to little endian
1762          */
1763         rar_low = ((u32) addr[0] |
1764                    ((u32) addr[1] << 8) |
1765                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1766
1767         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1768
1769         /* If MAC address zero, no need to set the AV bit */
1770         if (rar_low || rar_high)
1771                 rar_high |= E1000_RAH_AV;
1772
1773         if (index == 0) {
1774                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1775                 E1000_WRITE_FLUSH(hw);
1776                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1777                 E1000_WRITE_FLUSH(hw);
1778                 return E1000_SUCCESS;
1779         }
1780
1781         /* RAR[1-6] are owned by manageability.  Skip those and program the
1782          * next address into the SHRA register array.
1783          */
1784         if (index < (u32) (hw->mac.rar_entry_count)) {
1785                 s32 ret_val;
1786
1787                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1788                 if (ret_val)
1789                         goto out;
1790
1791                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1792                 E1000_WRITE_FLUSH(hw);
1793                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1794                 E1000_WRITE_FLUSH(hw);
1795
1796                 e1000_release_swflag_ich8lan(hw);
1797
1798                 /* verify the register updates */
1799                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1800                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1801                         return E1000_SUCCESS;
1802
1803                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1804                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1805         }
1806
1807 out:
1808         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1809         return -E1000_ERR_CONFIG;
1810 }
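/* Worked example of the RAL/RAH packing performed in e1000_rar_set_pch2lan()
 * above (illustrative address only, not taken from any real configuration):
 * for addr[] = {0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF},
 *   rar_low  = 0x00 | (0x1B << 8) | (0x21 << 16) | (0xAB << 24) = 0xAB211B00
 *   rar_high = 0xCD | (0xEF << 8)                               = 0x0000EFCD
 * and, because the address is non-zero, E1000_RAH_AV is OR'd into rar_high
 * before the RAR (or SHRA shadow register) writes.
 */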
1811
1812 /**
1813  *  e1000_rar_set_pch_lpt - Set receive address registers
1814  *  @hw: pointer to the HW structure
1815  *  @addr: pointer to the receive address
1816  *  @index: receive address array register
1817  *
1818  *  Sets the receive address register array at index to the address passed
1819  *  in by addr. For LPT, RAR[0] is the base address register that is to
1820  *  contain the MAC address. SHRA[0-10] are the shared receive address
1821  *  registers that are shared between the Host and manageability engine (ME).
1822  **/
1823 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1824 {
1825         u32 rar_low, rar_high;
1826         u32 wlock_mac;
1827
1828         DEBUGFUNC("e1000_rar_set_pch_lpt");
1829
1830         /* HW expects these in little endian so we reverse the byte order
1831          * from network order (big endian) to little endian
1832          */
1833         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1834                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1835
1836         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1837
1838         /* If MAC address zero, no need to set the AV bit */
1839         if (rar_low || rar_high)
1840                 rar_high |= E1000_RAH_AV;
1841
1842         if (index == 0) {
1843                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1844                 E1000_WRITE_FLUSH(hw);
1845                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1846                 E1000_WRITE_FLUSH(hw);
1847                 return E1000_SUCCESS;
1848         }
1849
1850         /* The manageability engine (ME) can lock certain SHRAR registers that
1851          * it is using - those registers are unavailable for use.
1852          */
1853         if (index < hw->mac.rar_entry_count) {
1854                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1855                             E1000_FWSM_WLOCK_MAC_MASK;
1856                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1857
1858                 /* Check if all SHRAR registers are locked */
1859                 if (wlock_mac == 1)
1860                         goto out;
1861
1862                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1863                         s32 ret_val;
1864
1865                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1866
1867                         if (ret_val)
1868                                 goto out;
1869
1870                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1871                                         rar_low);
1872                         E1000_WRITE_FLUSH(hw);
1873                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1874                                         rar_high);
1875                         E1000_WRITE_FLUSH(hw);
1876
1877                         e1000_release_swflag_ich8lan(hw);
1878
1879                         /* verify the register updates */
1880                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1881                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1882                                 return E1000_SUCCESS;
1883                 }
1884         }
1885
1886 out:
1887         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1888         return -E1000_ERR_CONFIG;
1889 }
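/* Worked example of the FWSM.WLOCK_MAC handling in e1000_rar_set_pch_lpt()
 * above (illustrative values only): a field value of 0 leaves every SHRA
 * register writable by the host, a value of 1 means the manageability engine
 * has locked them all and the write fails, and a value of e.g. 3 lets the
 * function program indices 1..3 (SHRAL/H_PCH_LPT[0..2]) while rejecting any
 * higher index.
 */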
1890
1891 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1892 /**
1893  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1894  *  @hw: pointer to the HW structure
1895  *  @mc_addr_list: array of multicast addresses to program
1896  *  @mc_addr_count: number of multicast addresses to program
1897  *
1898  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1899  *  The caller must have a packed mc_addr_list of multicast addresses.
1900  **/
1901 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1902                                               u8 *mc_addr_list,
1903                                               u32 mc_addr_count)
1904 {
1905         u16 phy_reg = 0;
1906         int i;
1907         s32 ret_val;
1908
1909         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1910
1911         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1912
1913         ret_val = hw->phy.ops.acquire(hw);
1914         if (ret_val)
1915                 return;
1916
1917         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1918         if (ret_val)
1919                 goto release;
1920
1921         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1922                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1923                                            (u16)(hw->mac.mta_shadow[i] &
1924                                                  0xFFFF));
1925                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1926                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1927                                                  0xFFFF));
1928         }
1929
1930         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1931
1932 release:
1933         hw->phy.ops.release(hw);
1934 }
1935
1936 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1937 /**
1938  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1939  *  @hw: pointer to the HW structure
1940  *
1941  *  Checks if firmware is blocking the reset of the PHY.
1942  *  This is a function pointer entry point only called by
1943  *  reset routines.
1944  **/
1945 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1946 {
1947         u32 fwsm;
1948         bool blocked = false;
1949         int i = 0;
1950
1951         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1952
1953         do {
1954                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1955                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1956                         blocked = true;
1957                         msec_delay(10);
1958                         continue;
1959                 }
1960                 blocked = false;
1961         } while (blocked && (i++ < 10));
1962         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1963 }
1964
1965 /**
1966  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1967  *  @hw: pointer to the HW structure
1968  *
1969  *  Assumes semaphore already acquired.
1970  *
1971  **/
1972 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1973 {
1974         u16 phy_data;
1975         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1976         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1977                 E1000_STRAP_SMT_FREQ_SHIFT;
1978         s32 ret_val;
1979
1980         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1981
1982         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1983         if (ret_val)
1984                 return ret_val;
1985
1986         phy_data &= ~HV_SMB_ADDR_MASK;
1987         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1988         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1989
1990         if (hw->phy.type == e1000_phy_i217) {
1991                 /* Restore SMBus frequency */
1992                 if (freq--) {
1993                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1994                         phy_data |= (freq & (1 << 0)) <<
1995                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1996                         phy_data |= (freq & (1 << 1)) <<
1997                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1998                 } else {
1999                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2000                 }
2001         }
2002
2003         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2004 }
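/* Worked example of the SMBus frequency handling in e1000_write_smbus_addr()
 * above (illustrative strap values only): a frequency field of 0 takes the
 * "Unsupported SMB frequency" branch and leaves the PHY default alone; a
 * non-zero value N is first decremented by the "if (freq--)" test, then bit 0
 * of (N - 1) is written at HV_SMB_ADDR_FREQ_LOW_SHIFT and bit 1 ends up at
 * HV_SMB_ADDR_FREQ_HIGH_SHIFT.  For instance, a strapped value of 2 encodes
 * (2 - 1) = 0x1, setting only the low frequency bit.
 */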
2005
2006 /**
2007  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2008  *  @hw:   pointer to the HW structure
2009  *
2010  *  SW should configure the LCD from the NVM extended configuration region
2011  *  as a workaround for certain parts.
2012  **/
2013 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2014 {
2015         struct e1000_phy_info *phy = &hw->phy;
2016         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2017         s32 ret_val = E1000_SUCCESS;
2018         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2019
2020         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2021
2022         /* Initialize the PHY from the NVM on ICH platforms.  This
2023          * is needed due to an issue where the NVM configuration is
2024          * not properly autoloaded after power transitions.
2025          * Therefore, after each PHY reset, we will load the
2026          * configuration data out of the NVM manually.
2027          */
2028         switch (hw->mac.type) {
2029         case e1000_ich8lan:
2030                 if (phy->type != e1000_phy_igp_3)
2031                         return ret_val;
2032
2033                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2034                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2035                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2036                         break;
2037                 }
2038                 /* Fall-thru */
2039         case e1000_pchlan:
2040         case e1000_pch2lan:
2041         case e1000_pch_lpt:
2042                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2043                 break;
2044         default:
2045                 return ret_val;
2046         }
2047
2048         ret_val = hw->phy.ops.acquire(hw);
2049         if (ret_val)
2050                 return ret_val;
2051
2052         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2053         if (!(data & sw_cfg_mask))
2054                 goto release;
2055
2056         /* Make sure HW does not configure LCD from PHY
2057          * extended configuration before SW configuration
2058          */
2059         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2060         if ((hw->mac.type < e1000_pch2lan) &&
2061             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2062                 goto release;
2063
2064         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2065         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2066         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2067         if (!cnf_size)
2068                 goto release;
2069
2070         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2071         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2072
2073         if (((hw->mac.type == e1000_pchlan) &&
2074              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2075             (hw->mac.type > e1000_pchlan)) {
2076                 /* HW configures the SMBus address and LEDs when the
2077                  * OEM and LCD Write Enable bits are set in the NVM.
2078                  * When both NVM bits are cleared, SW will configure
2079                  * them instead.
2080                  */
2081                 ret_val = e1000_write_smbus_addr(hw);
2082                 if (ret_val)
2083                         goto release;
2084
2085                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2086                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2087                                                         (u16)data);
2088                 if (ret_val)
2089                         goto release;
2090         }
2091
2092         /* Configure LCD from extended configuration region. */
2093
2094         /* cnf_base_addr is in DWORD */
2095         word_addr = (u16)(cnf_base_addr << 1);
2096
2097         for (i = 0; i < cnf_size; i++) {
2098                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2099                                            &reg_data);
2100                 if (ret_val)
2101                         goto release;
2102
2103                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2104                                            1, &reg_addr);
2105                 if (ret_val)
2106                         goto release;
2107
2108                 /* Save off the PHY page for future writes. */
2109                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2110                         phy_page = reg_data;
2111                         continue;
2112                 }
2113
2114                 reg_addr &= PHY_REG_MASK;
2115                 reg_addr |= phy_page;
2116
2117                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2118                                                     reg_data);
2119                 if (ret_val)
2120                         goto release;
2121         }
2122
2123 release:
2124         hw->phy.ops.release(hw);
2125         return ret_val;
2126 }
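/* Worked example of the extended configuration parsing in
 * e1000_sw_lcd_config_ich8lan() above (illustrative entries only): each table
 * entry is a pair of NVM words, the data word at (word_addr + i * 2) followed
 * by the register address word.  An entry whose address word equals
 * IGP01E1000_PHY_PAGE_SELECT only latches its data word as the current PHY
 * page; every other entry has its address masked with PHY_REG_MASK, OR'd with
 * that latched page, and written to the PHY with its data word.
 */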
2127
2128 /**
2129  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2130  *  @hw:   pointer to the HW structure
2131  *  @link: link up bool flag
2132  *
2133  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2134  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2135  *  If link is down, the function will restore the default K1 setting located
2136  *  in the NVM.
2137  **/
2138 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2139 {
2140         s32 ret_val = E1000_SUCCESS;
2141         u16 status_reg = 0;
2142         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2143
2144         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2145
2146         if (hw->mac.type != e1000_pchlan)
2147                 return E1000_SUCCESS;
2148
2149         /* Wrap the whole flow with the sw flag */
2150         ret_val = hw->phy.ops.acquire(hw);
2151         if (ret_val)
2152                 return ret_val;
2153
2154         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2155         if (link) {
2156                 if (hw->phy.type == e1000_phy_82578) {
2157                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2158                                                               &status_reg);
2159                         if (ret_val)
2160                                 goto release;
2161
2162                         status_reg &= (BM_CS_STATUS_LINK_UP |
2163                                        BM_CS_STATUS_RESOLVED |
2164                                        BM_CS_STATUS_SPEED_MASK);
2165
2166                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2167                                            BM_CS_STATUS_RESOLVED |
2168                                            BM_CS_STATUS_SPEED_1000))
2169                                 k1_enable = false;
2170                 }
2171
2172                 if (hw->phy.type == e1000_phy_82577) {
2173                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2174                                                               &status_reg);
2175                         if (ret_val)
2176                                 goto release;
2177
2178                         status_reg &= (HV_M_STATUS_LINK_UP |
2179                                        HV_M_STATUS_AUTONEG_COMPLETE |
2180                                        HV_M_STATUS_SPEED_MASK);
2181
2182                         if (status_reg == (HV_M_STATUS_LINK_UP |
2183                                            HV_M_STATUS_AUTONEG_COMPLETE |
2184                                            HV_M_STATUS_SPEED_1000))
2185                                 k1_enable = false;
2186                 }
2187
2188                 /* Link stall fix for link up */
2189                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2190                                                        0x0100);
2191                 if (ret_val)
2192                         goto release;
2193
2194         } else {
2195                 /* Link stall fix for link down */
2196                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2197                                                        0x4100);
2198                 if (ret_val)
2199                         goto release;
2200         }
2201
2202         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2203
2204 release:
2205         hw->phy.ops.release(hw);
2206
2207         return ret_val;
2208 }
2209
2210 /**
2211  *  e1000_configure_k1_ich8lan - Configure K1 power state
2212  *  @hw: pointer to the HW structure
2213  *  @k1_enable: K1 state to configure
2214  *
2215  *  Configure the K1 power state based on the provided parameter.
2216  *  Assumes semaphore already acquired.
2217  *
2218  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2219  **/
2220 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2221 {
2222         s32 ret_val;
2223         u32 ctrl_reg = 0;
2224         u32 ctrl_ext = 0;
2225         u32 reg = 0;
2226         u16 kmrn_reg = 0;
2227
2228         DEBUGFUNC("e1000_configure_k1_ich8lan");
2229
2230         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2231                                              &kmrn_reg);
2232         if (ret_val)
2233                 return ret_val;
2234
2235         if (k1_enable)
2236                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2237         else
2238                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2239
2240         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2241                                               kmrn_reg);
2242         if (ret_val)
2243                 return ret_val;
2244
2245         usec_delay(20);
2246         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2247         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2248
2249         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2250         reg |= E1000_CTRL_FRCSPD;
2251         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2252
2253         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2254         E1000_WRITE_FLUSH(hw);
2255         usec_delay(20);
2256         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2257         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2258         E1000_WRITE_FLUSH(hw);
2259         usec_delay(20);
2260
2261         return E1000_SUCCESS;
2262 }
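/* Note on the register sequence in e1000_configure_k1_ich8lan() above: after
 * the K1 KMRN write, the routine saves CTRL and CTRL_EXT, briefly writes CTRL
 * with the SPD_1000/SPD_100 speed-select bits cleared and FRCSPD set while
 * CTRL_EXT.SPD_BYPS is asserted, then restores the original values, with
 * 20 usec delays around each step.  This brief force/bypass toggle appears to
 * be what lets the new K1 setting take effect on the MAC/PHY interconnect.
 */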
2263
2264 /**
2265  *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
2266  *  @hw:       pointer to the HW structure
2267  *  @d0_state: boolean indicating whether the device is entering D0 or D3
2268  *
2269  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2270  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2271  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2272  **/
2273 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2274 {
2275         s32 ret_val = 0;
2276         u32 mac_reg;
2277         u16 oem_reg;
2278
2279         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2280
2281         if (hw->mac.type < e1000_pchlan)
2282                 return ret_val;
2283
2284         ret_val = hw->phy.ops.acquire(hw);
2285         if (ret_val)
2286                 return ret_val;
2287
2288         if (hw->mac.type == e1000_pchlan) {
2289                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2290                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2291                         goto release;
2292         }
2293
2294         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2295         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2296                 goto release;
2297
2298         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2299
2300         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2301         if (ret_val)
2302                 goto release;
2303
2304         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2305
2306         if (d0_state) {
2307                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2308                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2309
2310                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2311                         oem_reg |= HV_OEM_BITS_LPLU;
2312         } else {
2313                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2314                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2315                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2316
2317                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2318                     E1000_PHY_CTRL_NOND0A_LPLU))
2319                         oem_reg |= HV_OEM_BITS_LPLU;
2320         }
2321
2322         /* Set Restart auto-neg to activate the bits */
2323         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2324             !hw->phy.ops.check_reset_block(hw))
2325                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2326
2327         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2328
2329 release:
2330         hw->phy.ops.release(hw);
2331
2332         return ret_val;
2333 }
2334
2335
2336 /**
2337  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2338  *  @hw:   pointer to the HW structure
2339  **/
2340 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2341 {
2342         s32 ret_val;
2343         u16 data;
2344
2345         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2346
2347         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2348         if (ret_val)
2349                 return ret_val;
2350
2351         data |= HV_KMRN_MDIO_SLOW;
2352
2353         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2354
2355         return ret_val;
2356 }
2357
2358 /**
2359  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2360  *  done after every PHY reset.
2361  **/
2362 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2363 {
2364         s32 ret_val = E1000_SUCCESS;
2365         u16 phy_data;
2366
2367         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2368
2369         if (hw->mac.type != e1000_pchlan)
2370                 return E1000_SUCCESS;
2371
2372         /* Set MDIO slow mode before any other MDIO access */
2373         if (hw->phy.type == e1000_phy_82577) {
2374                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2375                 if (ret_val)
2376                         return ret_val;
2377         }
2378
2379         if (((hw->phy.type == e1000_phy_82577) &&
2380              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2381             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2382                 /* Disable generation of early preamble */
2383                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2384                 if (ret_val)
2385                         return ret_val;
2386
2387                 /* Preamble tuning for SSC */
2388                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2389                                                 0xA204);
2390                 if (ret_val)
2391                         return ret_val;
2392         }
2393
2394         if (hw->phy.type == e1000_phy_82578) {
2395                 /* Return registers to default by doing a soft reset then
2396                  * writing 0x3140 to the control register.
2397                  */
2398                 if (hw->phy.revision < 2) {
2399                         e1000_phy_sw_reset_generic(hw);
2400                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2401                                                         0x3140);
2402                 }
2403         }
2404
2405         /* Select page 0 */
2406         ret_val = hw->phy.ops.acquire(hw);
2407         if (ret_val)
2408                 return ret_val;
2409
2410         hw->phy.addr = 1;
2411         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2412         hw->phy.ops.release(hw);
2413         if (ret_val)
2414                 return ret_val;
2415
2416         /* Configure the K1 Si workaround during phy reset assuming there is
2417          * link so that it disables K1 if the link is at 1Gbps.
2418          */
2419         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2420         if (ret_val)
2421                 return ret_val;
2422
2423         /* Workaround for link disconnects on a busy hub in half duplex */
2424         ret_val = hw->phy.ops.acquire(hw);
2425         if (ret_val)
2426                 return ret_val;
2427         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2428         if (ret_val)
2429                 goto release;
2430         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2431                                                phy_data & 0x00FF);
2432         if (ret_val)
2433                 goto release;
2434
2435         /* set the MSE threshold higher so the link stays up when noise is high */
2436         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2437 release:
2438         hw->phy.ops.release(hw);
2439
2440         return ret_val;
2441 }
2442
2443 /**
2444  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2445  *  @hw:   pointer to the HW structure
2446  **/
2447 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2448 {
2449         u32 mac_reg;
2450         u16 i, phy_reg = 0;
2451         s32 ret_val;
2452
2453         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2454
2455         ret_val = hw->phy.ops.acquire(hw);
2456         if (ret_val)
2457                 return;
2458         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2459         if (ret_val)
2460                 goto release;
2461
2462         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2463         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2464                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2465                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2466                                            (u16)(mac_reg & 0xFFFF));
2467                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2468                                            (u16)((mac_reg >> 16) & 0xFFFF));
2469
2470                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2471                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2472                                            (u16)(mac_reg & 0xFFFF));
2473                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2474                                            (u16)((mac_reg & E1000_RAH_AV)
2475                                                  >> 16));
2476         }
2477
2478         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2479
2480 release:
2481         hw->phy.ops.release(hw);
2482 }
2483
2484 #ifndef CRC32_OS_SUPPORT
2485 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2486 {
2487         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2488         u32 i, j, mask, crc;
2489
2490         DEBUGFUNC("e1000_calc_rx_da_crc");
2491
2492         crc = 0xffffffff;
2493         for (i = 0; i < 6; i++) {
2494                 crc = crc ^ mac[i];
2495                 for (j = 8; j > 0; j--) {
2496                         mask = (crc & 1) * (-1);
2497                         crc = (crc >> 1) ^ (poly & mask);
2498                 }
2499         }
2500         return ~crc;
2501 }
2502
2503 #endif /* CRC32_OS_SUPPORT */
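/* Note on e1000_calc_rx_da_crc() above: it implements the standard
 * bit-reflected IEEE 802.3 CRC-32 (reflected polynomial 0xEDB88320, initial
 * value 0xFFFFFFFF, final inversion), and the "(crc & 1) * (-1)" expression
 * simply expands the low bit of the running CRC into an all-ones or all-zeros
 * mask.  The jumbo-frame workaround below programs this CRC of each valid
 * receive address into the corresponding PCH_RAICC register when no
 * OS-provided CRC32 helper is available.
 */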
2504 /**
2505  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2506  *  with 82579 PHY
2507  *  @hw: pointer to the HW structure
2508  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2509  **/
2510 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2511 {
2512         s32 ret_val = E1000_SUCCESS;
2513         u16 phy_reg, data;
2514         u32 mac_reg;
2515         u16 i;
2516
2517         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2518
2519         if (hw->mac.type < e1000_pch2lan)
2520                 return E1000_SUCCESS;
2521
2522         /* disable Rx path while enabling/disabling workaround */
2523         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2524         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2525                                         phy_reg | (1 << 14));
2526         if (ret_val)
2527                 return ret_val;
2528
2529         if (enable) {
2530                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2531                  * SHRAL/H) and initial CRC values to the MAC
2532                  */
2533                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2534                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2535                         u32 addr_high, addr_low;
2536
2537                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2538                         if (!(addr_high & E1000_RAH_AV))
2539                                 continue;
2540                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2541                         mac_addr[0] = (addr_low & 0xFF);
2542                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2543                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2544                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2545                         mac_addr[4] = (addr_high & 0xFF);
2546                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2547
2548 #ifndef CRC32_OS_SUPPORT
2549                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2550                                         e1000_calc_rx_da_crc(mac_addr));
2551 #else /* CRC32_OS_SUPPORT */
2552                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2553                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2554 #endif /* CRC32_OS_SUPPORT */
2555                 }
2556
2557                 /* Write Rx addresses to the PHY */
2558                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2559
2560                 /* Enable jumbo frame workaround in the MAC */
2561                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2562                 mac_reg &= ~(1 << 14);
2563                 mac_reg |= (7 << 15);
2564                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2565
2566                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2567                 mac_reg |= E1000_RCTL_SECRC;
2568                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2569
2570                 ret_val = e1000_read_kmrn_reg_generic(hw,
2571                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2572                                                 &data);
2573                 if (ret_val)
2574                         return ret_val;
2575                 ret_val = e1000_write_kmrn_reg_generic(hw,
2576                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2577                                                 data | (1 << 0));
2578                 if (ret_val)
2579                         return ret_val;
2580                 ret_val = e1000_read_kmrn_reg_generic(hw,
2581                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2582                                                 &data);
2583                 if (ret_val)
2584                         return ret_val;
2585                 data &= ~(0xF << 8);
2586                 data |= (0xB << 8);
2587                 ret_val = e1000_write_kmrn_reg_generic(hw,
2588                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2589                                                 data);
2590                 if (ret_val)
2591                         return ret_val;
2592
2593                 /* Enable jumbo frame workaround in the PHY */
2594                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2595                 data &= ~(0x7F << 5);
2596                 data |= (0x37 << 5);
2597                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2598                 if (ret_val)
2599                         return ret_val;
2600                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2601                 data &= ~(1 << 13);
2602                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2603                 if (ret_val)
2604                         return ret_val;
2605                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2606                 data &= ~(0x3FF << 2);
2607                 data |= (E1000_TX_PTR_GAP << 2);
2608                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2609                 if (ret_val)
2610                         return ret_val;
2611                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2612                 if (ret_val)
2613                         return ret_val;
2614                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2615                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2616                                                 (1 << 10));
2617                 if (ret_val)
2618                         return ret_val;
2619         } else {
2620                 /* Write MAC register values back to h/w defaults */
2621                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2622                 mac_reg &= ~(0xF << 14);
2623                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2624
2625                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2626                 mac_reg &= ~E1000_RCTL_SECRC;
2627                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2628
2629                 ret_val = e1000_read_kmrn_reg_generic(hw,
2630                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2631                                                 &data);
2632                 if (ret_val)
2633                         return ret_val;
2634                 ret_val = e1000_write_kmrn_reg_generic(hw,
2635                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2636                                                 data & ~(1 << 0));
2637                 if (ret_val)
2638                         return ret_val;
2639                 ret_val = e1000_read_kmrn_reg_generic(hw,
2640                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2641                                                 &data);
2642                 if (ret_val)
2643                         return ret_val;
2644                 data &= ~(0xF << 8);
2645                 data |= (0xB << 8);
2646                 ret_val = e1000_write_kmrn_reg_generic(hw,
2647                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2648                                                 data);
2649                 if (ret_val)
2650                         return ret_val;
2651
2652                 /* Write PHY register values back to h/w defaults */
2653                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2654                 data &= ~(0x7F << 5);
2655                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2656                 if (ret_val)
2657                         return ret_val;
2658                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2659                 data |= (1 << 13);
2660                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2661                 if (ret_val)
2662                         return ret_val;
2663                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2664                 data &= ~(0x3FF << 2);
2665                 data |= (0x8 << 2);
2666                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2667                 if (ret_val)
2668                         return ret_val;
2669                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2670                 if (ret_val)
2671                         return ret_val;
2672                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2673                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2674                                                 ~(1 << 10));
2675                 if (ret_val)
2676                         return ret_val;
2677         }
2678
2679         /* re-enable Rx path after enabling/disabling workaround */
2680         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2681                                      ~(1 << 14));
2682 }
2683
2684 /**
2685  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
2686  *  @hw: pointer to the HW structure
2687  **/
2688 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2689 {
2690         s32 ret_val = E1000_SUCCESS;
2691
2692         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2693
2694         if (hw->mac.type != e1000_pch2lan)
2695                 return E1000_SUCCESS;
2696
2697         /* Set MDIO slow mode before any other MDIO access */
2698         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2699         if (ret_val)
2700                 return ret_val;
2701
2702         ret_val = hw->phy.ops.acquire(hw);
2703         if (ret_val)
2704                 return ret_val;
2705         /* set the MSE threshold higher so the link stays up when noise is high */
2706         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2707         if (ret_val)
2708                 goto release;
2709         /* drop the link after the MSE threshold has been reached 5 times */
2710         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2711 release:
2712         hw->phy.ops.release(hw);
2713
2714         return ret_val;
2715 }
2716
2717 /**
2718  *  e1000_k1_workaround_lv - K1 Si workaround
2719  *  @hw:   pointer to the HW structure
2720  *
2721  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2722  *  Disable K1 for 1000 and 100 speeds.
2723  **/
2724 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2725 {
2726         s32 ret_val = E1000_SUCCESS;
2727         u16 status_reg = 0;
2728
2729         DEBUGFUNC("e1000_k1_workaround_lv");
2730
2731         if (hw->mac.type != e1000_pch2lan)
2732                 return E1000_SUCCESS;
2733
2734         /* Set K1 beacon duration based on 10Mbps speed */
2735         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2736         if (ret_val)
2737                 return ret_val;
2738
2739         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2740             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2741                 if (status_reg &
2742                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2743                         u16 pm_phy_reg;
2744
2745                         /* LV (82579) 1G/100 packet drop issue workaround */
2746                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2747                                                        &pm_phy_reg);
2748                         if (ret_val)
2749                                 return ret_val;
2750                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2751                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2752                                                         pm_phy_reg);
2753                         if (ret_val)
2754                                 return ret_val;
2755                 } else {
2756                         u32 mac_reg;
2757                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2758                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2759                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2760                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2761                 }
2762         }
2763
2764         return ret_val;
2765 }
2766
2767 /**
2768  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2769  *  @hw:   pointer to the HW structure
2770  *  @gate: boolean set to true to gate, false to ungate
2771  *
2772  *  Gate/ungate the automatic PHY configuration via hardware; perform
2773  *  the configuration via software instead.
2774  **/
2775 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2776 {
2777         u32 extcnf_ctrl;
2778
2779         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2780
2781         if (hw->mac.type < e1000_pch2lan)
2782                 return;
2783
2784         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2785
2786         if (gate)
2787                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2788         else
2789                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2790
2791         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2792 }
2793
2794 /**
2795  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2796  *  @hw: pointer to the HW structure
2797  *
2798  *  Check the appropriate indication that the MAC has finished configuring the
2799  *  PHY after a software reset.
2800  **/
2801 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2802 {
2803         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2804
2805         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2806
2807         /* Wait for basic configuration to complete before proceeding */
2808         do {
2809                 data = E1000_READ_REG(hw, E1000_STATUS);
2810                 data &= E1000_STATUS_LAN_INIT_DONE;
2811                 usec_delay(100);
2812         } while ((!data) && --loop);
2813
2814         /* If basic configuration has not completed before the above loop
2815          * count reaches 0, loading the configuration from NVM will
2816          * leave the PHY in a bad state, possibly resulting in no link.
2817          */
2818         if (loop == 0)
2819                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2820
2821         /* Clear the Init Done bit for the next init event */
2822         data = E1000_READ_REG(hw, E1000_STATUS);
2823         data &= ~E1000_STATUS_LAN_INIT_DONE;
2824         E1000_WRITE_REG(hw, E1000_STATUS, data);
2825 }
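
/* Editorial note (not part of the upstream driver): the polling loop above
 * waits 100 us per iteration for up to E1000_ICH8_LAN_INIT_TIMEOUT
 * iterations, so the worst-case wait for LAN_INIT_DONE is approximately
 *
 *     max_wait_us = E1000_ICH8_LAN_INIT_TIMEOUT * 100;
 *
 * The timeout constant itself is defined in the ich8lan header, not shown in
 * this listing.
 */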
2826
2827 /**
2828  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2829  *  @hw: pointer to the HW structure
2830  **/
2831 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2832 {
2833         s32 ret_val = E1000_SUCCESS;
2834         u16 reg;
2835
2836         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2837
2838         if (hw->phy.ops.check_reset_block(hw))
2839                 return E1000_SUCCESS;
2840
2841         /* Allow time for h/w to get to quiescent state after reset */
2842         msec_delay(10);
2843
2844         /* Perform any necessary post-reset workarounds */
2845         switch (hw->mac.type) {
2846         case e1000_pchlan:
2847                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2848                 if (ret_val)
2849                         return ret_val;
2850                 break;
2851         case e1000_pch2lan:
2852                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2853                 if (ret_val)
2854                         return ret_val;
2855                 break;
2856         default:
2857                 break;
2858         }
2859
2860         /* Clear the host wakeup bit after LCD reset */
2861         if (hw->mac.type >= e1000_pchlan) {
2862                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2863                 reg &= ~BM_WUC_HOST_WU_BIT;
2864                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2865         }
2866
2867         /* Configure the LCD with the extended configuration region in NVM */
2868         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2869         if (ret_val)
2870                 return ret_val;
2871
2872         /* Configure the LCD with the OEM bits in NVM */
2873         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2874
2875         if (hw->mac.type == e1000_pch2lan) {
2876                 /* Ungate automatic PHY configuration on non-managed 82579 */
2877                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2878                     E1000_ICH_FWSM_FW_VALID)) {
2879                         msec_delay(10);
2880                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2881                 }
2882
2883                 /* Set EEE LPI Update Timer to 200usec */
2884                 ret_val = hw->phy.ops.acquire(hw);
2885                 if (ret_val)
2886                         return ret_val;
2887                 ret_val = e1000_write_emi_reg_locked(hw,
2888                                                      I82579_LPI_UPDATE_TIMER,
2889                                                      0x1387);
2890                 hw->phy.ops.release(hw);
2891         }
2892
2893         return ret_val;
2894 }
2895
2896 /**
2897  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2898  *  @hw: pointer to the HW structure
2899  *
2900  *  Resets the PHY
2901  *  This is a function pointer entry point called by drivers
2902  *  or other shared routines.
2903  **/
2904 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2905 {
2906         s32 ret_val = E1000_SUCCESS;
2907
2908         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2909
2910         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2911         if ((hw->mac.type == e1000_pch2lan) &&
2912             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2913                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2914
2915         ret_val = e1000_phy_hw_reset_generic(hw);
2916         if (ret_val)
2917                 return ret_val;
2918
2919         return e1000_post_phy_reset_ich8lan(hw);
2920 }
2921
2922 /**
2923  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2924  *  @hw: pointer to the HW structure
2925  *  @active: true to enable LPLU, false to disable
2926  *
2927  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM write
2928  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2929  *  the PHY speed. This function will manually set the LPLU bit and restart
2930  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2931  *  since it configures the same bit.
2932  **/
2933 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2934 {
2935         s32 ret_val;
2936         u16 oem_reg;
2937
2938         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2939
2940         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2941         if (ret_val)
2942                 return ret_val;
2943
2944         if (active)
2945                 oem_reg |= HV_OEM_BITS_LPLU;
2946         else
2947                 oem_reg &= ~HV_OEM_BITS_LPLU;
2948
2949         if (!hw->phy.ops.check_reset_block(hw))
2950                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2951
2952         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2953 }
2954
2955 /**
2956  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2957  *  @hw: pointer to the HW structure
2958  *  @active: true to enable LPLU, false to disable
2959  *
2960  *  Sets the LPLU D0 state according to the active flag.  When
2961  *  activating LPLU this function also disables smart speed
2962  *  and vice versa.  LPLU will not be activated unless the
2963  *  device autonegotiation advertisement is 10, 10/100, or
2964  *  10/100/1000 Mb/s at all duplexes.
2965  *  This is a function pointer entry point only called by
2966  *  PHY setup routines.
2967  **/
2968 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2969 {
2970         struct e1000_phy_info *phy = &hw->phy;
2971         u32 phy_ctrl;
2972         s32 ret_val = E1000_SUCCESS;
2973         u16 data;
2974
2975         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2976
2977         if (phy->type == e1000_phy_ife)
2978                 return E1000_SUCCESS;
2979
2980         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2981
2982         if (active) {
2983                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2984                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2985
2986                 if (phy->type != e1000_phy_igp_3)
2987                         return E1000_SUCCESS;
2988
2989                 /* Call gig speed drop workaround on LPLU before accessing
2990                  * any PHY registers
2991                  */
2992                 if (hw->mac.type == e1000_ich8lan)
2993                         e1000_gig_downshift_workaround_ich8lan(hw);
2994
2995                 /* When LPLU is enabled, we should disable SmartSpeed */
2996                 ret_val = phy->ops.read_reg(hw,
2997                                             IGP01E1000_PHY_PORT_CONFIG,
2998                                             &data);
2999                 if (ret_val)
3000                         return ret_val;
3001                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3002                 ret_val = phy->ops.write_reg(hw,
3003                                              IGP01E1000_PHY_PORT_CONFIG,
3004                                              data);
3005                 if (ret_val)
3006                         return ret_val;
3007         } else {
3008                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3009                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3010
3011                 if (phy->type != e1000_phy_igp_3)
3012                         return E1000_SUCCESS;
3013
3014                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3015                  * during Dx states where the power conservation is most
3016                  * important.  During driver activity we should enable
3017                  * SmartSpeed, so performance is maintained.
3018                  */
3019                 if (phy->smart_speed == e1000_smart_speed_on) {
3020                         ret_val = phy->ops.read_reg(hw,
3021                                                     IGP01E1000_PHY_PORT_CONFIG,
3022                                                     &data);
3023                         if (ret_val)
3024                                 return ret_val;
3025
3026                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3027                         ret_val = phy->ops.write_reg(hw,
3028                                                      IGP01E1000_PHY_PORT_CONFIG,
3029                                                      data);
3030                         if (ret_val)
3031                                 return ret_val;
3032                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3033                         ret_val = phy->ops.read_reg(hw,
3034                                                     IGP01E1000_PHY_PORT_CONFIG,
3035                                                     &data);
3036                         if (ret_val)
3037                                 return ret_val;
3038
3039                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3040                         ret_val = phy->ops.write_reg(hw,
3041                                                      IGP01E1000_PHY_PORT_CONFIG,
3042                                                      data);
3043                         if (ret_val)
3044                                 return ret_val;
3045                 }
3046         }
3047
3048         return E1000_SUCCESS;
3049 }
3050
3051 /**
3052  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3053  *  @hw: pointer to the HW structure
3054  *  @active: true to enable LPLU, false to disable
3055  *
3056  *  Sets the LPLU D3 state according to the active flag.  When
3057  *  activating LPLU this function also disables smart speed
3058  *  and vice versa.  LPLU will not be activated unless the
3059  *  device autonegotiation advertisement is 10, 10/100, or
3060  *  10/100/1000 Mb/s at all duplexes.
3061  *  This is a function pointer entry point only called by
3062  *  PHY setup routines.
3063  **/
3064 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3065 {
3066         struct e1000_phy_info *phy = &hw->phy;
3067         u32 phy_ctrl;
3068         s32 ret_val = E1000_SUCCESS;
3069         u16 data;
3070
3071         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3072
3073         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3074
3075         if (!active) {
3076                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3077                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3078
3079                 if (phy->type != e1000_phy_igp_3)
3080                         return E1000_SUCCESS;
3081
3082                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3083                  * during Dx states where the power conservation is most
3084                  * important.  During driver activity we should enable
3085                  * SmartSpeed, so performance is maintained.
3086                  */
3087                 if (phy->smart_speed == e1000_smart_speed_on) {
3088                         ret_val = phy->ops.read_reg(hw,
3089                                                     IGP01E1000_PHY_PORT_CONFIG,
3090                                                     &data);
3091                         if (ret_val)
3092                                 return ret_val;
3093
3094                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3095                         ret_val = phy->ops.write_reg(hw,
3096                                                      IGP01E1000_PHY_PORT_CONFIG,
3097                                                      data);
3098                         if (ret_val)
3099                                 return ret_val;
3100                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3101                         ret_val = phy->ops.read_reg(hw,
3102                                                     IGP01E1000_PHY_PORT_CONFIG,
3103                                                     &data);
3104                         if (ret_val)
3105                                 return ret_val;
3106
3107                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3108                         ret_val = phy->ops.write_reg(hw,
3109                                                      IGP01E1000_PHY_PORT_CONFIG,
3110                                                      data);
3111                         if (ret_val)
3112                                 return ret_val;
3113                 }
3114         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3115                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3116                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3117                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3118                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3119
3120                 if (phy->type != e1000_phy_igp_3)
3121                         return E1000_SUCCESS;
3122
3123                 /* Call gig speed drop workaround on LPLU before accessing
3124                  * any PHY registers
3125                  */
3126                 if (hw->mac.type == e1000_ich8lan)
3127                         e1000_gig_downshift_workaround_ich8lan(hw);
3128
3129                 /* When LPLU is enabled, we should disable SmartSpeed */
3130                 ret_val = phy->ops.read_reg(hw,
3131                                             IGP01E1000_PHY_PORT_CONFIG,
3132                                             &data);
3133                 if (ret_val)
3134                         return ret_val;
3135
3136                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3137                 ret_val = phy->ops.write_reg(hw,
3138                                              IGP01E1000_PHY_PORT_CONFIG,
3139                                              data);
3140         }
3141
3142         return ret_val;
3143 }
3144
3145 /**
3146  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3147  *  @hw: pointer to the HW structure
3148  *  @bank:  pointer to the variable that returns the active bank
3149  *
3150  *  Reads signature byte from the NVM using the flash access registers.
3151  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3152  **/
3153 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3154 {
3155         u32 eecd;
3156         struct e1000_nvm_info *nvm = &hw->nvm;
3157         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3158         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3159         u8 sig_byte = 0;
3160         s32 ret_val;
3161
3162         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3163
3164         switch (hw->mac.type) {
3165         case e1000_ich8lan:
3166         case e1000_ich9lan:
3167                 eecd = E1000_READ_REG(hw, E1000_EECD);
3168                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3169                     E1000_EECD_SEC1VAL_VALID_MASK) {
3170                         if (eecd & E1000_EECD_SEC1VAL)
3171                                 *bank = 1;
3172                         else
3173                                 *bank = 0;
3174
3175                         return E1000_SUCCESS;
3176                 }
3177                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3178                 /* fall-thru */
3179         default:
3180                 /* set bank to 0 in case flash read fails */
3181                 *bank = 0;
3182
3183                 /* Check bank 0 */
3184                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3185                                                         &sig_byte);
3186                 if (ret_val)
3187                         return ret_val;
3188                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3189                     E1000_ICH_NVM_SIG_VALUE) {
3190                         *bank = 0;
3191                         return E1000_SUCCESS;
3192                 }
3193
3194                 /* Check bank 1 */
3195                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3196                                                         bank1_offset,
3197                                                         &sig_byte);
3198                 if (ret_val)
3199                         return ret_val;
3200                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3201                     E1000_ICH_NVM_SIG_VALUE) {
3202                         *bank = 1;
3203                         return E1000_SUCCESS;
3204                 }
3205
3206                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3207                 return -E1000_ERR_NVM;
3208         }
3209 }
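
/* Illustrative sketch (editorial, not part of the upstream driver): the
 * signature lives in the high byte of word 0x13 of each bank, so the byte
 * offsets probed above work out to
 *
 *     u32 bank0_sig = E1000_ICH_NVM_SIG_WORD * 2 + 1;
 *     u32 bank1_sig = bank0_sig + nvm->flash_bank_size * sizeof(u16);
 *
 * and a bank is considered valid when
 * (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE,
 * i.e. bits 15:14 of word 0x13 read back as 10b.
 */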
3210
3211 /**
3212  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3213  *  @hw: pointer to the HW structure
3214  *  @offset: The offset (in words) of the word(s) to read.
3215  *  @words: Size of data to read in words
3216  *  @data: Pointer to the word(s) to read at offset.
3217  *
3218  *  Reads a word(s) from the NVM using the flash access registers.
3219  **/
3220 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3221                                   u16 *data)
3222 {
3223         struct e1000_nvm_info *nvm = &hw->nvm;
3224         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3225         u32 act_offset;
3226         s32 ret_val = E1000_SUCCESS;
3227         u32 bank = 0;
3228         u16 i, word;
3229
3230         DEBUGFUNC("e1000_read_nvm_ich8lan");
3231
3232         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3233             (words == 0)) {
3234                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3235                 ret_val = -E1000_ERR_NVM;
3236                 goto out;
3237         }
3238
3239         nvm->ops.acquire(hw);
3240
3241         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3242         if (ret_val != E1000_SUCCESS) {
3243                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3244                 bank = 0;
3245         }
3246
3247         act_offset = (bank) ? nvm->flash_bank_size : 0;
3248         act_offset += offset;
3249
3250         ret_val = E1000_SUCCESS;
3251         for (i = 0; i < words; i++) {
3252                 if (dev_spec->shadow_ram[offset+i].modified) {
3253                         data[i] = dev_spec->shadow_ram[offset+i].value;
3254                 } else {
3255                         ret_val = e1000_read_flash_word_ich8lan(hw,
3256                                                                 act_offset + i,
3257                                                                 &word);
3258                         if (ret_val)
3259                                 break;
3260                         data[i] = word;
3261                 }
3262         }
3263
3264         nvm->ops.release(hw);
3265
3266 out:
3267         if (ret_val)
3268                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3269
3270         return ret_val;
3271 }
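
/* Usage sketch (editorial, not part of the upstream driver): callers go
 * through the ops table rather than calling this routine directly, offsets
 * are word offsets, and words with pending (uncommitted) writes are served
 * from the shadow RAM.  A hypothetical caller:
 *
 *     u16 sig_word;
 *
 *     if (hw->nvm.ops.read(hw, E1000_ICH_NVM_SIG_WORD, 1, &sig_word) ==
 *         E1000_SUCCESS)
 *             DEBUGOUT1("NVM word 0x13 = 0x%04x\n", sig_word);
 */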
3272
3273 /**
3274  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3275  *  @hw: pointer to the HW structure
3276  *
3277  *  This function does initial flash setup so that a new read/write/erase cycle
3278  *  can be started.
3279  **/
3280 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3281 {
3282         union ich8_hws_flash_status hsfsts;
3283         s32 ret_val = -E1000_ERR_NVM;
3284
3285         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3286
3287         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3288
3289         /* Check if the flash descriptor is valid */
3290         if (!hsfsts.hsf_status.fldesvalid) {
3291                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3292                 return -E1000_ERR_NVM;
3293         }
3294
3295         /* Clear FCERR and DAEL in hw status by writing 1 */
3296         hsfsts.hsf_status.flcerr = 1;
3297         hsfsts.hsf_status.dael = 1;
3298         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3299
3300         /* Either we should have a hardware SPI cycle-in-progress
3301          * bit to check against in order to start a new cycle, or
3302          * the FDONE bit should be set to 1 by hardware after a
3303          * reset, which can then be used as an indication of
3304          * whether a cycle is in progress or has been
3305          * completed.
3306          */
3307
3308         if (!hsfsts.hsf_status.flcinprog) {
3309                 /* There is no cycle running at present,
3310                  * so we can start a cycle.
3311                  * Begin by setting Flash Cycle Done.
3312                  */
3313                 hsfsts.hsf_status.flcdone = 1;
3314                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3315                 ret_val = E1000_SUCCESS;
3316         } else {
3317                 s32 i;
3318
3319                 /* Otherwise poll for some time so the current
3320                  * cycle has a chance to end before giving up.
3321                  */
3322                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3323                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3324                                                               ICH_FLASH_HSFSTS);
3325                         if (!hsfsts.hsf_status.flcinprog) {
3326                                 ret_val = E1000_SUCCESS;
3327                                 break;
3328                         }
3329                         usec_delay(1);
3330                 }
3331                 if (ret_val == E1000_SUCCESS) {
3332                         /* The previous cycle ended within the wait,
3333                          * now set the Flash Cycle Done.
3334                          */
3335                         hsfsts.hsf_status.flcdone = 1;
3336                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3337                                                 hsfsts.regval);
3338                 } else {
3339                         DEBUGOUT("Flash controller busy, cannot get access\n");
3340                 }
3341         }
3342
3343         return ret_val;
3344 }
3345
3346 /**
3347  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3348  *  @hw: pointer to the HW structure
3349  *  @timeout: maximum time to wait for completion
3350  *
3351  *  This function starts a flash cycle and waits for its completion.
3352  **/
3353 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3354 {
3355         union ich8_hws_flash_ctrl hsflctl;
3356         union ich8_hws_flash_status hsfsts;
3357         u32 i = 0;
3358
3359         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3360
3361         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3362         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3363         hsflctl.hsf_ctrl.flcgo = 1;
3364
3365         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3366
3367         /* wait until the FDONE bit is set to 1 */
3368         do {
3369                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3370                 if (hsfsts.hsf_status.flcdone)
3371                         break;
3372                 usec_delay(1);
3373         } while (i++ < timeout);
3374
3375         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3376                 return E1000_SUCCESS;
3377
3378         return -E1000_ERR_NVM;
3379 }
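
/* Protocol sketch (editorial, not part of the upstream driver): every
 * software-sequenced flash access in this file is built from the two helpers
 * above and follows the same pattern used by the read/write/erase callers:
 *
 *     ret_val = e1000_flash_cycle_init_ich8lan(hw);      // clear FCERR/DAEL, check FLCINPROG
 *     hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
 *     // ... set fldbcount/flcycle for the desired operation ...
 *     E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 *     E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
 *     ret_val = e1000_flash_cycle_ich8lan(hw, timeout);  // set FLCGO, poll FDONE
 *
 * with a retry loop around the whole sequence when FCERR is reported.
 */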
3380
3381 /**
3382  *  e1000_read_flash_word_ich8lan - Read word from flash
3383  *  @hw: pointer to the HW structure
3384  *  @offset: offset to data location
3385  *  @data: pointer to the location for storing the data
3386  *
3387  *  Reads the flash word at offset into data.  Offset is converted
3388  *  to bytes before read.
3389  **/
3390 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3391                                          u16 *data)
3392 {
3393         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3394
3395         if (!data)
3396                 return -E1000_ERR_NVM;
3397
3398         /* Must convert offset into bytes. */
3399         offset <<= 1;
3400
3401         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3402 }
3403
3404 /**
3405  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3406  *  @hw: pointer to the HW structure
3407  *  @offset: The offset of the byte to read.
3408  *  @data: Pointer to a byte to store the value read.
3409  *
3410  *  Reads a single byte from the NVM using the flash access registers.
3411  **/
3412 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3413                                          u8 *data)
3414 {
3415         s32 ret_val;
3416         u16 word = 0;
3417
3418         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3419
3420         if (ret_val)
3421                 return ret_val;
3422
3423         *data = (u8)word;
3424
3425         return E1000_SUCCESS;
3426 }
3427
3428 /**
3429  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3430  *  @hw: pointer to the HW structure
3431  *  @offset: The offset (in bytes) of the byte or word to read.
3432  *  @size: Size of data to read, 1=byte 2=word
3433  *  @data: Pointer to the word to store the value read.
3434  *
3435  *  Reads a byte or word from the NVM using the flash access registers.
3436  **/
3437 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3438                                          u8 size, u16 *data)
3439 {
3440         union ich8_hws_flash_status hsfsts;
3441         union ich8_hws_flash_ctrl hsflctl;
3442         u32 flash_linear_addr;
3443         u32 flash_data = 0;
3444         s32 ret_val = -E1000_ERR_NVM;
3445         u8 count = 0;
3446
3447         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3448
3449         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3450                 return -E1000_ERR_NVM;
3451         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3452                              hw->nvm.flash_base_addr);
3453
3454         do {
3455                 usec_delay(1);
3456                 /* Steps */
3457                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3458                 if (ret_val != E1000_SUCCESS)
3459                         break;
3460                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3461
3462                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3463                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3464                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3465                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3466
3467                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3468
3469                 ret_val =
3470                     e1000_flash_cycle_ich8lan(hw,
3471                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3472
3473                 /* If FCERR is set to 1, clear it and try the whole
3474                  * sequence a few more times; otherwise read in
3475                  * (shift in) the Flash Data0 register, least
3476                  * significant byte first.
3477                  */
3478                 if (ret_val == E1000_SUCCESS) {
3479                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3480                         if (size == 1)
3481                                 *data = (u8)(flash_data & 0x000000FF);
3482                         else if (size == 2)
3483                                 *data = (u16)(flash_data & 0x0000FFFF);
3484                         break;
3485                 } else {
3486                         /* If we've gotten here, then things are probably
3487                          * completely hosed, but if the error condition is
3488                          * detected, it won't hurt to give it another try...
3489                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3490                          */
3491                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3492                                                               ICH_FLASH_HSFSTS);
3493                         if (hsfsts.hsf_status.flcerr) {
3494                                 /* Repeat for some time before giving up. */
3495                                 continue;
3496                         } else if (!hsfsts.hsf_status.flcdone) {
3497                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3498                                 break;
3499                         }
3500                 }
3501         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3502
3503         return ret_val;
3504 }
3505
3506 /**
3507  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3508  *  @hw: pointer to the HW structure
3509  *  @offset: The offset (in words) of the word(s) to write.
3510  *  @words: Size of data to write in words
3511  *  @data: Pointer to the word(s) to write at offset.
3512  *
3513  *  Writes the word(s) to the shadow RAM; committed to the NVM on checksum update.
3514  **/
3515 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3516                                    u16 *data)
3517 {
3518         struct e1000_nvm_info *nvm = &hw->nvm;
3519         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3520         u16 i;
3521
3522         DEBUGFUNC("e1000_write_nvm_ich8lan");
3523
3524         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3525             (words == 0)) {
3526                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3527                 return -E1000_ERR_NVM;
3528         }
3529
3530         nvm->ops.acquire(hw);
3531
3532         for (i = 0; i < words; i++) {
3533                 dev_spec->shadow_ram[offset+i].modified = true;
3534                 dev_spec->shadow_ram[offset+i].value = data[i];
3535         }
3536
3537         nvm->ops.release(hw);
3538
3539         return E1000_SUCCESS;
3540 }
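
/* Usage sketch (editorial, not part of the upstream driver): since this
 * routine only updates the shadow RAM, a hypothetical caller must follow up
 * with a checksum update to commit the data to the flash:
 *
 *     u16 val = 0x1234;      // illustrative value only
 *
 *     ret_val = hw->nvm.ops.write(hw, word_offset, 1, &val);  // word_offset: any in-range word
 *     if (!ret_val)
 *             ret_val = hw->nvm.ops.update(hw);   // commits shadow RAM to flash
 */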
3541
3542 /**
3543  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3544  *  @hw: pointer to the HW structure
3545  *
3546  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3547  *  which writes the checksum to the shadow ram.  The changes in the shadow
3548  *  ram are then committed to the EEPROM by processing each bank at a time
3549  *  checking for the modified bit and writing only the pending changes.
3550  *  After a successful commit, the shadow ram is cleared and is ready for
3551  *  future writes.
3552  **/
3553 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3554 {
3555         struct e1000_nvm_info *nvm = &hw->nvm;
3556         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3557         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3558         s32 ret_val;
3559         u16 data;
3560
3561         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3562
3563         ret_val = e1000_update_nvm_checksum_generic(hw);
3564         if (ret_val)
3565                 goto out;
3566
3567         if (nvm->type != e1000_nvm_flash_sw)
3568                 goto out;
3569
3570         nvm->ops.acquire(hw);
3571
3572         /* We're writing to the opposite bank so if we're on bank 1,
3573          * write to bank 0 etc.  We also need to erase the segment that
3574          * is going to be written
3575          */
3576         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3577         if (ret_val != E1000_SUCCESS) {
3578                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3579                 bank = 0;
3580         }
3581
3582         if (bank == 0) {
3583                 new_bank_offset = nvm->flash_bank_size;
3584                 old_bank_offset = 0;
3585                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3586                 if (ret_val)
3587                         goto release;
3588         } else {
3589                 old_bank_offset = nvm->flash_bank_size;
3590                 new_bank_offset = 0;
3591                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3592                 if (ret_val)
3593                         goto release;
3594         }
3595
3596         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3597                 /* Determine whether to write the value stored
3598                  * in the other NVM bank or a modified value stored
3599                  * in the shadow RAM
3600                  */
3601                 if (dev_spec->shadow_ram[i].modified) {
3602                         data = dev_spec->shadow_ram[i].value;
3603                 } else {
3604                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3605                                                                 old_bank_offset,
3606                                                                 &data);
3607                         if (ret_val)
3608                                 break;
3609                 }
3610
3611                 /* If the word is 0x13, then make sure the signature bits
3612                  * (15:14) are 11b until the commit has completed.
3613                  * This will allow us to write 10b which indicates the
3614                  * signature is valid.  We want to do this after the write
3615                  * has completed so that we don't mark the segment valid
3616                  * while the write is still in progress
3617                  */
3618                 if (i == E1000_ICH_NVM_SIG_WORD)
3619                         data |= E1000_ICH_NVM_SIG_MASK;
3620
3621                 /* Convert offset to bytes. */
3622                 act_offset = (i + new_bank_offset) << 1;
3623
3624                 usec_delay(100);
3625                 /* Write the bytes to the new bank. */
3626                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3627                                                                act_offset,
3628                                                                (u8)data);
3629                 if (ret_val)
3630                         break;
3631
3632                 usec_delay(100);
3633                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3634                                                           act_offset + 1,
3635                                                           (u8)(data >> 8));
3636                 if (ret_val)
3637                         break;
3638         }
3639
3640         /* Don't bother writing the segment valid bits if sector
3641          * programming failed.
3642          */
3643         if (ret_val) {
3644                 DEBUGOUT("Flash commit failed.\n");
3645                 goto release;
3646         }
3647
3648         /* Finally validate the new segment by setting bits 15:14
3649          * to 10b in word 0x13.  This can be done without an
3650          * erase as well, since these bits are 11b to start with
3651          * and we only need to change bit 14 to 0b.
3652          */
3653         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3654         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3655         if (ret_val)
3656                 goto release;
3657
3658         data &= 0xBFFF;
3659         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3660                                                        act_offset * 2 + 1,
3661                                                        (u8)(data >> 8));
3662         if (ret_val)
3663                 goto release;
3664
3665         /* And invalidate the previously valid segment by setting
3666          * the high byte of its signature word (0x13) to 0. This can be
3667          * done without an erase because a flash erase sets all bits
3668          * to 1's, and we can change 1's to 0's without an erase.
3669          */
3670         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3671         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3672         if (ret_val)
3673                 goto release;
3674
3675         /* Great!  Everything worked, we can now clear the cached entries. */
3676         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3677                 dev_spec->shadow_ram[i].modified = false;
3678                 dev_spec->shadow_ram[i].value = 0xFFFF;
3679         }
3680
3681 release:
3682         nvm->ops.release(hw);
3683
3684         /* Reload the EEPROM, or else modifications will not appear
3685          * until after the next adapter reset.
3686          */
3687         if (!ret_val) {
3688                 nvm->ops.reload(hw);
3689                 msec_delay(10);
3690         }
3691
3692 out:
3693         if (ret_val)
3694                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3695
3696         return ret_val;
3697 }
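
/* Worked example (editorial, not part of the upstream driver): the commit
 * above only ever clears bits in the two signature words (bits 15:14 of word
 * 0x13), which is why no additional erase is required:
 *
 *     new bank:  11b (erased)  ->  10b   clear bit 14    (1 -> 0 is a plain write)
 *     old bank:  10b (valid)   ->  00b   high byte = 0   (1 -> 0 is a plain write)
 *
 * Only an erase can take a bit from 0 back to 1, so this sequencing keeps the
 * bank swap within ordinary byte writes.
 */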
3698
3699 /**
3700  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3701  *  @hw: pointer to the HW structure
3702  *
3703  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19
3704  *  (the NVM_COMPAT word is used instead on PCH_LPT).  If the bit is 0, the EEPROM
3705  *  has been modified but the checksum was not calculated; calculate it and set the bit.
3706  **/
3707 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3708 {
3709         s32 ret_val;
3710         u16 data;
3711         u16 word;
3712         u16 valid_csum_mask;
3713
3714         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3715
3716         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3717          * the checksum needs to be fixed.  This bit is an indication that
3718          * the NVM was prepared by OEM software and did not calculate
3719          * the checksum...a likely scenario.
3720          */
3721         switch (hw->mac.type) {
3722         case e1000_pch_lpt:
3723                 word = NVM_COMPAT;
3724                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3725                 break;
3726         default:
3727                 word = NVM_FUTURE_INIT_WORD1;
3728                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3729                 break;
3730         }
3731
3732         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3733         if (ret_val)
3734                 return ret_val;
3735
3736         if (!(data & valid_csum_mask)) {
3737                 data |= valid_csum_mask;
3738                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3739                 if (ret_val)
3740                         return ret_val;
3741                 ret_val = hw->nvm.ops.update(hw);
3742                 if (ret_val)
3743                         return ret_val;
3744         }
3745
3746         return e1000_validate_nvm_checksum_generic(hw);
3747 }
3748
3749 /**
3750  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3751  *  @hw: pointer to the HW structure
3752  *  @offset: The offset (in bytes) of the byte/word to write.
3753  *  @size: Size of data to write, 1=byte 2=word
3754  *  @data: The byte(s) to write to the NVM.
3755  *
3756  *  Writes one/two bytes to the NVM using the flash access registers.
3757  **/
3758 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3759                                           u8 size, u16 data)
3760 {
3761         union ich8_hws_flash_status hsfsts;
3762         union ich8_hws_flash_ctrl hsflctl;
3763         u32 flash_linear_addr;
3764         u32 flash_data = 0;
3765         s32 ret_val;
3766         u8 count = 0;
3767
3768         DEBUGFUNC("e1000_write_ich8_data");
3769
3770         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3771                 return -E1000_ERR_NVM;
3772
3773         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3774                              hw->nvm.flash_base_addr);
3775
3776         do {
3777                 usec_delay(1);
3778                 /* Steps */
3779                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3780                 if (ret_val != E1000_SUCCESS)
3781                         break;
3782                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3783
3784                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3785                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3786                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3787                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3788
3789                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3790
3791                 if (size == 1)
3792                         flash_data = (u32)data & 0x00FF;
3793                 else
3794                         flash_data = (u32)data;
3795
3796                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3797
3798                 /* If FCERR is set to 1, clear it and try the whole
3799                  * sequence a few more times, else we are done.
3800                  */
3801                 ret_val =
3802                     e1000_flash_cycle_ich8lan(hw,
3803                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3804                 if (ret_val == E1000_SUCCESS)
3805                         break;
3806
3807                 /* If we're here, then things are most likely
3808                  * completely hosed, but if the error condition
3809                  * is detected, it won't hurt to give it another
3810                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3811                  */
3812                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3813                 if (hsfsts.hsf_status.flcerr)
3814                         /* Repeat for some time before giving up. */
3815                         continue;
3816                 if (!hsfsts.hsf_status.flcdone) {
3817                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3818                         break;
3819                 }
3820         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3821
3822         return ret_val;
3823 }
3824
3825 /**
3826  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3827  *  @hw: pointer to the HW structure
3828  *  @offset: The offset of the byte to write.
3829  *  @data: The byte to write to the NVM.
3830  *
3831  *  Writes a single byte to the NVM using the flash access registers.
3832  **/
3833 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3834                                           u8 data)
3835 {
3836         u16 word = (u16)data;
3837
3838         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3839
3840         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3841 }
3842
3843 /**
3844  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3845  *  @hw: pointer to the HW structure
3846  *  @offset: The offset of the byte to write.
3847  *  @byte: The byte to write to the NVM.
3848  *
3849  *  Writes a single byte to the NVM using the flash access registers.
3850  *  Goes through a retry algorithm before giving up.
3851  **/
3852 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3853                                                 u32 offset, u8 byte)
3854 {
3855         s32 ret_val;
3856         u16 program_retries;
3857
3858         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3859
3860         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3861         if (!ret_val)
3862                 return ret_val;
3863
3864         for (program_retries = 0; program_retries < 100; program_retries++) {
3865                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3866                 usec_delay(100);
3867                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3868                 if (ret_val == E1000_SUCCESS)
3869                         break;
3870         }
3871         if (program_retries == 100)
3872                 return -E1000_ERR_NVM;
3873
3874         return E1000_SUCCESS;
3875 }
3876
3877 /**
3878  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3879  *  @hw: pointer to the HW structure
3880  *  @bank: 0 for first bank, 1 for second bank, etc.
3881  *
3882  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3883  *  Bank N starts at byte offset 4096 * N from the flash base address.
3884  **/
3885 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3886 {
3887         struct e1000_nvm_info *nvm = &hw->nvm;
3888         union ich8_hws_flash_status hsfsts;
3889         union ich8_hws_flash_ctrl hsflctl;
3890         u32 flash_linear_addr;
3891         /* bank size is in 16bit words - adjust to bytes */
3892         u32 flash_bank_size = nvm->flash_bank_size * 2;
3893         s32 ret_val;
3894         s32 count = 0;
3895         s32 j, iteration, sector_size;
3896
3897         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3898
3899         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3900
3901         /* Determine HW Sector size: Read BERASE bits of hw flash status
3902          * register
3903          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3904          *     consecutive sectors.  The start index for the nth Hw sector
3905          *     can be calculated as = bank * 4096 + n * 256
3906          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3907          *     The start index for the nth Hw sector can be calculated
3908          *     as = bank * 4096
3909          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3910          *     (ich9 only, otherwise error condition)
3911          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3912          */
3913         switch (hsfsts.hsf_status.berasesz) {
3914         case 0:
3915                 /* Hw sector size 256 */
3916                 sector_size = ICH_FLASH_SEG_SIZE_256;
3917                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3918                 break;
3919         case 1:
3920                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3921                 iteration = 1;
3922                 break;
3923         case 2:
3924                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3925                 iteration = 1;
3926                 break;
3927         case 3:
3928                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3929                 iteration = 1;
3930                 break;
3931         default:
3932                 return -E1000_ERR_NVM;
3933         }
3934
3935         /* Start with the base address, then add the bank offset. */
3936         flash_linear_addr = hw->nvm.flash_base_addr;
3937         flash_linear_addr += (bank) ? flash_bank_size : 0;
3938
3939         for (j = 0; j < iteration; j++) {
3940                 do {
3941                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3942
3943                         /* Steps */
3944                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3945                         if (ret_val)
3946                                 return ret_val;
3947
3948                         /* Write a value 11 (block Erase) in Flash
3949                          * Cycle field in hw flash control
3950                          */
3951                         hsflctl.regval =
3952                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3953
3954                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3955                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3956                                                 hsflctl.regval);
3957
3958                         /* Write the last 24 bits of an index within the
3959                          * block into Flash Linear address field in Flash
3960                          * Address.
3961                          */
3962                         flash_linear_addr += (j * sector_size);
3963                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3964                                               flash_linear_addr);
3965
3966                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3967                         if (ret_val == E1000_SUCCESS)
3968                                 break;
3969
3970                         /* Check if FCERR is set to 1.  If 1,
3971                          * clear it and try the whole sequence
3972                          * a few more times else Done
3973                          */
3974                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3975                                                       ICH_FLASH_HSFSTS);
3976                         if (hsfsts.hsf_status.flcerr)
3977                                 /* repeat for some time before giving up */
3978                                 continue;
3979                         else if (!hsfsts.hsf_status.flcdone)
3980                                 return ret_val;
3981                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3982         }
3983
3984         return E1000_SUCCESS;
3985 }
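
/* Worked example (editorial, not part of the upstream driver): assuming an
 * illustrative bank size of 2048 words (4096 bytes) and BERASE = 00b
 * (256-byte hardware sectors), the loop above issues
 *
 *     iteration = (2048 * 2) / ICH_FLASH_SEG_SIZE_256 = 4096 / 256 = 16
 *
 * erase cycles, one per 256-byte sector; for the larger BERASE encodings a
 * single erase cycle covers the whole bank.
 */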
3986
3987 /**
3988  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3989  *  @hw: pointer to the HW structure
3990  *  @data: Pointer to the LED settings
3991  *
3992  *  Reads the LED default settings from the NVM into data.  If the NVM LED
3993  *  setting is all 0's or all F's, set the LED default to a valid LED default
3994  *  setting.
3995  **/
3996 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3997 {
3998         s32 ret_val;
3999
4000         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4001
4002         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4003         if (ret_val) {
4004                 DEBUGOUT("NVM Read Error\n");
4005                 return ret_val;
4006         }
4007
4008         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4009                 *data = ID_LED_DEFAULT_ICH8LAN;
4010
4011         return E1000_SUCCESS;
4012 }
4013
4014 /**
4015  *  e1000_id_led_init_pchlan - store LED configurations
4016  *  @hw: pointer to the HW structure
4017  *
4018  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4019  *  the PHY LED configuration register.
4020  *
4021  *  PCH also does not have an "always on" or "always off" mode which
4022  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4023  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4024  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4025  *  link based on logic in e1000_led_[on|off]_pchlan().
4026  **/
4027 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4028 {
4029         struct e1000_mac_info *mac = &hw->mac;
4030         s32 ret_val;
4031         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4032         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4033         u16 data, i, temp, shift;
4034
4035         DEBUGFUNC("e1000_id_led_init_pchlan");
4036
4037         /* Get default ID LED modes */
4038         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4039         if (ret_val)
4040                 return ret_val;
4041
4042         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4043         mac->ledctl_mode1 = mac->ledctl_default;
4044         mac->ledctl_mode2 = mac->ledctl_default;
4045
4046         for (i = 0; i < 4; i++) {
4047                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4048                 shift = (i * 5);
4049                 switch (temp) {
4050                 case ID_LED_ON1_DEF2:
4051                 case ID_LED_ON1_ON2:
4052                 case ID_LED_ON1_OFF2:
4053                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4054                         mac->ledctl_mode1 |= (ledctl_on << shift);
4055                         break;
4056                 case ID_LED_OFF1_DEF2:
4057                 case ID_LED_OFF1_ON2:
4058                 case ID_LED_OFF1_OFF2:
4059                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4060                         mac->ledctl_mode1 |= (ledctl_off << shift);
4061                         break;
4062                 default:
4063                         /* Do nothing */
4064                         break;
4065                 }
4066                 switch (temp) {
4067                 case ID_LED_DEF1_ON2:
4068                 case ID_LED_ON1_ON2:
4069                 case ID_LED_OFF1_ON2:
4070                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4071                         mac->ledctl_mode2 |= (ledctl_on << shift);
4072                         break;
4073                 case ID_LED_DEF1_OFF2:
4074                 case ID_LED_ON1_OFF2:
4075                 case ID_LED_OFF1_OFF2:
4076                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4077                         mac->ledctl_mode2 |= (ledctl_off << shift);
4078                         break;
4079                 default:
4080                         /* Do nothing */
4081                         break;
4082                 }
4083         }
4084
4085         return E1000_SUCCESS;
4086 }
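
/* Illustrative example (editor's addition, not upstream code): the NVM ID LED
 * word packs one 4-bit code per LED while the PHY LED configuration packs one
 * 5-bit field per LED, so nibble i of 'data' drives bits [5*i+4:5*i] of
 * ledctl_mode1/ledctl_mode2.  For LED1 (i == 1) the "on" case above reduces
 * to roughly:
 *
 *      temp  = (data >> 4) & E1000_LEDCTL_LED0_MODE_MASK;
 *      shift = 5;
 *      mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
 *      mac->ledctl_mode1 |= (E1000_LEDCTL_MODE_LINK_UP << shift);
 *
 * The "off" cases additionally OR in E1000_PHY_LED0_IVRT so that
 * e1000_led_on/off_pchlan() can invert the LED when there is no link.
 */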
4087
4088 /**
4089  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4090  *  @hw: pointer to the HW structure
4091  *
4092  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4093  *  register, so the bus width is hard coded.
4094  **/
4095 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4096 {
4097         struct e1000_bus_info *bus = &hw->bus;
4098         s32 ret_val;
4099
4100         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4101
4102         ret_val = e1000_get_bus_info_pcie_generic(hw);
4103
4104         /* ICH devices are "PCI Express"-ish.  They have
4105          * a configuration space, but do not contain
4106          * PCI Express Capability registers, so bus width
4107          * must be hardcoded.
4108          */
4109         if (bus->width == e1000_bus_width_unknown)
4110                 bus->width = e1000_bus_width_pcie_x1;
4111
4112         return ret_val;
4113 }
4114
4115 /**
4116  *  e1000_reset_hw_ich8lan - Reset the hardware
4117  *  @hw: pointer to the HW structure
4118  *
4119  *  Does a full reset of the hardware which includes a reset of the PHY and
4120  *  MAC.
4121  **/
4122 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4123 {
4124         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4125         u16 kum_cfg;
4126         u32 ctrl, reg;
4127         s32 ret_val;
4128
4129         DEBUGFUNC("e1000_reset_hw_ich8lan");
4130
4131         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4132          * on the last TLP read/write transaction when MAC is reset.
4133          */
4134         ret_val = e1000_disable_pcie_master_generic(hw);
4135         if (ret_val)
4136                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4137
4138         DEBUGOUT("Masking off all interrupts\n");
4139         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4140
4141         /* Disable the Transmit and Receive units.  Then delay to allow
4142          * any pending transactions to complete before we hit the MAC
4143          * with the global reset.
4144          */
4145         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4146         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4147         E1000_WRITE_FLUSH(hw);
4148
4149         msec_delay(10);
4150
4151         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4152         if (hw->mac.type == e1000_ich8lan) {
4153                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4154                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4155                 /* Set Packet Buffer Size to 16k. */
4156                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4157         }
4158
4159         if (hw->mac.type == e1000_pchlan) {
4160                 /* Save the NVM K1 bit setting */
4161                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4162                 if (ret_val)
4163                         return ret_val;
4164
4165                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4166                         dev_spec->nvm_k1_enabled = true;
4167                 else
4168                         dev_spec->nvm_k1_enabled = false;
4169         }
4170
4171         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4172
4173         if (!hw->phy.ops.check_reset_block(hw)) {
4174                 /* Full-chip reset requires MAC and PHY reset at the same
4175                  * time to make sure the interface between MAC and the
4176                  * external PHY is reset.
4177                  */
4178                 ctrl |= E1000_CTRL_PHY_RST;
4179
4180                 /* Gate automatic PHY configuration by hardware on
4181                  * non-managed 82579
4182                  */
4183                 if ((hw->mac.type == e1000_pch2lan) &&
4184                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4185                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4186         }
4187         ret_val = e1000_acquire_swflag_ich8lan(hw);
4188         DEBUGOUT("Issuing a global reset to ich8lan\n");
4189         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4190         /* cannot issue a flush here because it hangs the hardware */
4191         msec_delay(20);
4192
4193         /* Set Phy Config Counter to 50msec */
4194         if (hw->mac.type == e1000_pch2lan) {
4195                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4196                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4197                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4198                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4199         }
4200
4201         if (!ret_val)
4202                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4203
4204         if (ctrl & E1000_CTRL_PHY_RST) {
4205                 ret_val = hw->phy.ops.get_cfg_done(hw);
4206                 if (ret_val)
4207                         return ret_val;
4208
4209                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4210                 if (ret_val)
4211                         return ret_val;
4212         }
4213
4214         /* For PCH, this write will make sure that any noise
4215          * will be detected as a CRC error and be dropped rather than show up
4216          * as a bad packet to the DMA engine.
4217          */
4218         if (hw->mac.type == e1000_pchlan)
4219                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4220
4221         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4222         E1000_READ_REG(hw, E1000_ICR);
4223
4224         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4225         reg |= E1000_KABGTXD_BGSQLBIAS;
4226         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4227
4228         return E1000_SUCCESS;
4229 }
4230
4231 /**
4232  *  e1000_init_hw_ich8lan - Initialize the hardware
4233  *  @hw: pointer to the HW structure
4234  *
4235  *  Prepares the hardware for transmit and receive by doing the following:
4236  *   - initialize hardware bits
4237  *   - initialize LED identification
4238  *   - setup receive address registers
4239  *   - setup flow control
4240  *   - setup transmit descriptors
4241  *   - clear statistics
4242  **/
4243 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4244 {
4245         struct e1000_mac_info *mac = &hw->mac;
4246         u32 ctrl_ext, txdctl, snoop;
4247         s32 ret_val;
4248         u16 i;
4249
4250         DEBUGFUNC("e1000_init_hw_ich8lan");
4251
4252         e1000_initialize_hw_bits_ich8lan(hw);
4253
4254         /* Initialize identification LED */
4255         ret_val = mac->ops.id_led_init(hw);
4256         /* An error is not fatal and we should not stop init due to this */
4257         if (ret_val)
4258                 DEBUGOUT("Error initializing identification LED\n");
4259
4260         /* Setup the receive address. */
4261         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4262
4263         /* Zero out the Multicast HASH table */
4264         DEBUGOUT("Zeroing the MTA\n");
4265         for (i = 0; i < mac->mta_reg_count; i++)
4266                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4267
4268         /* The 82578 Rx buffer will stall if wakeup is enabled in both the
4269          * host and the ME.  Disable wakeup by clearing the host wakeup bit.
4270          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4271          */
4272         if (hw->phy.type == e1000_phy_82578) {
4273                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4274                 i &= ~BM_WUC_HOST_WU_BIT;
4275                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4276                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4277                 if (ret_val)
4278                         return ret_val;
4279         }
4280
4281         /* Setup link and flow control */
4282         ret_val = mac->ops.setup_link(hw);
4283
4284         /* Set the transmit descriptor write-back policy for both queues */
4285         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4286         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4287                   E1000_TXDCTL_FULL_TX_DESC_WB);
4288         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4289                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4290         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4291         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4292         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4293                   E1000_TXDCTL_FULL_TX_DESC_WB);
4294         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4295                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4296         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4297
4298         /* ICH8 has opposite polarity of no_snoop bits.
4299          * By default, we should use snoop behavior.
4300          */
4301         if (mac->type == e1000_ich8lan)
4302                 snoop = PCIE_ICH8_SNOOP_ALL;
4303         else
4304                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4305         e1000_set_pcie_no_snoop_generic(hw, snoop);
4306
4307         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4308         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4309         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4310
4311         /* Clear all of the statistics registers (clear on read).  It is
4312          * important that we do this after we have tried to establish link
4313          * because the symbol error count will increment wildly if there
4314          * is no link.
4315          */
4316         e1000_clear_hw_cntrs_ich8lan(hw);
4317
4318         return ret_val;
4319 }
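
/* Illustrative sketch (editor's addition, not upstream code): init_hw is
 * normally preceded by a full reset, so a bring-up path on these parts looks
 * roughly like the following, assuming the usual e1000 base function tables:
 *
 *      ret_val = hw->mac.ops.reset_hw(hw);
 *      if (!ret_val)
 *              ret_val = hw->mac.ops.init_hw(hw);
 *
 * Note that the statistics registers are cleared last, after setup_link, so
 * the symbol error counter is not polluted while there is no link.
 */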
4320
4321 /**
4322  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4323  *  @hw: pointer to the HW structure
4324  *
4325  *  Sets/Clears the hardware bits necessary for correctly setting up the
4326  *  hardware for transmit and receive.
4327  **/
4328 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4329 {
4330         u32 reg;
4331
4332         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4333
4334         /* Extended Device Control */
4335         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4336         reg |= (1 << 22);
4337         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4338         if (hw->mac.type >= e1000_pchlan)
4339                 reg |= E1000_CTRL_EXT_PHYPDEN;
4340         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4341
4342         /* Transmit Descriptor Control 0 */
4343         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4344         reg |= (1 << 22);
4345         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4346
4347         /* Transmit Descriptor Control 1 */
4348         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4349         reg |= (1 << 22);
4350         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4351
4352         /* Transmit Arbitration Control 0 */
4353         reg = E1000_READ_REG(hw, E1000_TARC(0));
4354         if (hw->mac.type == e1000_ich8lan)
4355                 reg |= (1 << 28) | (1 << 29);
4356         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4357         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4358
4359         /* Transmit Arbitration Control 1 */
4360         reg = E1000_READ_REG(hw, E1000_TARC(1));
4361         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4362                 reg &= ~(1 << 28);
4363         else
4364                 reg |= (1 << 28);
4365         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4366         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4367
4368         /* Device Status */
4369         if (hw->mac.type == e1000_ich8lan) {
4370                 reg = E1000_READ_REG(hw, E1000_STATUS);
4371                 reg &= ~(1 << 31);
4372                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4373         }
4374
4375         /* Work around a descriptor data corruption issue seen with NFSv2
4376          * UDP traffic by simply disabling the NFS filtering capability.
4377          */
4378         reg = E1000_READ_REG(hw, E1000_RFCTL);
4379         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4380
4381         /* Disable IPv6 extension header parsing because some malformed
4382          * IPv6 headers can hang the Rx.
4383          */
4384         if (hw->mac.type == e1000_ich8lan)
4385                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4386         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4387
4388         /* Enable ECC on Lynxpoint */
4389         if (hw->mac.type == e1000_pch_lpt) {
4390                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4391                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4392                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4393
4394                 reg = E1000_READ_REG(hw, E1000_CTRL);
4395                 reg |= E1000_CTRL_MEHE;
4396                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4397         }
4398
4399         return;
4400 }
4401
4402 /**
4403  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4404  *  @hw: pointer to the HW structure
4405  *
4406  *  Determines which flow control settings to use, then configures flow
4407  *  control.  Calls the appropriate media-specific link configuration
4408  *  function.  Assuming the adapter has a valid link partner, a valid link
4409  *  should be established.  Assumes the hardware has previously been reset
4410  *  and the transmitter and receiver are not enabled.
4411  **/
4412 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4413 {
4414         s32 ret_val;
4415
4416         DEBUGFUNC("e1000_setup_link_ich8lan");
4417
4418         if (hw->phy.ops.check_reset_block(hw))
4419                 return E1000_SUCCESS;
4420
4421         /* ICH parts do not have a word in the NVM to determine
4422          * the default flow control setting, so we explicitly
4423          * set it to full.
4424          */
4425         if (hw->fc.requested_mode == e1000_fc_default)
4426                 hw->fc.requested_mode = e1000_fc_full;
4427
4428         /* Save off the requested flow control mode for use later.  Depending
4429          * on the link partner's capabilities, we may or may not use this mode.
4430          */
4431         hw->fc.current_mode = hw->fc.requested_mode;
4432
4433         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4434                 hw->fc.current_mode);
4435
4436         /* Continue to configure the copper link. */
4437         ret_val = hw->mac.ops.setup_physical_interface(hw);
4438         if (ret_val)
4439                 return ret_val;
4440
4441         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4442         if ((hw->phy.type == e1000_phy_82578) ||
4443             (hw->phy.type == e1000_phy_82579) ||
4444             (hw->phy.type == e1000_phy_i217) ||
4445             (hw->phy.type == e1000_phy_82577)) {
4446                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4447
4448                 ret_val = hw->phy.ops.write_reg(hw,
4449                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4450                                              hw->fc.pause_time);
4451                 if (ret_val)
4452                         return ret_val;
4453         }
4454
4455         return e1000_set_fc_watermarks_generic(hw);
4456 }
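
/* Illustrative sketch (editor's addition, not upstream code): a driver that
 * wants something other than full flow control sets hw->fc.requested_mode
 * before link setup, e.g. roughly (assuming the usual e1000 flow control
 * enums):
 *
 *      hw->fc.requested_mode = e1000_fc_rx_pause;
 *      ret_val = hw->mac.ops.setup_link(hw);
 *
 * Only e1000_fc_default is overridden (to e1000_fc_full) above; any explicit
 * request is carried into hw->fc.current_mode unchanged.
 */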
4457
4458 /**
4459  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4460  *  @hw: pointer to the HW structure
4461  *
4462  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4463  *  when polling the PHY, then calls the generic setup_copper_link to finish
4464  *  configuring the copper link.
4465  **/
4466 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4467 {
4468         u32 ctrl;
4469         s32 ret_val;
4470         u16 reg_data;
4471
4472         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4473
4474         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4475         ctrl |= E1000_CTRL_SLU;
4476         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4477         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4478
4479         /* Set the mac to wait the maximum time between each iteration
4480          * and increase the max iterations when polling the phy;
4481          * this fixes erroneous timeouts at 10Mbps.
4482          */
4483         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4484                                                0xFFFF);
4485         if (ret_val)
4486                 return ret_val;
4487         ret_val = e1000_read_kmrn_reg_generic(hw,
4488                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4489                                               &reg_data);
4490         if (ret_val)
4491                 return ret_val;
4492         reg_data |= 0x3F;
4493         ret_val = e1000_write_kmrn_reg_generic(hw,
4494                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4495                                                reg_data);
4496         if (ret_val)
4497                 return ret_val;
4498
4499         switch (hw->phy.type) {
4500         case e1000_phy_igp_3:
4501                 ret_val = e1000_copper_link_setup_igp(hw);
4502                 if (ret_val)
4503                         return ret_val;
4504                 break;
4505         case e1000_phy_bm:
4506         case e1000_phy_82578:
4507                 ret_val = e1000_copper_link_setup_m88(hw);
4508                 if (ret_val)
4509                         return ret_val;
4510                 break;
4511         case e1000_phy_82577:
4512         case e1000_phy_82579:
4513                 ret_val = e1000_copper_link_setup_82577(hw);
4514                 if (ret_val)
4515                         return ret_val;
4516                 break;
4517         case e1000_phy_ife:
4518                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4519                                                &reg_data);
4520                 if (ret_val)
4521                         return ret_val;
4522
4523                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4524
4525                 switch (hw->phy.mdix) {
4526                 case 1:
4527                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4528                         break;
4529                 case 2:
4530                         reg_data |= IFE_PMC_FORCE_MDIX;
4531                         break;
4532                 case 0:
4533                 default:
4534                         reg_data |= IFE_PMC_AUTO_MDIX;
4535                         break;
4536                 }
4537                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4538                                                 reg_data);
4539                 if (ret_val)
4540                         return ret_val;
4541                 break;
4542         default:
4543                 break;
4544         }
4545
4546         return e1000_setup_copper_link_generic(hw);
4547 }
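
/* Illustrative sketch (editor's addition, not upstream code): for the IFE PHY
 * the MDI/MDI-X handling above is driven by hw->phy.mdix, which a caller
 * would set before link setup, e.g. roughly:
 *
 *      hw->phy.mdix = 2;       (2 forces MDI-X, 1 forces MDI, 0 is auto)
 *      ret_val = hw->mac.ops.setup_physical_interface(hw);
 *
 * where setup_physical_interface is assumed to resolve to
 * e1000_setup_copper_link_ich8lan() on these parts, as called from
 * e1000_setup_link_ich8lan() above.
 */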
4548
4549 /**
4550  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4551  *  @hw: pointer to the HW structure
4552  *
4553  *  Calls the PHY specific link setup function and then calls the
4554  *  generic setup_copper_link to finish configuring the link for
4555  *  Lynxpoint PCH devices
4556  **/
4557 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4558 {
4559         u32 ctrl;
4560         s32 ret_val;
4561
4562         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4563
4564         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4565         ctrl |= E1000_CTRL_SLU;
4566         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4567         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4568
4569         ret_val = e1000_copper_link_setup_82577(hw);
4570         if (ret_val)
4571                 return ret_val;
4572
4573         return e1000_setup_copper_link_generic(hw);
4574 }
4575
4576 /**
4577  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4578  *  @hw: pointer to the HW structure
4579  *  @speed: pointer to store current link speed
4580  *  @duplex: pointer to store the current link duplex
4581  *
4582  *  Calls the generic get_speed_and_duplex to retrieve the current link
4583  *  information and then calls the Kumeran lock loss workaround for links at
4584  *  gigabit speeds.
4585  **/
4586 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4587                                           u16 *duplex)
4588 {
4589         s32 ret_val;
4590
4591         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4592
4593         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4594         if (ret_val)
4595                 return ret_val;
4596
4597         if ((hw->mac.type == e1000_ich8lan) &&
4598             (hw->phy.type == e1000_phy_igp_3) &&
4599             (*speed == SPEED_1000)) {
4600                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4601         }
4602
4603         return ret_val;
4604 }
4605
4606 /**
4607  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4608  *  @hw: pointer to the HW structure
4609  *
4610  *  Work-around for 82566 Kumeran PCS lock loss:
4611  *  On link status change (e.g. PCI reset, speed change), when link is up at
4612  *  gigabit speed:
4613  *    0) if the workaround has been disabled, do nothing
4614  *    1) wait 1ms for Kumeran link to come up
4615  *    2) check Kumeran Diagnostic register PCS lock loss bit
4616  *    3) if not set, the link is locked (all is good), otherwise...
4617  *    4) reset the PHY
4618  *    5) repeat up to 10 times
4619  *  Note: this is only called for IGP3 copper when speed is 1Gb/s.
4620  **/
4621 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4622 {
4623         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4624         u32 phy_ctrl;
4625         s32 ret_val;
4626         u16 i, data;
4627         bool link;
4628
4629         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4630
4631         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4632                 return E1000_SUCCESS;
4633
4634         /* Make sure link is up before proceeding.  If not, just return.
4635          * Attempting this while the link is negotiating fouled up link
4636          * stability.
4637          */
4638         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4639         if (!link)
4640                 return E1000_SUCCESS;
4641
4642         for (i = 0; i < 10; i++) {
4643                 /* read once to clear */
4644                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4645                 if (ret_val)
4646                         return ret_val;
4647                 /* and again to get new status */
4648                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4649                 if (ret_val)
4650                         return ret_val;
4651
4652                 /* check for PCS lock */
4653                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4654                         return E1000_SUCCESS;
4655
4656                 /* Issue PHY reset */
4657                 hw->phy.ops.reset(hw);
4658                 msec_delay_irq(5);
4659         }
4660         /* Disable GigE link negotiation */
4661         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4662         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4663                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4664         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4665
4666         /* Call gig speed drop workaround on Gig disable before accessing
4667          * any PHY registers
4668          */
4669         e1000_gig_downshift_workaround_ich8lan(hw);
4670
4671         /* unable to acquire PCS lock */
4672         return -E1000_ERR_PHY;
4673 }
4674
4675 /**
4676  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4677  *  @hw: pointer to the HW structure
4678  *  @state: boolean value used to set the current Kumeran workaround state
4679  *
4680  *  If ICH8, set the current Kumeran workaround state (enabled - true,
4681  *  disabled - false).
4682  **/
4683 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4684                                                  bool state)
4685 {
4686         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4687
4688         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4689
4690         if (hw->mac.type != e1000_ich8lan) {
4691                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4692                 return;
4693         }
4694
4695         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4696
4697         return;
4698 }
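
/* Illustrative sketch (editor's addition, not upstream code): a driver that
 * wants to opt out of the Kumeran PCS lock-loss workaround on ICH8 could do:
 *
 *      e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, false);
 *
 * after which e1000_kmrn_lock_loss_workaround_ich8lan() returns
 * E1000_SUCCESS immediately (see the dev_spec check above).
 */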
4699
4700 /**
4701  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4702  *  @hw: pointer to the HW structure
4703  *
4704  *  Workaround for 82566 power-down on D3 entry:
4705  *    1) disable gigabit link
4706  *    2) write VR power-down enable
4707  *    3) read it back
4708  *  Continue if successful, else issue LCD reset and repeat
4709  **/
4710 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4711 {
4712         u32 reg;
4713         u16 data;
4714         u8  retry = 0;
4715
4716         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4717
4718         if (hw->phy.type != e1000_phy_igp_3)
4719                 return;
4720
4721         /* Try the workaround twice (if needed) */
4722         do {
4723                 /* Disable link */
4724                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4725                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4726                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4727                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4728
4729                 /* Call gig speed drop workaround on Gig disable before
4730                  * accessing any PHY registers
4731                  */
4732                 if (hw->mac.type == e1000_ich8lan)
4733                         e1000_gig_downshift_workaround_ich8lan(hw);
4734
4735                 /* Write VR power-down enable */
4736                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4737                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4738                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4739                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4740
4741                 /* Read it back and test */
4742                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4743                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4744                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4745                         break;
4746
4747                 /* Issue PHY reset and repeat at most one more time */
4748                 reg = E1000_READ_REG(hw, E1000_CTRL);
4749                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4750                 retry++;
4751         } while (retry);
4752 }
4753
4754 /**
4755  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4756  *  @hw: pointer to the HW structure
4757  *
4758  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4759  *  LPLU, Gig disable, MDIC PHY reset):
4760  *    1) Set Kumeran Near-end loopback
4761  *    2) Clear Kumeran Near-end loopback
4762  *  Should only be called for ICH8[m] devices with any 1G PHY.
4763  **/
4764 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4765 {
4766         s32 ret_val;
4767         u16 reg_data;
4768
4769         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4770
4771         if ((hw->mac.type != e1000_ich8lan) ||
4772             (hw->phy.type == e1000_phy_ife))
4773                 return;
4774
4775         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4776                                               &reg_data);
4777         if (ret_val)
4778                 return;
4779         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4780         ret_val = e1000_write_kmrn_reg_generic(hw,
4781                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4782                                                reg_data);
4783         if (ret_val)
4784                 return;
4785         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4786         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4787                                      reg_data);
4788 }
4789
4790 /**
4791  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4792  *  @hw: pointer to the HW structure
4793  *
4794  *  During S0 to Sx transition, it is possible the link remains at gig
4795  *  instead of negotiating to a lower speed.  Before going to Sx, set
4796  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4797  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4798  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4799  *  needs to be written.
4800  *  Parts that support (and are linked to a partner which supports) EEE in
4801  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4802  *  than 10Mbps w/o EEE.
4803  **/
4804 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4805 {
4806         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4807         u32 phy_ctrl;
4808         s32 ret_val;
4809
4810         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4811
4812         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4813         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4814
4815         if (hw->phy.type == e1000_phy_i217) {
4816                 u16 phy_reg, device_id = hw->device_id;
4817
4818                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4819                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4820                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4821                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4822                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4823
4824                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4825                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4826                 }
4827
4828                 ret_val = hw->phy.ops.acquire(hw);
4829                 if (ret_val)
4830                         goto out;
4831
4832                 if (!dev_spec->eee_disable) {
4833                         u16 eee_advert;
4834
4835                         ret_val =
4836                             e1000_read_emi_reg_locked(hw,
4837                                                       I217_EEE_ADVERTISEMENT,
4838                                                       &eee_advert);
4839                         if (ret_val)
4840                                 goto release;
4841
4842                         /* Disable LPLU if both link partners support 100BaseT
4843                          * EEE and 100Full is advertised on both ends of the
4844                          * link, and enable Auto Enable LPI since there will
4845                          * be no driver to enable LPI while in Sx.
4846                          */
4847                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4848                             (dev_spec->eee_lp_ability &
4849                              I82579_EEE_100_SUPPORTED) &&
4850                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4851                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4852                                               E1000_PHY_CTRL_NOND0A_LPLU);
4853
4854                                 /* Set Auto Enable LPI after link up */
4855                                 hw->phy.ops.read_reg_locked(hw,
4856                                                             I217_LPI_GPIO_CTRL,
4857                                                             &phy_reg);
4858                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4859                                 hw->phy.ops.write_reg_locked(hw,
4860                                                              I217_LPI_GPIO_CTRL,
4861                                                              phy_reg);
4862                         }
4863                 }
4864
4865                 /* For i217 Intel Rapid Start Technology support,
4866                  * when the system is going into Sx and no manageability engine
4867                  * is present, the driver must configure proxy to reset only on
4868                  * power good.  LPI (Low Power Idle) state must also reset only
4869                  * on power good, as well as the MTA (Multicast table array).
4870                  * The SMBus release must also be disabled on LCD reset.
4871                  */
4872                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4873                       E1000_ICH_FWSM_FW_VALID)) {
4874                         /* Enable proxy to reset only on power good. */
4875                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4876                                                     &phy_reg);
4877                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4878                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4879                                                      phy_reg);
4880
4881                         /* Set the LPI (EEE) enable bit to reset only on
4882                          * power good.
4883                          */
4884                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4885                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4886                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4887
4888                         /* Disable the SMB release on LCD reset. */
4889                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4890                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4891                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4892                 }
4893
4894                 /* Enable MTA to reset for Intel Rapid Start Technology
4895                  * Support
4896                  */
4897                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4898                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4899                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4900
4901 release:
4902                 hw->phy.ops.release(hw);
4903         }
4904 out:
4905         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4906
4907         if (hw->mac.type == e1000_ich8lan)
4908                 e1000_gig_downshift_workaround_ich8lan(hw);
4909
4910         if (hw->mac.type >= e1000_pchlan) {
4911                 e1000_oem_bits_config_ich8lan(hw, false);
4912
4913                 /* Reset PHY to activate OEM bits on 82577/8 */
4914                 if (hw->mac.type == e1000_pchlan)
4915                         e1000_phy_hw_reset_generic(hw);
4916
4917                 ret_val = hw->phy.ops.acquire(hw);
4918                 if (ret_val)
4919                         return;
4920                 e1000_write_smbus_addr(hw);
4921                 hw->phy.ops.release(hw);
4922         }
4923
4924         return;
4925 }
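
/* Illustrative sketch (editor's addition, not upstream code): this routine and
 * e1000_resume_workarounds_pchlan() below are meant to bracket a suspend/
 * resume cycle in the driver, roughly:
 *
 *      e1000_suspend_workarounds_ich8lan(hw);    (before entering Sx)
 *      ...
 *      e1000_resume_workarounds_pchlan(hw);      (after returning to S0)
 *
 * The resume side is a no-op for parts older than PCH2.
 */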
4926
4927 /**
4928  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4929  *  @hw: pointer to the HW structure
4930  *
4931  *  During Sx to S0 transitions on non-managed devices or managed devices
4932  *  on which PHY resets are not blocked, if the PHY registers cannot be
4933  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4934  *  the PHY.
4935  *  On i217, setup Intel Rapid Start Technology.
4936  **/
4937 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4938 {
4939         s32 ret_val;
4940
4941         DEBUGFUNC("e1000_resume_workarounds_pchlan");
4942         if (hw->mac.type < e1000_pch2lan)
4943                 return E1000_SUCCESS;
4944
4945         ret_val = e1000_init_phy_workarounds_pchlan(hw);
4946         if (ret_val) {
4947                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4948                 return ret_val;
4949         }
4950
4951         /* For i217 Intel Rapid Start Technology support when the system
4952          * is transitioning from Sx and no manageability engine is present
4953          * configure SMBus to restore on reset, disable proxy, and enable
4954          * the reset on MTA (Multicast table array).
4955          */
4956         if (hw->phy.type == e1000_phy_i217) {
4957                 u16 phy_reg;
4958
4959                 ret_val = hw->phy.ops.acquire(hw);
4960                 if (ret_val) {
4961                         DEBUGOUT("Failed to setup iRST\n");
4962                         return ret_val;
4963                 }
4964
4965                 /* Clear Auto Enable LPI after link up */
4966                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
4967                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4968                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
4969
4970                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4971                     E1000_ICH_FWSM_FW_VALID)) {
4972                         /* Restore clear on SMB if no manageability engine
4973                          * is present
4974                          */
4975                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4976                                                               &phy_reg);
4977                         if (ret_val)
4978                                 goto release;
4979                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4980                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4981
4982                         /* Disable Proxy */
4983                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4984                 }
4985                 /* Enable reset on MTA */
4986                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4987                                                       &phy_reg);
4988                 if (ret_val)
4989                         goto release;
4990                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4991                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4992 release:
4993                 if (ret_val)
4994                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4995                 hw->phy.ops.release(hw);
4996                 return ret_val;
4997         }
4998         return E1000_SUCCESS;
4999 }
5000
5001 /**
5002  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5003  *  @hw: pointer to the HW structure
5004  *
5005  *  Return the LED back to the default configuration.
5006  **/
5007 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5008 {
5009         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5010
5011         if (hw->phy.type == e1000_phy_ife)
5012                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5013                                              0);
5014
5015         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5016         return E1000_SUCCESS;
5017 }
5018
5019 /**
5020  *  e1000_led_on_ich8lan - Turn LEDs on
5021  *  @hw: pointer to the HW structure
5022  *
5023  *  Turn on the LEDs.
5024  **/
5025 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5026 {
5027         DEBUGFUNC("e1000_led_on_ich8lan");
5028
5029         if (hw->phy.type == e1000_phy_ife)
5030                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5031                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5032
5033         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5034         return E1000_SUCCESS;
5035 }
5036
5037 /**
5038  *  e1000_led_off_ich8lan - Turn LEDs off
5039  *  @hw: pointer to the HW structure
5040  *
5041  *  Turn off the LEDs.
5042  **/
5043 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5044 {
5045         DEBUGFUNC("e1000_led_off_ich8lan");
5046
5047         if (hw->phy.type == e1000_phy_ife)
5048                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5049                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5050
5051         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5052         return E1000_SUCCESS;
5053 }
5054
5055 /**
5056  *  e1000_setup_led_pchlan - Configures SW controllable LED
5057  *  @hw: pointer to the HW structure
5058  *
5059  *  This prepares the SW controllable LED for use.
5060  **/
5061 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5062 {
5063         DEBUGFUNC("e1000_setup_led_pchlan");
5064
5065         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5066                                      (u16)hw->mac.ledctl_mode1);
5067 }
5068
5069 /**
5070  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5071  *  @hw: pointer to the HW structure
5072  *
5073  *  Return the LED back to the default configuration.
5074  **/
5075 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5076 {
5077         DEBUGFUNC("e1000_cleanup_led_pchlan");
5078
5079         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5080                                      (u16)hw->mac.ledctl_default);
5081 }
5082
5083 /**
5084  *  e1000_led_on_pchlan - Turn LEDs on
5085  *  @hw: pointer to the HW structure
5086  *
5087  *  Turn on the LEDs.
5088  **/
5089 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5090 {
5091         u16 data = (u16)hw->mac.ledctl_mode2;
5092         u32 i, led;
5093
5094         DEBUGFUNC("e1000_led_on_pchlan");
5095
5096         /* If no link, then turn LED on by setting the invert bit
5097          * for each LED whose mode is "link_up" in ledctl_mode2.
5098          */
5099         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5100                 for (i = 0; i < 3; i++) {
5101                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5102                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5103                             E1000_LEDCTL_MODE_LINK_UP)
5104                                 continue;
5105                         if (led & E1000_PHY_LED0_IVRT)
5106                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5107                         else
5108                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5109                 }
5110         }
5111
5112         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5113 }
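
/* Illustrative example (editor's addition, not upstream code): with no link,
 * each 5-bit LED field whose mode is "link_up" gets its invert bit flipped,
 * which is equivalent to an XOR.  For LED1 (i == 1) the loop above reduces
 * to roughly:
 *
 *      led = (data >> 5) & E1000_PHY_LED0_MASK;
 *      if ((led & E1000_PHY_LED0_MODE_MASK) == E1000_LEDCTL_MODE_LINK_UP)
 *              data ^= (E1000_PHY_LED0_IVRT << 5);
 *
 * e1000_led_off_pchlan() below applies the same transform to ledctl_mode1.
 */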
5114
5115 /**
5116  *  e1000_led_off_pchlan - Turn LEDs off
5117  *  @hw: pointer to the HW structure
5118  *
5119  *  Turn off the LEDs.
5120  **/
5121 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5122 {
5123         u16 data = (u16)hw->mac.ledctl_mode1;
5124         u32 i, led;
5125
5126         DEBUGFUNC("e1000_led_off_pchlan");
5127
5128         /* If no link, then turn LED off by clearing the invert bit
5129          * for each LED whose mode is "link_up" in ledctl_mode1.
5130          */
5131         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5132                 for (i = 0; i < 3; i++) {
5133                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5134                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5135                             E1000_LEDCTL_MODE_LINK_UP)
5136                                 continue;
5137                         if (led & E1000_PHY_LED0_IVRT)
5138                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5139                         else
5140                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5141                 }
5142         }
5143
5144         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5145 }
5146
5147 /**
5148  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5149  *  @hw: pointer to the HW structure
5150  *
5151  *  Reads the appropriate register for the config done bit for completion
5152  *  status and configures the PHY through s/w for EEPROM-less parts.
5153  *
5154  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5155  *  config done bit, so only an error is logged and initialization continues.
5156  *  If we were to return with an error, EEPROM-less silicon could not be
5157  *  reset or change link.
5158  **/
5159 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5160 {
5161         s32 ret_val = E1000_SUCCESS;
5162         u32 bank = 0;
5163         u32 status;
5164
5165         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5166
5167         e1000_get_cfg_done_generic(hw);
5168
5169         /* Wait for indication from h/w that it has completed basic config */
5170         if (hw->mac.type >= e1000_ich10lan) {
5171                 e1000_lan_init_done_ich8lan(hw);
5172         } else {
5173                 ret_val = e1000_get_auto_rd_done_generic(hw);
5174                 if (ret_val) {
5175                         /* When auto config read does not complete, do not
5176                          * return with an error. This can happen in situations
5177                          * where there is no eeprom and prevents getting link.
5178                          */
5179                         DEBUGOUT("Auto Read Done did not complete\n");
5180                         ret_val = E1000_SUCCESS;
5181                 }
5182         }
5183
5184         /* Clear PHY Reset Asserted bit */
5185         status = E1000_READ_REG(hw, E1000_STATUS);
5186         if (status & E1000_STATUS_PHYRA)
5187                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5188         else
5189                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5190
5191         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5192         if (hw->mac.type <= e1000_ich9lan) {
5193                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5194                     (hw->phy.type == e1000_phy_igp_3)) {
5195                         e1000_phy_init_script_igp3(hw);
5196                 }
5197         } else {
5198                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5199                         /* Maybe we should do a basic PHY config */
5200                         DEBUGOUT("EEPROM not present\n");
5201                         ret_val = -E1000_ERR_CONFIG;
5202                 }
5203         }
5204
5205         return ret_val;
5206 }
5207
5208 /**
5209  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5210  * @hw: pointer to the HW structure
5211  *
5212  * In the case of a PHY power down to save power, to turn off link during a
5213  * driver unload, or when wake on LAN is not enabled, remove the link.
5214  **/
5215 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5216 {
5217         /* If the management interface is not enabled, then power down */
5218         if (!(hw->mac.ops.check_mng_mode(hw) ||
5219               hw->phy.ops.check_reset_block(hw)))
5220                 e1000_power_down_phy_copper(hw);
5221
5222         return;
5223 }
5224
5225 /**
5226  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5227  *  @hw: pointer to the HW structure
5228  *
5229  *  Clears hardware counters specific to the silicon family and calls
5230  *  clear_hw_cntrs_generic to clear all general purpose counters.
5231  **/
5232 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5233 {
5234         u16 phy_data;
5235         s32 ret_val;
5236
5237         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5238
5239         e1000_clear_hw_cntrs_base_generic(hw);
5240
5241         E1000_READ_REG(hw, E1000_ALGNERRC);
5242         E1000_READ_REG(hw, E1000_RXERRC);
5243         E1000_READ_REG(hw, E1000_TNCRS);
5244         E1000_READ_REG(hw, E1000_CEXTERR);
5245         E1000_READ_REG(hw, E1000_TSCTC);
5246         E1000_READ_REG(hw, E1000_TSCTFC);
5247
5248         E1000_READ_REG(hw, E1000_MGTPRC);
5249         E1000_READ_REG(hw, E1000_MGTPDC);
5250         E1000_READ_REG(hw, E1000_MGTPTC);
5251
5252         E1000_READ_REG(hw, E1000_IAC);
5253         E1000_READ_REG(hw, E1000_ICRXOC);
5254
5255         /* Clear PHY statistics registers */
5256         if ((hw->phy.type == e1000_phy_82578) ||
5257             (hw->phy.type == e1000_phy_82579) ||
5258             (hw->phy.type == e1000_phy_i217) ||
5259             (hw->phy.type == e1000_phy_82577)) {
5260                 ret_val = hw->phy.ops.acquire(hw);
5261                 if (ret_val)
5262                         return;
5263                 ret_val = hw->phy.ops.set_page(hw,
5264                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5265                 if (ret_val)
5266                         goto release;
5267                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5268                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5269                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5270                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5271                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5272                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5273                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5274                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5275                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5276                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5277                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5278                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5279                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5280                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5281 release:
5282                 hw->phy.ops.release(hw);
5283         }
5284 }
5285