drivers/net/e1000/base/e1000_ich8lan.c (dpdk.git, commit 70eba710b17ba4ca7669c38e8afa9e736e0c5f68)
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 0:7 GbE region Read Access */
173                 u32 grwa:8; /* 8:15 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
179
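/* Illustrative sketch (excluded from the build): how the HSFSTS union above
 * is overlaid on the 16-bit flash status register so the fields can be
 * tested by name.  The accessor and register offset (E1000_READ_FLASH_REG16,
 * ICH_FLASH_HSFSTS) are the ones used by the flash routines elsewhere in
 * this file; the helper itself is only an example.
 */
#if 0
static bool example_flash_cycle_idle(struct e1000_hw *hw)
{
        union ich8_hws_flash_status hsfsts;

        /* Read the raw register into the union... */
        hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

        /* ...then test the decoded bit-fields directly. */
        return !hsfsts.hsf_status.flcinprog && hsfsts.hsf_status.fldesvalid;
}
#endif
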
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214         if (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Unforce SMBus mode in PHY */
238                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
239                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
240                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
241
242                 /* Unforce SMBus mode in MAC */
243                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
244                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
245                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
246         }
247
248         return true;
249 }
250
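/* Illustrative sketch (hypothetical register values, excluded from the
 * build): how the 32-bit PHY ID and revision are assembled from the two
 * 16-bit ID registers read above.  PHY_ID1 supplies the high word; the low
 * word from PHY_ID2 is split by PHY_REVISION_MASK into ID bits and a 4-bit
 * revision.
 */
#if 0
static void example_assemble_phy_id(void)
{
        u16 id1 = 0x0154;       /* hypothetical PHY_ID1 value */
        u16 id2 = 0xA0B1;       /* hypothetical PHY_ID2 value */
        u32 id, rev;

        id = (u32)(id1 << 16);                  /* 0x01540000 */
        id |= (u32)(id2 & PHY_REVISION_MASK);   /* 0x0154A0B0 */
        rev = (u32)(id2 & ~PHY_REVISION_MASK);  /* revision 0x1 */
        (void)id;
        (void)rev;
}
#endif
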
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
258 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
259 {
260         u32 mac_reg;
261
262         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
263
264         /* Set Phy Config Counter to 50msec */
265         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
266         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
267         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
268         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
269
270         /* Toggle LANPHYPC Value bit */
271         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
272         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
273         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
274         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275         E1000_WRITE_FLUSH(hw);
276         usec_delay(10);
277         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280
281         if (hw->mac.type < e1000_pch_lpt) {
282                 msec_delay(50);
283         } else {
284                 u16 count = 20;
285
286                 do {
287                         msec_delay(5);
288                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
289                            E1000_CTRL_EXT_LPCD) && count--);
290
291                 msec_delay(30);
292         }
293 }
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
302 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
303 {
304         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
305         s32 ret_val;
306
307         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
308
309         /* Gate automatic PHY configuration by hardware on managed and
310          * non-managed 82579 and newer adapters.
311          */
312         e1000_gate_hw_phy_config_ich8lan(hw, true);
313
314 #ifdef ULP_SUPPORT
315         /* It is not possible to be certain of the current state of ULP
316          * so forcibly disable it.
317          */
318         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
319
320 #endif /* ULP_SUPPORT */
321         ret_val = hw->phy.ops.acquire(hw);
322         if (ret_val) {
323                 DEBUGOUT("Failed to initialize PHY flow\n");
324                 goto out;
325         }
326
327         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
328          * inaccessible and resetting the PHY is not blocked, toggle the
329          * LANPHYPC Value bit to force the interconnect to PCIe mode.
330          */
331         switch (hw->mac.type) {
332         case e1000_pch_lpt:
333                 if (e1000_phy_is_accessible_pchlan(hw))
334                         break;
335
336                 /* Before toggling LANPHYPC, see if PHY is accessible by
337                  * forcing MAC to SMBus mode first.
338                  */
339                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
340                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
341                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
342
343                 /* Wait 50 milliseconds for MAC to finish any retries
344                  * that it might be trying to perform from previous
345                  * attempts to acknowledge any phy read requests.
346                  */
347                 msec_delay(50);
348
349                 /* fall-through */
350         case e1000_pch2lan:
351                 if (e1000_phy_is_accessible_pchlan(hw))
352                         break;
353
354                 /* fall-through */
355         case e1000_pchlan:
356                 if ((hw->mac.type == e1000_pchlan) &&
357                     (fwsm & E1000_ICH_FWSM_FW_VALID))
358                         break;
359
360                 if (hw->phy.ops.check_reset_block(hw)) {
361                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
362                         ret_val = -E1000_ERR_PHY;
363                         break;
364                 }
365
366                 /* Toggle LANPHYPC Value bit */
367                 e1000_toggle_lanphypc_pch_lpt(hw);
368                 if (hw->mac.type >= e1000_pch_lpt) {
369                         if (e1000_phy_is_accessible_pchlan(hw))
370                                 break;
371
372                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
373                          * so ensure that the MAC is also out of SMBus mode
374                          */
375                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
376                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
377                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
378
379                         if (e1000_phy_is_accessible_pchlan(hw))
380                                 break;
381
382                         ret_val = -E1000_ERR_PHY;
383                 }
384                 break;
385         default:
386                 break;
387         }
388
389         hw->phy.ops.release(hw);
390         if (!ret_val) {
391
392                 /* Check to see if able to reset PHY.  Print error if not */
393                 if (hw->phy.ops.check_reset_block(hw)) {
394                         ERROR_REPORT("Reset blocked by ME\n");
395                         goto out;
396                 }
397
398                 /* Reset the PHY before any access to it.  Doing so ensures
399                  * that the PHY is in a known good state before we read/write
400                  * PHY registers.  The generic reset is sufficient here,
401                  * because we haven't determined the PHY type yet.
402                  */
403                 ret_val = e1000_phy_hw_reset_generic(hw);
404                 if (ret_val)
405                         goto out;
406
407                 /* On a successful reset, possibly need to wait for the PHY
408                  * to quiesce to an accessible state before returning control
409                  * to the calling function.  If the PHY does not quiesce, then
410                  * return E1000_BLK_PHY_RESET, as this is the condition that
411                  * the PHY is in.
412                  */
413                 ret_val = hw->phy.ops.check_reset_block(hw);
414                 if (ret_val)
415                         ERROR_REPORT("ME blocked access to PHY after reset\n");
416         }
417
418 out:
419         /* Ungate automatic PHY configuration on non-managed 82579 */
420         if ((hw->mac.type == e1000_pch2lan) &&
421             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
422                 msec_delay(10);
423                 e1000_gate_hw_phy_config_ich8lan(hw, false);
424         }
425
426         return ret_val;
427 }
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
675
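/* Illustrative sketch (hypothetical GFPREG value, excluded from the build):
 * the bank-size arithmetic performed above.  GFPREG holds 4096-byte sector
 * numbers for the base (low word) and limit (high word) of the GbE flash
 * region; the region size is halved because it contains two NVM banks, then
 * converted from bytes to 16-bit words.
 */
#if 0
static void example_nvm_bank_size(void)
{
        u32 gfpreg = 0x000A0003; /* hypothetical: base sector 3, limit 0xA */
        u32 base = gfpreg & FLASH_GFPREG_BASE_MASK;               /* 3 */
        u32 end = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;  /* 11 */
        u32 bank_size;

        bank_size = (end - base) << FLASH_SECTOR_ADDR_SHIFT; /* 32768 bytes */
        bank_size /= 2;               /* 16384 bytes per bank */
        bank_size /= sizeof(u16);     /* 8192 words per bank */
        (void)bank_size;
}
#endif
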
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg & 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @address: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: pointer to the value read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_write_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
857
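/* Illustrative sketch (excluded from the build): the calling convention for
 * the two EMI helpers above.  Both assume the SW/FW/HW semaphore is already
 * held, so a caller brackets them with phy.ops.acquire()/release(), as
 * e1000_set_eee_pchlan() below does.  The standalone wrapper is only an
 * example.
 */
#if 0
static s32 example_read_emi(struct e1000_hw *hw, u16 emi_addr, u16 *val)
{
        s32 ret_val;

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        /* Safe now that the semaphore is held */
        ret_val = e1000_read_emi_reg_locked(hw, emi_addr, val);

        hw->phy.ops.release(hw);
        return ret_val;
}
#endif
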
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until such time otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         if (hw->phy.type == e1000_phy_82579) {
940                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
941                                                     &data);
942                 if (ret_val)
943                         goto release;
944
945                 data &= ~I82579_LPI_100_PLL_SHUT;
946                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
947                                                      data);
948         }
949
950         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
951         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
952         if (ret_val)
953                 goto release;
954
955         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
956 release:
957         hw->phy.ops.release(hw);
958
959         return ret_val;
960 }
961
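/* Illustrative sketch (excluded from the build): how a caller might turn EEE
 * off via the dev_spec flag consumed above before re-running the EEE
 * configuration.  The flag is the one tested in e1000_set_eee_pchlan(); the
 * surrounding flow is an assumption, not the driver's actual code path.
 */
#if 0
static s32 example_disable_eee(struct e1000_hw *hw)
{
        /* With eee_disable set, e1000_set_eee_pchlan() skips the
         * advertisement/link-partner checks and leaves all LPI enable
         * bits cleared.
         */
        hw->dev_spec.ich8lan.eee_disable = true;
        return e1000_set_eee_pchlan(hw);
}
#endif
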
962 /**
963  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
964  *  @hw:   pointer to the HW structure
965  *  @link: link up bool flag
966  *
967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
968  *  preventing further DMA write requests.  Work around the issue by disabling
969  *  the de-assertion of the clock request when in 1Gbps mode.
970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
971  *  speeds in order to avoid Tx hangs.
972  **/
973 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
974 {
975         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
976         u32 status = E1000_READ_REG(hw, E1000_STATUS);
977         s32 ret_val = E1000_SUCCESS;
978         u16 reg;
979
980         if (link && (status & E1000_STATUS_SPEED_1000)) {
981                 ret_val = hw->phy.ops.acquire(hw);
982                 if (ret_val)
983                         return ret_val;
984
985                 ret_val =
986                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
987                                                &reg);
988                 if (ret_val)
989                         goto release;
990
991                 ret_val =
992                     e1000_write_kmrn_reg_locked(hw,
993                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
994                                                 reg &
995                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
996                 if (ret_val)
997                         goto release;
998
999                 usec_delay(10);
1000
1001                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1002                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1003
1004                 ret_val =
1005                     e1000_write_kmrn_reg_locked(hw,
1006                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1007                                                 reg);
1008 release:
1009                 hw->phy.ops.release(hw);
1010         } else {
1011                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1012                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1013
1014                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1015                               (status & E1000_STATUS_FD)))
1016                         goto update_fextnvm6;
1017
1018                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1019                 if (ret_val)
1020                         return ret_val;
1021
1022                 /* Clear link status transmit timeout */
1023                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1024
1025                 if (status & E1000_STATUS_SPEED_100) {
1026                         /* Set inband Tx timeout to 5x10us for 100Half */
1027                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1028
1029                         /* Do not extend the K1 entry latency for 100Half */
1030                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1031                 } else {
1032                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1033                         reg |= 50 <<
1034                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1035
1036                         /* Extend the K1 entry latency for 10 Mbps */
1037                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1038                 }
1039
1040                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1041                 if (ret_val)
1042                         return ret_val;
1043
1044 update_fextnvm6:
1045                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1046         }
1047
1048         return ret_val;
1049 }
1050
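/* Illustrative sketch (excluded from the build): the locked KMRN
 * read-modify-write pattern the workaround above relies on.  The register
 * offset and the bit being cleared are the ones used in
 * e1000_k1_workaround_lpt_lp(); the standalone helper is an assumption.
 */
#if 0
static s32 example_clear_k1_enable(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 reg;

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
                                             &reg);
        if (!ret_val)
                ret_val = e1000_write_kmrn_reg_locked(hw,
                                              E1000_KMRNCTRLSTA_K1_CONFIG,
                                              reg &
                                              ~E1000_KMRNCTRLSTA_K1_ENABLE);

        hw->phy.ops.release(hw);
        return ret_val;
}
#endif
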
1051 #ifdef ULP_SUPPORT
1052 /**
1053  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1054  *  @hw: pointer to the HW structure
1055  *  @to_sx: boolean indicating a system power state transition to Sx
1056  *
1057  *  When link is down, configure ULP mode to significantly reduce the power
1058  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1059  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1060  *  system, configure the ULP mode by software.
1061  */
1062 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1063 {
1064         u32 mac_reg;
1065         s32 ret_val = E1000_SUCCESS;
1066         u16 phy_reg;
1067
1068         if ((hw->mac.type < e1000_pch_lpt) ||
1069             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1070             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1071             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1072             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1073             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1074                 return 0;
1075
1076         if (!to_sx) {
1077                 int i = 0;
1078                 /* Poll up to 5 seconds for Cable Disconnected indication */
1079                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1080                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1081                         /* Bail if link is re-acquired */
1082                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1083                                 return -E1000_ERR_PHY;
1084                         if (i++ == 100)
1085                                 break;
1086
1087                         msec_delay(50);
1088                 }
1089                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1090                           (E1000_READ_REG(hw, E1000_FEXT) &
1091                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1092                           i * 50);
1093                 if (!(E1000_READ_REG(hw, E1000_FEXT) &
1094                     E1000_FEXT_PHY_CABLE_DISCONNECTED))
1095                         return 0;
1096         }
1097
1098         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1099                 /* Request ME configure ULP mode in the PHY */
1100                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1101                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1102                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1103
1104                 goto out;
1105         }
1106
1107         ret_val = hw->phy.ops.acquire(hw);
1108         if (ret_val)
1109                 goto out;
1110
1111         /* During S0 Idle keep the phy in PCI-E mode */
1112         if (hw->dev_spec.ich8lan.smbus_disable)
1113                 goto skip_smbus;
1114
1115         /* Force SMBus mode in PHY */
1116         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1117         if (ret_val)
1118                 goto release;
1119         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1120         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1121
1122         /* Force SMBus mode in MAC */
1123         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1124         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1125         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1126
1127 skip_smbus:
1128         if (!to_sx) {
1129                 /* Change the 'Link Status Change' interrupt to trigger
1130                  * on 'Cable Status Change'
1131                  */
1132                 ret_val = e1000_read_kmrn_reg_locked(hw,
1133                                                      E1000_KMRNCTRLSTA_OP_MODES,
1134                                                      &phy_reg);
1135                 if (ret_val)
1136                         goto release;
1137                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1138                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1139                                             phy_reg);
1140         }
1141
1142         /* Set Inband ULP Exit, Reset to SMBus mode and
1143          * Disable SMBus Release on PERST# in PHY
1144          */
1145         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1146         if (ret_val)
1147                 goto release;
1148         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1149                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1150         if (to_sx) {
1151                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1152                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1153
1154                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1155         } else {
1156                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1157         }
1158         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1159
1160         /* Set Disable SMBus Release on PERST# in MAC */
1161         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1162         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1163         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1164
1165         /* Commit ULP changes in PHY by starting auto ULP configuration */
1166         phy_reg |= I218_ULP_CONFIG1_START;
1167         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1168
1169         if (!to_sx) {
1170                 /* Disable Tx so that the MAC doesn't send any (buffered)
1171                  * packets to the PHY.
1172                  */
1173                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1174                 mac_reg &= ~E1000_TCTL_EN;
1175                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1176         }
1177 release:
1178         hw->phy.ops.release(hw);
1179 out:
1180         if (ret_val)
1181                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1182         else
1183                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1184
1185         return ret_val;
1186 }
1187
1188 /**
1189  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1190  *  @hw: pointer to the HW structure
1191  *  @force: boolean indicating whether or not to force disabling ULP
1192  *
1193  *  Un-configure ULP mode when link is up, the system is transitioned from
1194  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1195  *  system, poll for an indication from ME that ULP has been un-configured.
1196  *  If not on an ME enabled system, un-configure the ULP mode by software.
1197  *
1198  *  During nominal operation, this function is called when link is acquired
1199  *  to disable ULP mode (force=false); otherwise, for example when unloading
1200  *  the driver or during Sx->S0 transitions, this is called with force=true
1201  *  to forcibly disable ULP.
1202  *
1203  *  When the cable is plugged in while the device is in D0, a Cable Status
1204  *  Change interrupt is generated which causes this function to be called
1205  *  to partially disable ULP mode and restart autonegotiation.  This function
1206  *  is then called again due to the resulting Link Status Change interrupt
1207  *  to finish cleaning up after the ULP flow.
1208  */
1209 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1210 {
1211         s32 ret_val = E1000_SUCCESS;
1212         u32 mac_reg;
1213         u16 phy_reg;
1214         int i = 0;
1215
1216         if ((hw->mac.type < e1000_pch_lpt) ||
1217             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1218             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1219             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1220             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1221             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1222                 return 0;
1223
1224         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1225                 if (force) {
1226                         /* Request ME un-configure ULP mode in the PHY */
1227                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1228                         mac_reg &= ~E1000_H2ME_ULP;
1229                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1230                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1231                 }
1232
1233                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1234                 while (E1000_READ_REG(hw, E1000_FWSM) &
1235                        E1000_FWSM_ULP_CFG_DONE) {
1236                         if (i++ == 10) {
1237                                 ret_val = -E1000_ERR_PHY;
1238                                 goto out;
1239                         }
1240
1241                         msec_delay(10);
1242                 }
1243                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1244
1245                 if (force) {
1246                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1247                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1248                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1249                 } else {
1250                         /* Clear H2ME.ULP after ME ULP configuration */
1251                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1252                         mac_reg &= ~E1000_H2ME_ULP;
1253                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1254
1255                         /* Restore link speed advertisements and restart
1256                          * Auto-negotiation
1257                          */
1258                         if (hw->mac.autoneg) {
1259                                 ret_val = e1000_phy_setup_autoneg(hw);
1260                                 if (ret_val)
1261                                         goto out;
1262                         } else {
1263                                 ret_val = e1000_setup_copper_link_generic(hw);
1264                                 if (ret_val)
1265                                         goto out;
1266                         }
1267                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1268                 }
1269
1270                 goto out;
1271         }
1272
1273         ret_val = hw->phy.ops.acquire(hw);
1274         if (ret_val)
1275                 goto out;
1276
1277         /* Revert the change to the 'Link Status Change'
1278          * interrupt to trigger on 'Cable Status Change'
1279          */
1280         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1281                                              &phy_reg);
1282         if (ret_val)
1283                 goto release;
1284         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1285         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1286
1287         if (force)
1288                 /* Toggle LANPHYPC Value bit */
1289                 e1000_toggle_lanphypc_pch_lpt(hw);
1290
1291         /* Unforce SMBus mode in PHY */
1292         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1293         if (ret_val) {
1294                 /* The MAC might be in PCIe mode, so temporarily force to
1295                  * SMBus mode in order to access the PHY.
1296                  */
1297                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1298                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1299                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1300
1301                 msec_delay(50);
1302
1303                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1304                                                        &phy_reg);
1305                 if (ret_val)
1306                         goto release;
1307         }
1308         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1309         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1310
1311         /* Unforce SMBus mode in MAC */
1312         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1313         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1314         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1315
1316         /* When ULP mode was previously entered, K1 was disabled by the
1317          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1318          */
1319         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1320         if (ret_val)
1321                 goto release;
1322         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1323         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1324
1325         /* Clear ULP enabled configuration */
1326         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1327         if (ret_val)
1328                 goto release;
1329         /* CSC interrupt received due to ULP Indication */
1330         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1331                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1332                              I218_ULP_CONFIG1_STICKY_ULP |
1333                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1334                              I218_ULP_CONFIG1_WOL_HOST |
1335                              I218_ULP_CONFIG1_INBAND_EXIT |
1336                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1337                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1338
1339                 /* Commit ULP changes by starting auto ULP configuration */
1340                 phy_reg |= I218_ULP_CONFIG1_START;
1341                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1342
1343                 /* Clear Disable SMBus Release on PERST# in MAC */
1344                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1345                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1346                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1347
1348                 if (!force) {
1349                         hw->phy.ops.release(hw);
1350
1351                         if (hw->mac.autoneg)
1352                                 e1000_phy_setup_autoneg(hw);
1353
1354                         e1000_sw_lcd_config_ich8lan(hw);
1355
1356                         e1000_oem_bits_config_ich8lan(hw, true);
1357
1358                         /* Set ULP state to unknown and return non-zero to
1359                          * indicate no link (yet) and re-enter on the next LSC
1360                          * to finish disabling ULP flow.
1361                          */
1362                         hw->dev_spec.ich8lan.ulp_state =
1363                             e1000_ulp_state_unknown;
1364
1365                         return 1;
1366                 }
1367         }
1368
1369         /* Re-enable Tx */
1370         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1371         mac_reg |= E1000_TCTL_EN;
1372         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1373
1374 release:
1375         hw->phy.ops.release(hw);
1376         if (force) {
1377                 hw->phy.ops.reset(hw);
1378                 msec_delay(50);
1379         }
1380 out:
1381         if (ret_val)
1382                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1383         else
1384                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1385
1386         return ret_val;
1387 }
1388
1389 #endif /* ULP_SUPPORT */
1390 /**
1391  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1392  *  @hw: pointer to the HW structure
1393  *
1394  *  Checks to see if the link status of the hardware has changed.  If a
1395  *  change in link status has been detected, then we read the PHY registers
1396  *  to get the current speed/duplex if link exists.
1397  **/
1398 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1399 {
1400         struct e1000_mac_info *mac = &hw->mac;
1401         s32 ret_val, tipg_reg = 0;
1402         u16 emi_addr, emi_val = 0;
1403         bool link = false;
1404         u16 phy_reg;
1405
1406         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1407
1408         /* We only want to go out to the PHY registers to see if Auto-Neg
1409          * has completed and/or if our link status has changed.  The
1410          * get_link_status flag is set upon receiving a Link Status
1411          * Change or Rx Sequence Error interrupt.
1412          */
1413         if (!mac->get_link_status)
1414                 return E1000_SUCCESS;
1415
1416         if ((hw->mac.type < e1000_pch_lpt) ||
1417             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1418             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1419                 /* First we want to see if the MII Status Register reports
1420                  * link.  If so, then we want to get the current speed/duplex
1421                  * of the PHY.
1422                  */
1423                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1424                 if (ret_val)
1425                         return ret_val;
1426         } else {
1427                 /* Check the MAC's STATUS register to determine link state
1428                  * since the PHY could be inaccessible while in ULP mode.
1429                  */
1430                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1431                 if (link)
1432                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1433                 else
1434                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1435
1436                 if (ret_val)
1437                         return ret_val;
1438         }
1439
1440         if (hw->mac.type == e1000_pchlan) {
1441                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1442                 if (ret_val)
1443                         return ret_val;
1444         }
1445
1446         /* When connected at 10Mbps half-duplex, some parts are excessively
1447          * aggressive resulting in many collisions. To avoid this, increase
1448          * the IPG and reduce Rx latency in the PHY.
1449          */
1450         if (((hw->mac.type == e1000_pch2lan) ||
1451              (hw->mac.type == e1000_pch_lpt)) && link) {
1452                 u16 speed, duplex;
1453
1454                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1455                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1456                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1457
1458                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1459                         tipg_reg |= 0xFF;
1460                         /* Reduce Rx latency in analog PHY */
1461                         emi_val = 0;
1462                 } else {
1463                         /* Roll back to the default values */
1464                         tipg_reg |= 0x08;
1465                         emi_val = 1;
1466                 }
1467
1468                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1469
1470                 ret_val = hw->phy.ops.acquire(hw);
1471                 if (ret_val)
1472                         return ret_val;
1473
1474                 if (hw->mac.type == e1000_pch2lan)
1475                         emi_addr = I82579_RX_CONFIG;
1476                 else
1477                         emi_addr = I217_RX_CONFIG;
1478                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1479
1480                 hw->phy.ops.release(hw);
1481
1482                 if (ret_val)
1483                         return ret_val;
1484         }
1485
1486         /* I217 Packet Loss issue:
1487          * ensure that FEXTNVM4 Beacon Duration is set correctly
1488          * on power up.
1489          * Set the Beacon Duration for I217 to 8 usec
1490          */
1491         if (hw->mac.type == e1000_pch_lpt) {
1492                 u32 mac_reg;
1493
1494                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1495                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1496                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1497                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1498         }
1499
1500         /* Work-around I218 hang issue */
1501         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1502             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1503             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1504             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1505                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1506                 if (ret_val)
1507                         return ret_val;
1508         }
1509         /* Clear link partner's EEE ability */
1510         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1511
1512         /* Configure K0s minimum time */
1513         if (hw->mac.type == e1000_pch_lpt) {
1514                 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1515         }
1516
1517         if (!link)
1518                 return E1000_SUCCESS; /* No link detected */
1519
1520         mac->get_link_status = false;
1521
1522         switch (hw->mac.type) {
1523         case e1000_pch2lan:
1524                 ret_val = e1000_k1_workaround_lv(hw);
1525                 if (ret_val)
1526                         return ret_val;
1527                 /* fall-thru */
1528         case e1000_pchlan:
1529                 if (hw->phy.type == e1000_phy_82578) {
1530                         ret_val = e1000_link_stall_workaround_hv(hw);
1531                         if (ret_val)
1532                                 return ret_val;
1533                 }
1534
1535                 /* Workaround for PCHx parts in half-duplex:
1536                  * Set the number of preambles removed from the packet
1537                  * when it is passed from the PHY to the MAC to prevent
1538                  * the MAC from misinterpreting the packet type.
1539                  */
1540                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1541                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1542
1543                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1544                     E1000_STATUS_FD)
1545                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1546
1547                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1548                 break;
1549         default:
1550                 break;
1551         }
1552
1553         /* Check if there was DownShift, must be checked
1554          * immediately after link-up
1555          */
1556         e1000_check_downshift_generic(hw);
1557
1558         /* Enable/Disable EEE after link up */
1559         if (hw->phy.type > e1000_phy_82579) {
1560                 ret_val = e1000_set_eee_pchlan(hw);
1561                 if (ret_val)
1562                         return ret_val;
1563         }
1564
1565         /* If we are forcing speed/duplex, then we simply return since
1566          * we have already determined whether we have link or not.
1567          */
1568         if (!mac->autoneg)
1569                 return -E1000_ERR_CONFIG;
1570
1571         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1572          * of MAC speed/duplex configuration.  So we only need to
1573          * configure Collision Distance in the MAC.
1574          */
1575         mac->ops.config_collision_dist(hw);
1576
1577         /* Configure Flow Control now that Auto-Neg has completed.
1578          * First, we need to restore the desired flow control
1579          * settings because we may have had to re-autoneg with a
1580          * different link partner.
1581          */
1582         ret_val = e1000_config_fc_after_link_up_generic(hw);
1583         if (ret_val)
1584                 DEBUGOUT("Error configuring flow control\n");
1585
1586         return ret_val;
1587 }
1588
1589 /**
1590  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1591  *  @hw: pointer to the HW structure
1592  *
1593  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1594  **/
1595 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1596 {
1597         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1598
1599         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1600         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1601         switch (hw->mac.type) {
1602         case e1000_ich8lan:
1603         case e1000_ich9lan:
1604         case e1000_ich10lan:
1605                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1606                 break;
1607         case e1000_pchlan:
1608         case e1000_pch2lan:
1609         case e1000_pch_lpt:
1610                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1611                 break;
1612         default:
1613                 break;
1614         }
1615 }
1616
1617 /**
1618  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1619  *  @hw: pointer to the HW structure
1620  *
1621  *  Acquires the mutex for performing NVM operations.
1622  **/
1623 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1624 {
1625         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1626
1627         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1628
1629         return E1000_SUCCESS;
1630 }
1631
1632 /**
1633  *  e1000_release_nvm_ich8lan - Release NVM mutex
1634  *  @hw: pointer to the HW structure
1635  *
1636  *  Releases the mutex used while performing NVM operations.
1637  **/
1638 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1639 {
1640         DEBUGFUNC("e1000_release_nvm_ich8lan");
1641
1642         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1643
1644         return;
1645 }
1646
1647 /**
1648  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1649  *  @hw: pointer to the HW structure
1650  *
1651  *  Acquires the software control flag for performing PHY and select
1652  *  MAC CSR accesses.
1653  **/
1654 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1655 {
1656         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1657         s32 ret_val = E1000_SUCCESS;
1658
1659         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1660
1661         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1662
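             /* First wait for any current owner to release the software flag,
              * then request ownership below and wait for hardware to grant it.
              */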
1663         while (timeout) {
1664                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1665                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1666                         break;
1667
1668                 msec_delay_irq(1);
1669                 timeout--;
1670         }
1671
1672         if (!timeout) {
1673                 DEBUGOUT("SW has already locked the resource.\n");
1674                 ret_val = -E1000_ERR_CONFIG;
1675                 goto out;
1676         }
1677
1678         timeout = SW_FLAG_TIMEOUT;
1679
1680         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1681         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1682
1683         while (timeout) {
1684                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1685                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1686                         break;
1687
1688                 msec_delay_irq(1);
1689                 timeout--;
1690         }
1691
1692         if (!timeout) {
1693                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1694                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1695                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1696                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1697                 ret_val = -E1000_ERR_CONFIG;
1698                 goto out;
1699         }
1700
1701 out:
1702         if (ret_val)
1703                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1704
1705         return ret_val;
1706 }
1707
1708 /**
1709  *  e1000_release_swflag_ich8lan - Release software control flag
1710  *  @hw: pointer to the HW structure
1711  *
1712  *  Releases the software control flag for performing PHY and select
1713  *  MAC CSR accesses.
1714  **/
1715 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1716 {
1717         u32 extcnf_ctrl;
1718
1719         DEBUGFUNC("e1000_release_swflag_ich8lan");
1720
1721         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1722
1723         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1724                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1725                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1726         } else {
1727                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1728         }
1729
1730         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1731
1732         return;
1733 }
1734
1735 /**
1736  *  e1000_check_mng_mode_ich8lan - Checks management mode
1737  *  @hw: pointer to the HW structure
1738  *
1739  *  This checks if the adapter has any manageability enabled.
1740  *  This is a function pointer entry point only called by read/write
1741  *  routines for the PHY and NVM parts.
1742  **/
1743 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1744 {
1745         u32 fwsm;
1746
1747         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1748
1749         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1750
1751         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1752                ((fwsm & E1000_FWSM_MODE_MASK) ==
1753                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1754 }
1755
1756 /**
1757  *  e1000_check_mng_mode_pchlan - Checks management mode
1758  *  @hw: pointer to the HW structure
1759  *
1760  *  This checks if the adapter has iAMT enabled.
1761  *  This is a function pointer entry point only called by read/write
1762  *  routines for the PHY and NVM parts.
1763  **/
1764 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1765 {
1766         u32 fwsm;
1767
1768         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1769
1770         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1771
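             /* Unlike the ICH8 check above, any overlap with the iAMT mode
              * bits (rather than an exact mode match) is treated as
              * manageability being enabled.
              */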
1772         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1773                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1774 }
1775
1776 /**
1777  *  e1000_rar_set_pch2lan - Set receive address register
1778  *  @hw: pointer to the HW structure
1779  *  @addr: pointer to the receive address
1780  *  @index: receive address array register
1781  *
1782  *  Sets the receive address array register at index to the address passed
1783  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1784  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1785  *  Use SHRA[0-3] in place of those reserved for ME.
1786  **/
1787 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1788 {
1789         u32 rar_low, rar_high;
1790
1791         DEBUGFUNC("e1000_rar_set_pch2lan");
1792
1793         /* HW expects these in little endian so we reverse the byte order
1794          * from network order (big endian) to little endian
1795          */
1796         rar_low = ((u32) addr[0] |
1797                    ((u32) addr[1] << 8) |
1798                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1799
1800         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1801
1802         /* If MAC address zero, no need to set the AV bit */
1803         if (rar_low || rar_high)
1804                 rar_high |= E1000_RAH_AV;
1805
1806         if (index == 0) {
1807                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1808                 E1000_WRITE_FLUSH(hw);
1809                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1810                 E1000_WRITE_FLUSH(hw);
1811                 return E1000_SUCCESS;
1812         }
1813
1814         /* RAR[1-6] are owned by manageability.  Skip those and program the
1815          * next address into the SHRA register array.
1816          */
1817         if (index < (u32) (hw->mac.rar_entry_count)) {
1818                 s32 ret_val;
1819
1820                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1821                 if (ret_val)
1822                         goto out;
1823
1824                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1825                 E1000_WRITE_FLUSH(hw);
1826                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1827                 E1000_WRITE_FLUSH(hw);
1828
1829                 e1000_release_swflag_ich8lan(hw);
1830
1831                 /* verify the register updates */
1832                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1833                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1834                         return E1000_SUCCESS;
1835
1836                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1837                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1838         }
1839
1840 out:
1841         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1842         return -E1000_ERR_CONFIG;
1843 }
1844
1845 /**
1846  *  e1000_rar_set_pch_lpt - Set receive address registers
1847  *  @hw: pointer to the HW structure
1848  *  @addr: pointer to the receive address
1849  *  @index: receive address array register
1850  *
1851  *  Sets the receive address register array at index to the address passed
1852  *  in by addr. For LPT, RAR[0] is the base address register that is to
1853  *  contain the MAC address. SHRA[0-10] are the shared receive address
1854  *  registers that are shared between the Host and manageability engine (ME).
1855  **/
1856 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1857 {
1858         u32 rar_low, rar_high;
1859         u32 wlock_mac;
1860
1861         DEBUGFUNC("e1000_rar_set_pch_lpt");
1862
1863         /* HW expects these in little endian so we reverse the byte order
1864          * from network order (big endian) to little endian
1865          */
1866         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1867                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1868
1869         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1870
1871         /* If MAC address zero, no need to set the AV bit */
1872         if (rar_low || rar_high)
1873                 rar_high |= E1000_RAH_AV;
1874
1875         if (index == 0) {
1876                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1877                 E1000_WRITE_FLUSH(hw);
1878                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1879                 E1000_WRITE_FLUSH(hw);
1880                 return E1000_SUCCESS;
1881         }
1882
1883         /* The manageability engine (ME) can lock certain SHRAR registers that
1884          * it is using - those registers are unavailable for use.
1885          */
1886         if (index < hw->mac.rar_entry_count) {
1887                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1888                             E1000_FWSM_WLOCK_MAC_MASK;
1889                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
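                     /* As used below: a WLOCK_MAC value of 0 means no SHRAR
                      * registers are locked by ME, 1 means all of them are
                      * locked, and any other value allows programming
                      * receive-address indexes up to and including wlock_mac.
                      */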
1890
1891                 /* Check if all SHRAR registers are locked */
1892                 if (wlock_mac == 1)
1893                         goto out;
1894
1895                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1896                         s32 ret_val;
1897
1898                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1899
1900                         if (ret_val)
1901                                 goto out;
1902
1903                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1904                                         rar_low);
1905                         E1000_WRITE_FLUSH(hw);
1906                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1907                                         rar_high);
1908                         E1000_WRITE_FLUSH(hw);
1909
1910                         e1000_release_swflag_ich8lan(hw);
1911
1912                         /* verify the register updates */
1913                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1914                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1915                                 return E1000_SUCCESS;
1916                 }
1917         }
1918
1919 out:
1920         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1921         return -E1000_ERR_CONFIG;
1922 }
1923
1924 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1925 /**
1926  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1927  *  @hw: pointer to the HW structure
1928  *  @mc_addr_list: array of multicast addresses to program
1929  *  @mc_addr_count: number of multicast addresses to program
1930  *
1931  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1932  *  The caller must have a packed mc_addr_list of multicast addresses.
1933  **/
1934 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1935                                               u8 *mc_addr_list,
1936                                               u32 mc_addr_count)
1937 {
1938         u16 phy_reg = 0;
1939         int i;
1940         s32 ret_val;
1941
1942         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1943
1944         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1945
1946         ret_val = hw->phy.ops.acquire(hw);
1947         if (ret_val)
1948                 return;
1949
1950         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1951         if (ret_val)
1952                 goto release;
1953
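             /* Each 32-bit MTA shadow entry is written to the PHY as two
              * consecutive 16-bit wakeup registers (low word, then high word).
              */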
1954         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1955                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1956                                            (u16)(hw->mac.mta_shadow[i] &
1957                                                  0xFFFF));
1958                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1959                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1960                                                  0xFFFF));
1961         }
1962
1963         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1964
1965 release:
1966         hw->phy.ops.release(hw);
1967 }
1968
1969 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1970 /**
1971  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1972  *  @hw: pointer to the HW structure
1973  *
1974  *  Checks if firmware is blocking the reset of the PHY.
1975  *  This is a function pointer entry point only called by
1976  *  reset routines.
1977  **/
1978 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1979 {
1980         u32 fwsm;
1981         bool blocked = false;
1982         int i = 0;
1983
1984         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1985
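             /* Poll the firmware's RSPCIPHY indication for up to ~300 ms
              * (30 iterations of 10 ms) before reporting the PHY reset as
              * blocked.
              */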
1986         do {
1987                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1988                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1989                         blocked = true;
1990                         msec_delay(10);
1991                         continue;
1992                 }
1993                 blocked = false;
1994         } while (blocked && (i++ < 30));
1995         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1996 }
1997
1998 /**
1999  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2000  *  @hw: pointer to the HW structure
2001  *
2002  *  Assumes semaphore already acquired.
2003  *
2004  **/
2005 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2006 {
2007         u16 phy_data;
2008         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2009         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2010                 E1000_STRAP_SMT_FREQ_SHIFT;
2011         s32 ret_val;
2012
2013         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2014
2015         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2016         if (ret_val)
2017                 return ret_val;
2018
2019         phy_data &= ~HV_SMB_ADDR_MASK;
2020         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2021         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2022
2023         if (hw->phy.type == e1000_phy_i217) {
2024                 /* Restore SMBus frequency */
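                     /* The 2-bit frequency value from the STRAP register is
                      * re-encoded (value - 1) into the low/high frequency
                      * bits of HV_SMB_ADDR; a strap value of 0 is treated as
                      * unsupported.
                      */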
2025                 if (freq--) {
2026                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2027                         phy_data |= (freq & (1 << 0)) <<
2028                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2029                         phy_data |= (freq & (1 << 1)) <<
2030                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2031                 } else {
2032                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2033                 }
2034         }
2035
2036         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2037 }
2038
2039 /**
2040  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2041  *  @hw:   pointer to the HW structure
2042  *
2043  *  SW should configure the LCD from the NVM extended configuration region
2044  *  as a workaround for certain parts.
2045  **/
2046 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2047 {
2048         struct e1000_phy_info *phy = &hw->phy;
2049         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2050         s32 ret_val = E1000_SUCCESS;
2051         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2052
2053         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2054
2055         /* Initialize the PHY from the NVM on ICH platforms.  This
2056          * is needed due to an issue where the NVM configuration is
2057          * not properly autoloaded after power transitions.
2058          * Therefore, after each PHY reset, we will load the
2059          * configuration data out of the NVM manually.
2060          */
2061         switch (hw->mac.type) {
2062         case e1000_ich8lan:
2063                 if (phy->type != e1000_phy_igp_3)
2064                         return ret_val;
2065
2066                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2067                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2068                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2069                         break;
2070                 }
2071                 /* Fall-thru */
2072         case e1000_pchlan:
2073         case e1000_pch2lan:
2074         case e1000_pch_lpt:
2075                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2076                 break;
2077         default:
2078                 return ret_val;
2079         }
2080
2081         ret_val = hw->phy.ops.acquire(hw);
2082         if (ret_val)
2083                 return ret_val;
2084
2085         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2086         if (!(data & sw_cfg_mask))
2087                 goto release;
2088
2089         /* Make sure HW does not configure LCD from PHY
2090          * extended configuration before SW configuration
2091          */
2092         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2093         if ((hw->mac.type < e1000_pch2lan) &&
2094             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2095                 goto release;
2096
2097         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2098         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2099         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2100         if (!cnf_size)
2101                 goto release;
2102
2103         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2104         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2105
2106         if (((hw->mac.type == e1000_pchlan) &&
2107              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2108             (hw->mac.type > e1000_pchlan)) {
2109                 /* HW configures the SMBus address and LEDs when the
2110                  * OEM and LCD Write Enable bits are set in the NVM.
2111                  * When both NVM bits are cleared, SW will configure
2112                  * them instead.
2113                  */
2114                 ret_val = e1000_write_smbus_addr(hw);
2115                 if (ret_val)
2116                         goto release;
2117
2118                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2119                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2120                                                         (u16)data);
2121                 if (ret_val)
2122                         goto release;
2123         }
2124
2125         /* Configure LCD from extended configuration region. */
2126
2127         /* cnf_base_addr is in DWORD */
2128         word_addr = (u16)(cnf_base_addr << 1);
2129
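             /* Each record in the extended configuration region is a pair of
              * NVM words: the register data followed by the register address.
              * A page select record only updates phy_page for the writes that
              * follow it.
              */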
2130         for (i = 0; i < cnf_size; i++) {
2131                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2132                                            &reg_data);
2133                 if (ret_val)
2134                         goto release;
2135
2136                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2137                                            1, &reg_addr);
2138                 if (ret_val)
2139                         goto release;
2140
2141                 /* Save off the PHY page for future writes. */
2142                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2143                         phy_page = reg_data;
2144                         continue;
2145                 }
2146
2147                 reg_addr &= PHY_REG_MASK;
2148                 reg_addr |= phy_page;
2149
2150                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2151                                                     reg_data);
2152                 if (ret_val)
2153                         goto release;
2154         }
2155
2156 release:
2157         hw->phy.ops.release(hw);
2158         return ret_val;
2159 }
2160
2161 /**
2162  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2163  *  @hw:   pointer to the HW structure
2164  *  @link: link up bool flag
2165  *
2166  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2167  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2168  *  If link is down, the function will restore the default K1 setting located
2169  *  in the NVM.
2170  **/
2171 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2172 {
2173         s32 ret_val = E1000_SUCCESS;
2174         u16 status_reg = 0;
2175         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2176
2177         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2178
2179         if (hw->mac.type != e1000_pchlan)
2180                 return E1000_SUCCESS;
2181
2182         /* Wrap the whole flow with the sw flag */
2183         ret_val = hw->phy.ops.acquire(hw);
2184         if (ret_val)
2185                 return ret_val;
2186
2187         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2188         if (link) {
2189                 if (hw->phy.type == e1000_phy_82578) {
2190                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2191                                                               &status_reg);
2192                         if (ret_val)
2193                                 goto release;
2194
2195                         status_reg &= (BM_CS_STATUS_LINK_UP |
2196                                        BM_CS_STATUS_RESOLVED |
2197                                        BM_CS_STATUS_SPEED_MASK);
2198
2199                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2200                                            BM_CS_STATUS_RESOLVED |
2201                                            BM_CS_STATUS_SPEED_1000))
2202                                 k1_enable = false;
2203                 }
2204
2205                 if (hw->phy.type == e1000_phy_82577) {
2206                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2207                                                               &status_reg);
2208                         if (ret_val)
2209                                 goto release;
2210
2211                         status_reg &= (HV_M_STATUS_LINK_UP |
2212                                        HV_M_STATUS_AUTONEG_COMPLETE |
2213                                        HV_M_STATUS_SPEED_MASK);
2214
2215                         if (status_reg == (HV_M_STATUS_LINK_UP |
2216                                            HV_M_STATUS_AUTONEG_COMPLETE |
2217                                            HV_M_STATUS_SPEED_1000))
2218                                 k1_enable = false;
2219                 }
2220
2221                 /* Link stall fix for link up */
2222                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2223                                                        0x0100);
2224                 if (ret_val)
2225                         goto release;
2226
2227         } else {
2228                 /* Link stall fix for link down */
2229                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2230                                                        0x4100);
2231                 if (ret_val)
2232                         goto release;
2233         }
2234
2235         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2236
2237 release:
2238         hw->phy.ops.release(hw);
2239
2240         return ret_val;
2241 }
2242
2243 /**
2244  *  e1000_configure_k1_ich8lan - Configure K1 power state
2245  *  @hw: pointer to the HW structure
2246  *  @k1_enable: K1 state to configure
2247  *
2248  *  Configure the K1 power state based on the provided parameter.
2249  *  Assumes semaphore already acquired.
2250  *
2251  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2252  **/
2253 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2254 {
2255         s32 ret_val;
2256         u32 ctrl_reg = 0;
2257         u32 ctrl_ext = 0;
2258         u32 reg = 0;
2259         u16 kmrn_reg = 0;
2260
2261         DEBUGFUNC("e1000_configure_k1_ich8lan");
2262
2263         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2264                                              &kmrn_reg);
2265         if (ret_val)
2266                 return ret_val;
2267
2268         if (k1_enable)
2269                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2270         else
2271                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2272
2273         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2274                                               kmrn_reg);
2275         if (ret_val)
2276                 return ret_val;
2277
2278         usec_delay(20);
2279         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2280         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2281
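             /* Force the MAC speed configuration with the speed-bypass
              * override asserted for a short time so the new K1 setting can
              * take effect, then restore the original CTRL and CTRL_EXT
              * values.
              */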
2282         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2283         reg |= E1000_CTRL_FRCSPD;
2284         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2285
2286         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2287         E1000_WRITE_FLUSH(hw);
2288         usec_delay(20);
2289         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2290         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2291         E1000_WRITE_FLUSH(hw);
2292         usec_delay(20);
2293
2294         return E1000_SUCCESS;
2295 }
2296
2297 /**
2298  *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
2299  *  @hw:       pointer to the HW structure
2300  *  @d0_state: boolean if entering d0 or d3 device state
2301  *
2302  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2303  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2304  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2305  **/
2306 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2307 {
2308         s32 ret_val = 0;
2309         u32 mac_reg;
2310         u16 oem_reg;
2311
2312         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2313
2314         if (hw->mac.type < e1000_pchlan)
2315                 return ret_val;
2316
2317         ret_val = hw->phy.ops.acquire(hw);
2318         if (ret_val)
2319                 return ret_val;
2320
2321         if (hw->mac.type == e1000_pchlan) {
2322                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2323                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2324                         goto release;
2325         }
2326
2327         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2328         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2329                 goto release;
2330
2331         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2332
2333         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2334         if (ret_val)
2335                 goto release;
2336
2337         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2338
2339         if (d0_state) {
2340                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2341                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2342
2343                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2344                         oem_reg |= HV_OEM_BITS_LPLU;
2345         } else {
2346                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2347                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2348                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2349
2350                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2351                     E1000_PHY_CTRL_NOND0A_LPLU))
2352                         oem_reg |= HV_OEM_BITS_LPLU;
2353         }
2354
2355         /* Set Restart auto-neg to activate the bits */
2356         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2357             !hw->phy.ops.check_reset_block(hw))
2358                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2359
2360         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2361
2362 release:
2363         hw->phy.ops.release(hw);
2364
2365         return ret_val;
2366 }
2367
2368
2369 /**
2370  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2371  *  @hw:   pointer to the HW structure
2372  **/
2373 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2374 {
2375         s32 ret_val;
2376         u16 data;
2377
2378         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2379
2380         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2381         if (ret_val)
2382                 return ret_val;
2383
2384         data |= HV_KMRN_MDIO_SLOW;
2385
2386         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2387
2388         return ret_val;
2389 }
2390
2391 /**
2392  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2393  *  done after every PHY reset.
2394  **/
2395 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2396 {
2397         s32 ret_val = E1000_SUCCESS;
2398         u16 phy_data;
2399
2400         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2401
2402         if (hw->mac.type != e1000_pchlan)
2403                 return E1000_SUCCESS;
2404
2405         /* Set MDIO slow mode before any other MDIO access */
2406         if (hw->phy.type == e1000_phy_82577) {
2407                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2408                 if (ret_val)
2409                         return ret_val;
2410         }
2411
2412         if (((hw->phy.type == e1000_phy_82577) &&
2413              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2414             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2415                 /* Disable generation of early preamble */
2416                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2417                 if (ret_val)
2418                         return ret_val;
2419
2420                 /* Preamble tuning for SSC */
2421                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2422                                                 0xA204);
2423                 if (ret_val)
2424                         return ret_val;
2425         }
2426
2427         if (hw->phy.type == e1000_phy_82578) {
2428                 /* Return registers to default by doing a soft reset then
2429                  * writing 0x3140 to the control register.
2430                  */
2431                 if (hw->phy.revision < 2) {
2432                         e1000_phy_sw_reset_generic(hw);
2433                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2434                                                         0x3140);
2435                 }
2436         }
2437
2438         /* Select page 0 */
2439         ret_val = hw->phy.ops.acquire(hw);
2440         if (ret_val)
2441                 return ret_val;
2442
2443         hw->phy.addr = 1;
2444         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2445         hw->phy.ops.release(hw);
2446         if (ret_val)
2447                 return ret_val;
2448
2449         /* Configure the K1 Si workaround during PHY reset assuming there is
2450          * link so that it disables K1 if link is at 1Gbps.
2451          */
2452         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2453         if (ret_val)
2454                 return ret_val;
2455
2456         /* Workaround for link disconnects on a busy hub in half duplex */
2457         ret_val = hw->phy.ops.acquire(hw);
2458         if (ret_val)
2459                 return ret_val;
2460         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2461         if (ret_val)
2462                 goto release;
2463         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2464                                                phy_data & 0x00FF);
2465         if (ret_val)
2466                 goto release;
2467
2468         /* set MSE higher to enable link to stay up when noise is high */
2469         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2470 release:
2471         hw->phy.ops.release(hw);
2472
2473         return ret_val;
2474 }
2475
2476 /**
2477  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2478  *  @hw:   pointer to the HW structure
2479  **/
2480 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2481 {
2482         u32 mac_reg;
2483         u16 i, phy_reg = 0;
2484         s32 ret_val;
2485
2486         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2487
2488         ret_val = hw->phy.ops.acquire(hw);
2489         if (ret_val)
2490                 return;
2491         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2492         if (ret_val)
2493                 goto release;
2494
2495         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2496         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2497                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2498                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2499                                            (u16)(mac_reg & 0xFFFF));
2500                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2501                                            (u16)((mac_reg >> 16) & 0xFFFF));
2502
2503                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2504                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2505                                            (u16)(mac_reg & 0xFFFF));
2506                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2507                                            (u16)((mac_reg & E1000_RAH_AV)
2508                                                  >> 16));
2509         }
2510
2511         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2512
2513 release:
2514         hw->phy.ops.release(hw);
2515 }
2516
2517 #ifndef CRC32_OS_SUPPORT
2518 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2519 {
2520         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2521         u32 i, j, mask, crc;
2522
2523         DEBUGFUNC("e1000_calc_rx_da_crc");
2524
2525         crc = 0xffffffff;
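             /* Reflected (LSB-first) CRC-32 over the six address bytes:
              * 'mask' is all ones when the current CRC LSB is set, so the
              * polynomial is XORed in conditionally without a branch.
              */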
2526         for (i = 0; i < 6; i++) {
2527                 crc = crc ^ mac[i];
2528                 for (j = 8; j > 0; j--) {
2529                         mask = (crc & 1) * (-1);
2530                         crc = (crc >> 1) ^ (poly & mask);
2531                 }
2532         }
2533         return ~crc;
2534 }
2535
2536 #endif /* CRC32_OS_SUPPORT */
2537 /**
2538  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2539  *  with 82579 PHY
2540  *  @hw: pointer to the HW structure
2541  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2542  **/
2543 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2544 {
2545         s32 ret_val = E1000_SUCCESS;
2546         u16 phy_reg, data;
2547         u32 mac_reg;
2548         u16 i;
2549
2550         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2551
2552         if (hw->mac.type < e1000_pch2lan)
2553                 return E1000_SUCCESS;
2554
2555         /* disable Rx path while enabling/disabling workaround */
2556         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2557         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2558                                         phy_reg | (1 << 14));
2559         if (ret_val)
2560                 return ret_val;
2561
2562         if (enable) {
2563                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2564                  * SHRAL/H) and initial CRC values to the MAC
2565                  */
2566                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2567                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2568                         u32 addr_high, addr_low;
2569
2570                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2571                         if (!(addr_high & E1000_RAH_AV))
2572                                 continue;
2573                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2574                         mac_addr[0] = (addr_low & 0xFF);
2575                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2576                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2577                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2578                         mac_addr[4] = (addr_high & 0xFF);
2579                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2580
2581 #ifndef CRC32_OS_SUPPORT
2582                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2583                                         e1000_calc_rx_da_crc(mac_addr));
2584 #else /* CRC32_OS_SUPPORT */
2585                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2586                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2587 #endif /* CRC32_OS_SUPPORT */
2588                 }
2589
2590                 /* Write Rx addresses to the PHY */
2591                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2592
2593                 /* Enable jumbo frame workaround in the MAC */
2594                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2595                 mac_reg &= ~(1 << 14);
2596                 mac_reg |= (7 << 15);
2597                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2598
2599                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2600                 mac_reg |= E1000_RCTL_SECRC;
2601                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2602
2603                 ret_val = e1000_read_kmrn_reg_generic(hw,
2604                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2605                                                 &data);
2606                 if (ret_val)
2607                         return ret_val;
2608                 ret_val = e1000_write_kmrn_reg_generic(hw,
2609                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2610                                                 data | (1 << 0));
2611                 if (ret_val)
2612                         return ret_val;
2613                 ret_val = e1000_read_kmrn_reg_generic(hw,
2614                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2615                                                 &data);
2616                 if (ret_val)
2617                         return ret_val;
2618                 data &= ~(0xF << 8);
2619                 data |= (0xB << 8);
2620                 ret_val = e1000_write_kmrn_reg_generic(hw,
2621                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2622                                                 data);
2623                 if (ret_val)
2624                         return ret_val;
2625
2626                 /* Enable jumbo frame workaround in the PHY */
2627                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2628                 data &= ~(0x7F << 5);
2629                 data |= (0x37 << 5);
2630                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2631                 if (ret_val)
2632                         return ret_val;
2633                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2634                 data &= ~(1 << 13);
2635                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2636                 if (ret_val)
2637                         return ret_val;
2638                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2639                 data &= ~(0x3FF << 2);
2640                 data |= (E1000_TX_PTR_GAP << 2);
2641                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2642                 if (ret_val)
2643                         return ret_val;
2644                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2645                 if (ret_val)
2646                         return ret_val;
2647                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2648                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2649                                                 (1 << 10));
2650                 if (ret_val)
2651                         return ret_val;
2652         } else {
2653                 /* Write MAC register values back to h/w defaults */
2654                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2655                 mac_reg &= ~(0xF << 14);
2656                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2657
2658                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2659                 mac_reg &= ~E1000_RCTL_SECRC;
2660                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2661
2662                 ret_val = e1000_read_kmrn_reg_generic(hw,
2663                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2664                                                 &data);
2665                 if (ret_val)
2666                         return ret_val;
2667                 ret_val = e1000_write_kmrn_reg_generic(hw,
2668                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2669                                                 data & ~(1 << 0));
2670                 if (ret_val)
2671                         return ret_val;
2672                 ret_val = e1000_read_kmrn_reg_generic(hw,
2673                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2674                                                 &data);
2675                 if (ret_val)
2676                         return ret_val;
2677                 data &= ~(0xF << 8);
2678                 data |= (0xB << 8);
2679                 ret_val = e1000_write_kmrn_reg_generic(hw,
2680                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2681                                                 data);
2682                 if (ret_val)
2683                         return ret_val;
2684
2685                 /* Write PHY register values back to h/w defaults */
2686                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2687                 data &= ~(0x7F << 5);
2688                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2689                 if (ret_val)
2690                         return ret_val;
2691                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2692                 data |= (1 << 13);
2693                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2694                 if (ret_val)
2695                         return ret_val;
2696                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2697                 data &= ~(0x3FF << 2);
2698                 data |= (0x8 << 2);
2699                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2700                 if (ret_val)
2701                         return ret_val;
2702                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2703                 if (ret_val)
2704                         return ret_val;
2705                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2706                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2707                                                 ~(1 << 10));
2708                 if (ret_val)
2709                         return ret_val;
2710         }
2711
2712         /* re-enable Rx path after enabling/disabling workaround */
2713         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2714                                      ~(1 << 14));
2715 }
2716
2717 /**
2718  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every
2719  *  PHY reset
 *  @hw: pointer to the HW structure
2720  **/
2721 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2722 {
2723         s32 ret_val = E1000_SUCCESS;
2724
2725         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2726
2727         if (hw->mac.type != e1000_pch2lan)
2728                 return E1000_SUCCESS;
2729
2730         /* Set MDIO slow mode before any other MDIO access */
2731         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2732         if (ret_val)
2733                 return ret_val;
2734
2735         ret_val = hw->phy.ops.acquire(hw);
2736         if (ret_val)
2737                 return ret_val;
2738         /* set MSE higher to enable link to stay up when noise is high */
2739         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2740         if (ret_val)
2741                 goto release;
2742         /* drop link after 5 times MSE threshold was reached */
2743         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2744 release:
2745         hw->phy.ops.release(hw);
2746
2747         return ret_val;
2748 }
2749
2750 /**
2751  *  e1000_k1_workaround_lv - K1 Si workaround
2752  *  @hw:   pointer to the HW structure
2753  *
2754  *  Workaround to set the K1 beacon duration for 82579 parts linked at
2755  *  10Mbps; K1 is disabled for 1000 and 100 speeds.
2756  **/
2757 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2758 {
2759         s32 ret_val = E1000_SUCCESS;
2760         u16 status_reg = 0;
2761
2762         DEBUGFUNC("e1000_k1_workaround_lv");
2763
2764         if (hw->mac.type != e1000_pch2lan)
2765                 return E1000_SUCCESS;
2766
2767         /* Set K1 beacon duration based on 10Mbps speed */
2768         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2769         if (ret_val)
2770                 return ret_val;
2771
2772         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2773             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2774                 if (status_reg &
2775                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2776                         u16 pm_phy_reg;
2777
2778                         /* LV 1G/100 packet drop issue workaround */
2779                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2780                                                        &pm_phy_reg);
2781                         if (ret_val)
2782                                 return ret_val;
2783                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2784                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2785                                                         pm_phy_reg);
2786                         if (ret_val)
2787                                 return ret_val;
2788                 } else {
2789                         u32 mac_reg;
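                        /* Link is up at 10Mbps: per the workaround described
                         * above, K1 stays enabled and only the K1 beacon
                         * duration is lengthened to 16 usec. */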
2790                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2791                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2792                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2793                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2794                 }
2795         }
2796
2797         return ret_val;
2798 }
2799
2800 /**
2801  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2802  *  @hw:   pointer to the HW structure
2803  *  @gate: boolean set to true to gate, false to ungate
2804  *
2805  *  Gate/ungate the automatic PHY configuration via hardware; perform
2806  *  the configuration via software instead.
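 *
 *  On non-managed 82579 parts (no valid firmware reported in FWSM) this is
 *  used to bracket PHY resets; see e1000_phy_hw_reset_ich8lan() and
 *  e1000_post_phy_reset_ich8lan() later in this file.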
2807  **/
2808 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2809 {
2810         u32 extcnf_ctrl;
2811
2812         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2813
2814         if (hw->mac.type < e1000_pch2lan)
2815                 return;
2816
2817         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2818
2819         if (gate)
2820                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2821         else
2822                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2823
2824         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2825 }
2826
2827 /**
2828  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2829  *  @hw: pointer to the HW structure
2830  *
2831  *  Check the appropriate indication the MAC has finished configuring the
2832  *  PHY after a software reset.
2833  **/
2834 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2835 {
2836         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2837
2838         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2839
2840         /* Wait for basic configuration to complete before proceeding */
2841         do {
2842                 data = E1000_READ_REG(hw, E1000_STATUS);
2843                 data &= E1000_STATUS_LAN_INIT_DONE;
2844                 usec_delay(100);
2845         } while ((!data) && --loop);
2846
2847         /* If basic configuration is still incomplete when the above loop
2848          * count reaches 0, loading the configuration from NVM will
2849          * leave the PHY in a bad state possibly resulting in no link.
2850          */
2851         if (loop == 0)
2852                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2853
2854         /* Clear the Init Done bit for the next init event */
2855         data = E1000_READ_REG(hw, E1000_STATUS);
2856         data &= ~E1000_STATUS_LAN_INIT_DONE;
2857         E1000_WRITE_REG(hw, E1000_STATUS, data);
2858 }
2859
2860 /**
2861  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2862  *  @hw: pointer to the HW structure
2863  **/
2864 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2865 {
2866         s32 ret_val = E1000_SUCCESS;
2867         u16 reg;
2868
2869         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2870
2871         if (hw->phy.ops.check_reset_block(hw))
2872                 return E1000_SUCCESS;
2873
2874         /* Allow time for h/w to get to quiescent state after reset */
2875         msec_delay(10);
2876
2877         /* Perform any necessary post-reset workarounds */
2878         switch (hw->mac.type) {
2879         case e1000_pchlan:
2880                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2881                 if (ret_val)
2882                         return ret_val;
2883                 break;
2884         case e1000_pch2lan:
2885                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2886                 if (ret_val)
2887                         return ret_val;
2888                 break;
2889         default:
2890                 break;
2891         }
2892
2893         /* Clear the host wakeup bit after lcd reset */
2894         if (hw->mac.type >= e1000_pchlan) {
2895                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2896                 reg &= ~BM_WUC_HOST_WU_BIT;
2897                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2898         }
2899
2900         /* Configure the LCD with the extended configuration region in NVM */
2901         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2902         if (ret_val)
2903                 return ret_val;
2904
2905         /* Configure the LCD with the OEM bits in NVM */
2906         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2907
2908         if (hw->mac.type == e1000_pch2lan) {
2909                 /* Ungate automatic PHY configuration on non-managed 82579 */
2910                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2911                     E1000_ICH_FWSM_FW_VALID)) {
2912                         msec_delay(10);
2913                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2914                 }
2915
2916                 /* Set EEE LPI Update Timer to 200usec */
2917                 ret_val = hw->phy.ops.acquire(hw);
2918                 if (ret_val)
2919                         return ret_val;
2920                 ret_val = e1000_write_emi_reg_locked(hw,
2921                                                      I82579_LPI_UPDATE_TIMER,
2922                                                      0x1387);
2923                 hw->phy.ops.release(hw);
2924         }
2925
2926         return ret_val;
2927 }
2928
2929 /**
2930  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2931  *  @hw: pointer to the HW structure
2932  *
2933  *  Resets the PHY
2934  *  This is a function pointer entry point called by drivers
2935  *  or other shared routines.
2936  **/
2937 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2938 {
2939         s32 ret_val = E1000_SUCCESS;
2940
2941         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2942
2943         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2944         if ((hw->mac.type == e1000_pch2lan) &&
2945             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2946                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2947
2948         ret_val = e1000_phy_hw_reset_generic(hw);
2949         if (ret_val)
2950                 return ret_val;
2951
2952         return e1000_post_phy_reset_ich8lan(hw);
2953 }
2954
2955 /**
2956  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2957  *  @hw: pointer to the HW structure
2958  *  @active: true to enable LPLU, false to disable
2959  *
2960  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2961  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2962  *  the PHY speed. This function will manually set the LPLU bit and restart
2963  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2964  *  since it configures the same bit.
2965  **/
2966 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2967 {
2968         s32 ret_val;
2969         u16 oem_reg;
2970
2971         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2972
2973         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2974         if (ret_val)
2975                 return ret_val;
2976
2977         if (active)
2978                 oem_reg |= HV_OEM_BITS_LPLU;
2979         else
2980                 oem_reg &= ~HV_OEM_BITS_LPLU;
2981
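        /* Restart auto-negotiation so the new LPLU setting takes effect,
         * unless PHY resets are currently blocked. */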
2982         if (!hw->phy.ops.check_reset_block(hw))
2983                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2984
2985         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2986 }
2987
2988 /**
2989  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2990  *  @hw: pointer to the HW structure
2991  *  @active: true to enable LPLU, false to disable
2992  *
2993  *  Sets the LPLU D0 state according to the active flag.  When
2994  *  activating LPLU this function also disables smart speed
2995  *  and vice versa.  LPLU will not be activated unless the
2996  *  device's autonegotiation advertisement is 10, 10/100, or
2997  *  10/100/1000 at all duplexes.
2998  *  This is a function pointer entry point only called by
2999  *  PHY setup routines.
3000  **/
3001 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3002 {
3003         struct e1000_phy_info *phy = &hw->phy;
3004         u32 phy_ctrl;
3005         s32 ret_val = E1000_SUCCESS;
3006         u16 data;
3007
3008         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3009
3010         if (phy->type == e1000_phy_ife)
3011                 return E1000_SUCCESS;
3012
3013         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3014
3015         if (active) {
3016                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3017                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3018
3019                 if (phy->type != e1000_phy_igp_3)
3020                         return E1000_SUCCESS;
3021
3022                 /* Call gig speed drop workaround on LPLU before accessing
3023                  * any PHY registers
3024                  */
3025                 if (hw->mac.type == e1000_ich8lan)
3026                         e1000_gig_downshift_workaround_ich8lan(hw);
3027
3028                 /* When LPLU is enabled, we should disable SmartSpeed */
3029                 ret_val = phy->ops.read_reg(hw,
3030                                             IGP01E1000_PHY_PORT_CONFIG,
3031                                             &data);
3032                 if (ret_val)
3033                         return ret_val;
3034                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3035                 ret_val = phy->ops.write_reg(hw,
3036                                              IGP01E1000_PHY_PORT_CONFIG,
3037                                              data);
3038                 if (ret_val)
3039                         return ret_val;
3040         } else {
3041                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3042                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3043
3044                 if (phy->type != e1000_phy_igp_3)
3045                         return E1000_SUCCESS;
3046
3047                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3048                  * during Dx states where the power conservation is most
3049                  * important.  During driver activity we should enable
3050                  * SmartSpeed, so performance is maintained.
3051                  */
3052                 if (phy->smart_speed == e1000_smart_speed_on) {
3053                         ret_val = phy->ops.read_reg(hw,
3054                                                     IGP01E1000_PHY_PORT_CONFIG,
3055                                                     &data);
3056                         if (ret_val)
3057                                 return ret_val;
3058
3059                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3060                         ret_val = phy->ops.write_reg(hw,
3061                                                      IGP01E1000_PHY_PORT_CONFIG,
3062                                                      data);
3063                         if (ret_val)
3064                                 return ret_val;
3065                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3066                         ret_val = phy->ops.read_reg(hw,
3067                                                     IGP01E1000_PHY_PORT_CONFIG,
3068                                                     &data);
3069                         if (ret_val)
3070                                 return ret_val;
3071
3072                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3073                         ret_val = phy->ops.write_reg(hw,
3074                                                      IGP01E1000_PHY_PORT_CONFIG,
3075                                                      data);
3076                         if (ret_val)
3077                                 return ret_val;
3078                 }
3079         }
3080
3081         return E1000_SUCCESS;
3082 }
3083
3084 /**
3085  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3086  *  @hw: pointer to the HW structure
3087  *  @active: true to enable LPLU, false to disable
3088  *
3089  *  Sets the LPLU D3 state according to the active flag.  When
3090  *  activating LPLU this function also disables smart speed
3091  *  and vice versa.  LPLU will not be activated unless the
3092  *  device's autonegotiation advertisement is 10, 10/100, or
3093  *  10/100/1000 at all duplexes.
3094  *  This is a function pointer entry point only called by
3095  *  PHY setup routines.
3096  **/
3097 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3098 {
3099         struct e1000_phy_info *phy = &hw->phy;
3100         u32 phy_ctrl;
3101         s32 ret_val = E1000_SUCCESS;
3102         u16 data;
3103
3104         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3105
3106         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3107
3108         if (!active) {
3109                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3110                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3111
3112                 if (phy->type != e1000_phy_igp_3)
3113                         return E1000_SUCCESS;
3114
3115                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3116                  * during Dx states where the power conservation is most
3117                  * important.  During driver activity we should enable
3118                  * SmartSpeed, so performance is maintained.
3119                  */
3120                 if (phy->smart_speed == e1000_smart_speed_on) {
3121                         ret_val = phy->ops.read_reg(hw,
3122                                                     IGP01E1000_PHY_PORT_CONFIG,
3123                                                     &data);
3124                         if (ret_val)
3125                                 return ret_val;
3126
3127                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3128                         ret_val = phy->ops.write_reg(hw,
3129                                                      IGP01E1000_PHY_PORT_CONFIG,
3130                                                      data);
3131                         if (ret_val)
3132                                 return ret_val;
3133                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3134                         ret_val = phy->ops.read_reg(hw,
3135                                                     IGP01E1000_PHY_PORT_CONFIG,
3136                                                     &data);
3137                         if (ret_val)
3138                                 return ret_val;
3139
3140                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3141                         ret_val = phy->ops.write_reg(hw,
3142                                                      IGP01E1000_PHY_PORT_CONFIG,
3143                                                      data);
3144                         if (ret_val)
3145                                 return ret_val;
3146                 }
3147         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3148                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3149                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3150                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3151                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3152
3153                 if (phy->type != e1000_phy_igp_3)
3154                         return E1000_SUCCESS;
3155
3156                 /* Call gig speed drop workaround on LPLU before accessing
3157                  * any PHY registers
3158                  */
3159                 if (hw->mac.type == e1000_ich8lan)
3160                         e1000_gig_downshift_workaround_ich8lan(hw);
3161
3162                 /* When LPLU is enabled, we should disable SmartSpeed */
3163                 ret_val = phy->ops.read_reg(hw,
3164                                             IGP01E1000_PHY_PORT_CONFIG,
3165                                             &data);
3166                 if (ret_val)
3167                         return ret_val;
3168
3169                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3170                 ret_val = phy->ops.write_reg(hw,
3171                                              IGP01E1000_PHY_PORT_CONFIG,
3172                                              data);
3173         }
3174
3175         return ret_val;
3176 }
3177
3178 /**
3179  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3180  *  @hw: pointer to the HW structure
3181  *  @bank:  pointer to the variable that returns the active bank
3182  *
3183  *  Reads signature byte from the NVM using the flash access registers.
3184  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
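 *  On ICH8/ICH9 the EECD SEC1VAL indication is consulted first; if it is not
 *  reported valid, the signature byte of each bank is read directly from
 *  flash instead.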
3185  **/
3186 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3187 {
3188         u32 eecd;
3189         struct e1000_nvm_info *nvm = &hw->nvm;
3190         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
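        /* bank1_offset is the byte distance from bank 0 to bank 1;
         * act_offset is the byte address of the high byte of the
         * signature word (0x13) within bank 0. */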
3191         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3192         u8 sig_byte = 0;
3193         s32 ret_val;
3194
3195         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3196
3197         switch (hw->mac.type) {
3198         case e1000_ich8lan:
3199         case e1000_ich9lan:
3200                 eecd = E1000_READ_REG(hw, E1000_EECD);
3201                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3202                     E1000_EECD_SEC1VAL_VALID_MASK) {
3203                         if (eecd & E1000_EECD_SEC1VAL)
3204                                 *bank = 1;
3205                         else
3206                                 *bank = 0;
3207
3208                         return E1000_SUCCESS;
3209                 }
3210                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3211                 /* fall-thru */
3212         default:
3213                 /* set bank to 0 in case flash read fails */
3214                 *bank = 0;
3215
3216                 /* Check bank 0 */
3217                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3218                                                         &sig_byte);
3219                 if (ret_val)
3220                         return ret_val;
3221                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3222                     E1000_ICH_NVM_SIG_VALUE) {
3223                         *bank = 0;
3224                         return E1000_SUCCESS;
3225                 }
3226
3227                 /* Check bank 1 */
3228                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3229                                                         bank1_offset,
3230                                                         &sig_byte);
3231                 if (ret_val)
3232                         return ret_val;
3233                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3234                     E1000_ICH_NVM_SIG_VALUE) {
3235                         *bank = 1;
3236                         return E1000_SUCCESS;
3237                 }
3238
3239                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3240                 return -E1000_ERR_NVM;
3241         }
3242 }
3243
3244 /**
3245  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3246  *  @hw: pointer to the HW structure
3247  *  @offset: The offset (in words) of the word(s) to read.
3248  *  @words: Size of data to read in words
3249  *  @data: Pointer to the word(s) to read at offset.
3250  *
3251  *  Reads a word(s) from the NVM using the flash access registers.
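 *
 *  Illustrative call through the NVM ops table (a sketch only; on ICH/PCH
 *  parts the read op is expected to resolve to this function):
 *      u16 led_cfg;
 *      ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, &led_cfg);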
3252  **/
3253 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3254                                   u16 *data)
3255 {
3256         struct e1000_nvm_info *nvm = &hw->nvm;
3257         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3258         u32 act_offset;
3259         s32 ret_val = E1000_SUCCESS;
3260         u32 bank = 0;
3261         u16 i, word;
3262
3263         DEBUGFUNC("e1000_read_nvm_ich8lan");
3264
3265         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3266             (words == 0)) {
3267                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3268                 ret_val = -E1000_ERR_NVM;
3269                 goto out;
3270         }
3271
3272         nvm->ops.acquire(hw);
3273
3274         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3275         if (ret_val != E1000_SUCCESS) {
3276                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3277                 bank = 0;
3278         }
3279
3280         act_offset = (bank) ? nvm->flash_bank_size : 0;
3281         act_offset += offset;
3282
3283         ret_val = E1000_SUCCESS;
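        /* Words with pending (modified) shadow RAM entries are returned from
         * the shadow RAM; all other words are read from the active flash
         * bank. */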
3284         for (i = 0; i < words; i++) {
3285                 if (dev_spec->shadow_ram[offset+i].modified) {
3286                         data[i] = dev_spec->shadow_ram[offset+i].value;
3287                 } else {
3288                         ret_val = e1000_read_flash_word_ich8lan(hw,
3289                                                                 act_offset + i,
3290                                                                 &word);
3291                         if (ret_val)
3292                                 break;
3293                         data[i] = word;
3294                 }
3295         }
3296
3297         nvm->ops.release(hw);
3298
3299 out:
3300         if (ret_val)
3301                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3302
3303         return ret_val;
3304 }
3305
3306 /**
3307  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3308  *  @hw: pointer to the HW structure
3309  *
3310  *  This function does initial flash setup so that a new read/write/erase cycle
3311  *  can be started.
3312  **/
3313 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3314 {
3315         union ich8_hws_flash_status hsfsts;
3316         s32 ret_val = -E1000_ERR_NVM;
3317
3318         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3319
3320         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3321
3322         /* Check if the flash descriptor is valid */
3323         if (!hsfsts.hsf_status.fldesvalid) {
3324                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3325                 return -E1000_ERR_NVM;
3326         }
3327
3328         /* Clear FCERR and DAEL in hw status by writing 1 */
3329         hsfsts.hsf_status.flcerr = 1;
3330         hsfsts.hsf_status.dael = 1;
3331         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3332
3333         /* Either we should have a hardware SPI cycle in progress
3334          * bit to check against, in order to start a new cycle or
3335          * FDONE bit should be changed in the hardware so that it
3336          * is 1 after hardware reset, which can then be used as an
3337          * indication whether a cycle is in progress or has been
3338          * completed.
3339          */
3340
3341         if (!hsfsts.hsf_status.flcinprog) {
3342                 /* There is no cycle running at present,
3343                  * so we can start a cycle.
3344                  * Begin by setting Flash Cycle Done.
3345                  */
3346                 hsfsts.hsf_status.flcdone = 1;
3347                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3348                 ret_val = E1000_SUCCESS;
3349         } else {
3350                 s32 i;
3351
3352                 /* Otherwise poll for some time so the current
3353                  * cycle has a chance to end before giving up.
3354                  */
3355                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3356                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3357                                                               ICH_FLASH_HSFSTS);
3358                         if (!hsfsts.hsf_status.flcinprog) {
3359                                 ret_val = E1000_SUCCESS;
3360                                 break;
3361                         }
3362                         usec_delay(1);
3363                 }
3364                 if (ret_val == E1000_SUCCESS) {
3365                         /* Successfully waited for the previous cycle to finish,
3366                          * now set the Flash Cycle Done.
3367                          */
3368                         hsfsts.hsf_status.flcdone = 1;
3369                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3370                                                 hsfsts.regval);
3371                 } else {
3372                         DEBUGOUT("Flash controller busy, cannot get access\n");
3373                 }
3374         }
3375
3376         return ret_val;
3377 }
3378
3379 /**
3380  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3381  *  @hw: pointer to the HW structure
3382  *  @timeout: maximum time to wait for completion
3383  *
3384  *  This function starts a flash cycle and waits for its completion.
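 *  The cycle type and data byte count are expected to have been programmed
 *  into the flash control register by the caller beforehand.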
3385  **/
3386 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3387 {
3388         union ich8_hws_flash_ctrl hsflctl;
3389         union ich8_hws_flash_status hsfsts;
3390         u32 i = 0;
3391
3392         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3393
3394         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3395         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3396         hsflctl.hsf_ctrl.flcgo = 1;
3397
3398         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3399
3400         /* wait till FDONE bit is set to 1 */
3401         do {
3402                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3403                 if (hsfsts.hsf_status.flcdone)
3404                         break;
3405                 usec_delay(1);
3406         } while (i++ < timeout);
3407
3408         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3409                 return E1000_SUCCESS;
3410
3411         return -E1000_ERR_NVM;
3412 }
3413
3414 /**
3415  *  e1000_read_flash_word_ich8lan - Read word from flash
3416  *  @hw: pointer to the HW structure
3417  *  @offset: offset to data location
3418  *  @data: pointer to the location for storing the data
3419  *
3420  *  Reads the flash word at offset into data.  Offset is converted
3421  *  to bytes before read.
3422  **/
3423 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3424                                          u16 *data)
3425 {
3426         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3427
3428         if (!data)
3429                 return -E1000_ERR_NVM;
3430
3431         /* Must convert offset into bytes. */
3432         offset <<= 1;
3433
3434         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3435 }
3436
3437 /**
3438  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3439  *  @hw: pointer to the HW structure
3440  *  @offset: The offset of the byte to read.
3441  *  @data: Pointer to a byte to store the value read.
3442  *
3443  *  Reads a single byte from the NVM using the flash access registers.
3444  **/
3445 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3446                                          u8 *data)
3447 {
3448         s32 ret_val;
3449         u16 word = 0;
3450
3451         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3452
3453         if (ret_val)
3454                 return ret_val;
3455
3456         *data = (u8)word;
3457
3458         return E1000_SUCCESS;
3459 }
3460
3461 /**
3462  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3463  *  @hw: pointer to the HW structure
3464  *  @offset: The offset (in bytes) of the byte or word to read.
3465  *  @size: Size of data to read, 1=byte 2=word
3466  *  @data: Pointer to the word to store the value read.
3467  *
3468  *  Reads a byte or word from the NVM using the flash access registers.
3469  **/
3470 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3471                                          u8 size, u16 *data)
3472 {
3473         union ich8_hws_flash_status hsfsts;
3474         union ich8_hws_flash_ctrl hsflctl;
3475         u32 flash_linear_addr;
3476         u32 flash_data = 0;
3477         s32 ret_val = -E1000_ERR_NVM;
3478         u8 count = 0;
3479
3480         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3481
3482         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3483                 return -E1000_ERR_NVM;
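        /* Absolute flash address: the masked byte offset added to the NVM
         * region's base address within the flash. */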
3484         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3485                              hw->nvm.flash_base_addr);
3486
3487         do {
3488                 usec_delay(1);
3489                 /* Steps */
3490                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3491                 if (ret_val != E1000_SUCCESS)
3492                         break;
3493                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3494
3495                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3496                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3497                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3498                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3499
3500                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3501
3502                 ret_val =
3503                     e1000_flash_cycle_ich8lan(hw,
3504                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3505
3506                 /* Check if FCERR is set to 1; if so, clear it and try
3507                  * the whole sequence a few more times, else read in
3508                  * the Flash Data0 register, least significant byte
3509                  * first.
3510                  */
3511                 if (ret_val == E1000_SUCCESS) {
3512                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3513                         if (size == 1)
3514                                 *data = (u8)(flash_data & 0x000000FF);
3515                         else if (size == 2)
3516                                 *data = (u16)(flash_data & 0x0000FFFF);
3517                         break;
3518                 } else {
3519                         /* If we've gotten here, then things are probably
3520                          * completely hosed, but if the error condition is
3521                          * detected, it won't hurt to give it another try...
3522                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3523                          */
3524                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3525                                                               ICH_FLASH_HSFSTS);
3526                         if (hsfsts.hsf_status.flcerr) {
3527                                 /* Repeat for some time before giving up. */
3528                                 continue;
3529                         } else if (!hsfsts.hsf_status.flcdone) {
3530                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3531                                 break;
3532                         }
3533                 }
3534         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3535
3536         return ret_val;
3537 }
3538
3539 /**
3540  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3541  *  @hw: pointer to the HW structure
3542  *  @offset: The offset (in words) of the word(s) to write.
3543  *  @words: Size of data to write in words
3544  *  @data: Pointer to the word(s) to write at offset.
3545  *
3546  *  Stages the word(s) in the shadow RAM; they are committed to the NVM later.
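 *
 *  Illustrative caller flow (a sketch; error handling elided, and offset/word
 *  are placeholder variables): stage the words with the write op, then commit
 *  them with the update op, e.g.
 *      ret_val = hw->nvm.ops.write(hw, offset, 1, &word);
 *      if (!ret_val)
 *              ret_val = hw->nvm.ops.update(hw);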
3547  **/
3548 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3549                                    u16 *data)
3550 {
3551         struct e1000_nvm_info *nvm = &hw->nvm;
3552         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3553         u16 i;
3554
3555         DEBUGFUNC("e1000_write_nvm_ich8lan");
3556
3557         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3558             (words == 0)) {
3559                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3560                 return -E1000_ERR_NVM;
3561         }
3562
3563         nvm->ops.acquire(hw);
3564
3565         for (i = 0; i < words; i++) {
3566                 dev_spec->shadow_ram[offset+i].modified = true;
3567                 dev_spec->shadow_ram[offset+i].value = data[i];
3568         }
3569
3570         nvm->ops.release(hw);
3571
3572         return E1000_SUCCESS;
3573 }
3574
3575 /**
3576  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3577  *  @hw: pointer to the HW structure
3578  *
3579  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3580  *  which writes the checksum to the shadow ram.  The changes in the shadow
3581  *  ram are then committed to the EEPROM by processing each bank at a time
3582  *  checking for the modified bit and writing only the pending changes.
3583  *  After a successful commit, the shadow ram is cleared and is ready for
3584  *  future writes.
3585  **/
3586 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3587 {
3588         struct e1000_nvm_info *nvm = &hw->nvm;
3589         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3590         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3591         s32 ret_val;
3592         u16 data;
3593
3594         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3595
3596         ret_val = e1000_update_nvm_checksum_generic(hw);
3597         if (ret_val)
3598                 goto out;
3599
3600         if (nvm->type != e1000_nvm_flash_sw)
3601                 goto out;
3602
3603         nvm->ops.acquire(hw);
3604
3605         /* We're writing to the opposite bank so if we're on bank 1,
3606          * write to bank 0 etc.  We also need to erase the segment that
3607          * is going to be written
3608          */
3609         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3610         if (ret_val != E1000_SUCCESS) {
3611                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3612                 bank = 0;
3613         }
3614
3615         if (bank == 0) {
3616                 new_bank_offset = nvm->flash_bank_size;
3617                 old_bank_offset = 0;
3618                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3619                 if (ret_val)
3620                         goto release;
3621         } else {
3622                 old_bank_offset = nvm->flash_bank_size;
3623                 new_bank_offset = 0;
3624                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3625                 if (ret_val)
3626                         goto release;
3627         }
3628
3629         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3630                 /* Determine whether to write the value stored
3631                  * in the other NVM bank or a modified value stored
3632                  * in the shadow RAM
3633                  */
3634                 if (dev_spec->shadow_ram[i].modified) {
3635                         data = dev_spec->shadow_ram[i].value;
3636                 } else {
3637                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3638                                                                 old_bank_offset,
3639                                                                 &data);
3640                         if (ret_val)
3641                                 break;
3642                 }
3643
3644                 /* If the word is 0x13, then make sure the signature bits
3645                  * (15:14) are 11b until the commit has completed.
3646                  * This will allow us to write 10b which indicates the
3647                  * signature is valid.  We want to do this after the write
3648                  * has completed so that we don't mark the segment valid
3649                  * while the write is still in progress
3650                  */
3651                 if (i == E1000_ICH_NVM_SIG_WORD)
3652                         data |= E1000_ICH_NVM_SIG_MASK;
3653
3654                 /* Convert offset to bytes. */
3655                 act_offset = (i + new_bank_offset) << 1;
3656
3657                 usec_delay(100);
3658                 /* Write the bytes to the new bank. */
3659                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3660                                                                act_offset,
3661                                                                (u8)data);
3662                 if (ret_val)
3663                         break;
3664
3665                 usec_delay(100);
3666                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3667                                                           act_offset + 1,
3668                                                           (u8)(data >> 8));
3669                 if (ret_val)
3670                         break;
3671         }
3672
3673         /* Don't bother writing the segment valid bits if sector
3674          * programming failed.
3675          */
3676         if (ret_val) {
3677                 DEBUGOUT("Flash commit failed.\n");
3678                 goto release;
3679         }
3680
3681         /* Finally validate the new segment by setting bits 15:14
3682          * to 10b in word 0x13.  This can be done without an
3683          * erase as well since these bits are 11b to start with
3684          * and we only need to change bit 14 to 0b.
3685          */
3686         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3687         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3688         if (ret_val)
3689                 goto release;
3690
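        /* Clear bit 14 so that bits 15:14 of word 0x13 read 10b, marking the
         * newly written bank's signature as valid. */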
3691         data &= 0xBFFF;
3692         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3693                                                        act_offset * 2 + 1,
3694                                                        (u8)(data >> 8));
3695         if (ret_val)
3696                 goto release;
3697
3698         /* And invalidate the previously valid segment by setting
3699          * its signature word (0x13) high byte to 0. This can be
3700          * done without an erase because flash erase sets all bits
3701          * to 1's. We can write 1's to 0's without an erase
3702          */
3703         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3704         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3705         if (ret_val)
3706                 goto release;
3707
3708         /* Great!  Everything worked, we can now clear the cached entries. */
3709         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3710                 dev_spec->shadow_ram[i].modified = false;
3711                 dev_spec->shadow_ram[i].value = 0xFFFF;
3712         }
3713
3714 release:
3715         nvm->ops.release(hw);
3716
3717         /* Reload the EEPROM, or else modifications will not appear
3718          * until after the next adapter reset.
3719          */
3720         if (!ret_val) {
3721                 nvm->ops.reload(hw);
3722                 msec_delay(10);
3723         }
3724
3725 out:
3726         if (ret_val)
3727                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3728
3729         return ret_val;
3730 }
3731
3732 /**
3733  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3734  *  @hw: pointer to the HW structure
3735  *
3736  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3737  *  If the bit is 0, the EEPROM has been modified but the checksum was not
3738  *  calculated, in which case we need to calculate the checksum and set bit 6.
3739  **/
3740 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3741 {
3742         s32 ret_val;
3743         u16 data;
3744         u16 word;
3745         u16 valid_csum_mask;
3746
3747         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3748
3749         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3750          * the checksum needs to be fixed.  This bit is an indication that
3751          * the NVM was prepared by OEM software that did not calculate
3752          * the checksum...a likely scenario.
3753          */
3754         switch (hw->mac.type) {
3755         case e1000_pch_lpt:
3756                 word = NVM_COMPAT;
3757                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3758                 break;
3759         default:
3760                 word = NVM_FUTURE_INIT_WORD1;
3761                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3762                 break;
3763         }
3764
3765         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3766         if (ret_val)
3767                 return ret_val;
3768
3769         if (!(data & valid_csum_mask)) {
3770                 data |= valid_csum_mask;
3771                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3772                 if (ret_val)
3773                         return ret_val;
3774                 ret_val = hw->nvm.ops.update(hw);
3775                 if (ret_val)
3776                         return ret_val;
3777         }
3778
3779         return e1000_validate_nvm_checksum_generic(hw);
3780 }
3781
3782 /**
3783  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3784  *  @hw: pointer to the HW structure
3785  *  @offset: The offset (in bytes) of the byte/word to write.
3786  *  @size: Size of data to write, 1=byte 2=word
3787  *  @data: The byte(s) to write to the NVM.
3788  *
3789  *  Writes one/two bytes to the NVM using the flash access registers.
3790  **/
3791 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3792                                           u8 size, u16 data)
3793 {
3794         union ich8_hws_flash_status hsfsts;
3795         union ich8_hws_flash_ctrl hsflctl;
3796         u32 flash_linear_addr;
3797         u32 flash_data = 0;
3798         s32 ret_val;
3799         u8 count = 0;
3800
3801         DEBUGFUNC("e1000_write_ich8_data");
3802
3803         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3804                 return -E1000_ERR_NVM;
3805
3806         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3807                              hw->nvm.flash_base_addr);
3808
3809         do {
3810                 usec_delay(1);
3811                 /* Steps */
3812                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3813                 if (ret_val != E1000_SUCCESS)
3814                         break;
3815                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3816
3817                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3818                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3819                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3820                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3821
3822                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3823
3824                 if (size == 1)
3825                         flash_data = (u32)data & 0x00FF;
3826                 else
3827                         flash_data = (u32)data;
3828
3829                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3830
3831                 /* Check if FCERR is set to 1; if so, clear it
3832                  * and try the whole sequence a few more times, else done
3833                  */
3834                 ret_val =
3835                     e1000_flash_cycle_ich8lan(hw,
3836                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3837                 if (ret_val == E1000_SUCCESS)
3838                         break;
3839
3840                 /* If we're here, then things are most likely
3841                  * completely hosed, but if the error condition
3842                  * is detected, it won't hurt to give it another
3843                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3844                  */
3845                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3846                 if (hsfsts.hsf_status.flcerr)
3847                         /* Repeat for some time before giving up. */
3848                         continue;
3849                 if (!hsfsts.hsf_status.flcdone) {
3850                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3851                         break;
3852                 }
3853         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3854
3855         return ret_val;
3856 }
3857
3858 /**
3859  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3860  *  @hw: pointer to the HW structure
3861  *  @offset: The offset of the byte to write.
3862  *  @data: The byte to write to the NVM.
3863  *
3864  *  Writes a single byte to the NVM using the flash access registers.
3865  **/
3866 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3867                                           u8 data)
3868 {
3869         u16 word = (u16)data;
3870
3871         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3872
3873         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3874 }
3875
3876 /**
3877  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3878  *  @hw: pointer to the HW structure
3879  *  @offset: The offset of the byte to write.
3880  *  @byte: The byte to write to the NVM.
3881  *
3882  *  Writes a single byte to the NVM using the flash access registers.
3883  *  Goes through a retry algorithm before giving up.
3884  **/
3885 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3886                                                 u32 offset, u8 byte)
3887 {
3888         s32 ret_val;
3889         u16 program_retries;
3890
3891         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3892
3893         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3894         if (!ret_val)
3895                 return ret_val;
3896
3897         for (program_retries = 0; program_retries < 100; program_retries++) {
3898                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3899                 usec_delay(100);
3900                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3901                 if (ret_val == E1000_SUCCESS)
3902                         break;
3903         }
3904         if (program_retries == 100)
3905                 return -E1000_ERR_NVM;
3906
3907         return E1000_SUCCESS;
3908 }
3909
3910 /**
3911  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3912  *  @hw: pointer to the HW structure
3913  *  @bank: 0 for first bank, 1 for second bank, etc.
3914  *
3915  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3916  *  bank N is 4096 * N + flash_reg_addr.
3917  **/
3918 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3919 {
3920         struct e1000_nvm_info *nvm = &hw->nvm;
3921         union ich8_hws_flash_status hsfsts;
3922         union ich8_hws_flash_ctrl hsflctl;
3923         u32 flash_linear_addr;
3924         /* bank size is in 16bit words - adjust to bytes */
3925         u32 flash_bank_size = nvm->flash_bank_size * 2;
3926         s32 ret_val;
3927         s32 count = 0;
3928         s32 j, iteration, sector_size;
3929
3930         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3931
3932         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3933
3934         /* Determine HW Sector size: Read BERASE bits of hw flash status
3935          * register
3936          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3937          *     consecutive sectors.  The start index for the nth Hw sector
3938          *     can be calculated as = bank * 4096 + n * 256
3939          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3940          *     The start index for the nth Hw sector can be calculated
3941          *     as = bank * 4096
3942          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3943          *     (ich9 only, otherwise error condition)
3944          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3945          */
3946         switch (hsfsts.hsf_status.berasesz) {
3947         case 0:
3948                 /* Hw sector size 256 */
3949                 sector_size = ICH_FLASH_SEG_SIZE_256;
3950                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3951                 break;
3952         case 1:
3953                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3954                 iteration = 1;
3955                 break;
3956         case 2:
3957                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3958                 iteration = 1;
3959                 break;
3960         case 3:
3961                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3962                 iteration = 1;
3963                 break;
3964         default:
3965                 return -E1000_ERR_NVM;
3966         }
3967
3968         /* Start with the base address, then add the sector offset. */
3969         flash_linear_addr = hw->nvm.flash_base_addr;
3970         flash_linear_addr += (bank) ? flash_bank_size : 0;
3971
3972         for (j = 0; j < iteration; j++) {
3973                 do {
3974                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3975
3976                         /* Steps */
3977                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3978                         if (ret_val)
3979                                 return ret_val;
3980
3981                         /* Write a value 11 (block Erase) in Flash
3982                          * Cycle field in hw flash control
3983                          */
3984                         hsflctl.regval =
3985                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3986
3987                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3988                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3989                                                 hsflctl.regval);
3990
3991                         /* Write the last 24 bits of an index within the
3992                          * block into Flash Linear address field in Flash
3993                          * Address.
3994                          */
3995                         flash_linear_addr += (j * sector_size);
3996                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3997                                               flash_linear_addr);
3998
3999                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4000                         if (ret_val == E1000_SUCCESS)
4001                                 break;
4002
4003                         /* Check if FCERR is set to 1.  If 1,
4004                          * clear it and try the whole sequence
4005                          * a few more times else Done
4006                          */
4007                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4008                                                       ICH_FLASH_HSFSTS);
4009                         if (hsfsts.hsf_status.flcerr)
4010                                 /* repeat for some time before giving up */
4011                                 continue;
4012                         else if (!hsfsts.hsf_status.flcdone)
4013                                 return ret_val;
4014                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4015         }
4016
4017         return E1000_SUCCESS;
4018 }
4019
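/* Illustrative sketch, not part of the shared base code: how the erase loop
 * above maps a bank number and sector index to a flash linear address in the
 * common 4 KB erase-sector case.  The E1000_EXAMPLE_SKETCHES guard and the
 * *_example helper name are hypothetical and never defined, so this block is
 * not compiled into the driver.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static u32 e1000_erase_addr_example(struct e1000_hw *hw, u32 bank,
                                    u32 flash_bank_size, u32 j)
{
        /* Start at the flash base and step past bank 0 when erasing bank 1. */
        u32 addr = hw->nvm.flash_base_addr;

        addr += bank ? flash_bank_size : 0;

        /* The j-th 4 KB sector within the bank is offset by j * 4096. */
        return addr + (j * ICH_FLASH_SEG_SIZE_4K);
}
#endif /* E1000_EXAMPLE_SKETCHES */
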
4020 /**
4021  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4022  *  @hw: pointer to the HW structure
4023  *  @data: Pointer to the LED settings
4024  *
4025  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4026  *  setting is all 0's or F's, set the LED default to a valid LED default
4027  *  setting.
4028  **/
4029 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4030 {
4031         s32 ret_val;
4032
4033         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4034
4035         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4036         if (ret_val) {
4037                 DEBUGOUT("NVM Read Error\n");
4038                 return ret_val;
4039         }
4040
4041         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4042                 *data = ID_LED_DEFAULT_ICH8LAN;
4043
4044         return E1000_SUCCESS;
4045 }
4046
4047 /**
4048  *  e1000_id_led_init_pchlan - store LED configurations
4049  *  @hw: pointer to the HW structure
4050  *
4051  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4052  *  the PHY LED configuration register.
4053  *
4054  *  PCH also does not have an "always on" or "always off" mode, which
4055  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4056  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4057  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4058  *  link based on logic in e1000_led_[on|off]_pchlan().
4059  **/
4060 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4061 {
4062         struct e1000_mac_info *mac = &hw->mac;
4063         s32 ret_val;
4064         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4065         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4066         u16 data, i, temp, shift;
4067
4068         DEBUGFUNC("e1000_id_led_init_pchlan");
4069
4070         /* Get default ID LED modes */
4071         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4072         if (ret_val)
4073                 return ret_val;
4074
4075         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4076         mac->ledctl_mode1 = mac->ledctl_default;
4077         mac->ledctl_mode2 = mac->ledctl_default;
4078
4079         for (i = 0; i < 4; i++) {
4080                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4081                 shift = (i * 5);
4082                 switch (temp) {
4083                 case ID_LED_ON1_DEF2:
4084                 case ID_LED_ON1_ON2:
4085                 case ID_LED_ON1_OFF2:
4086                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4087                         mac->ledctl_mode1 |= (ledctl_on << shift);
4088                         break;
4089                 case ID_LED_OFF1_DEF2:
4090                 case ID_LED_OFF1_ON2:
4091                 case ID_LED_OFF1_OFF2:
4092                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4093                         mac->ledctl_mode1 |= (ledctl_off << shift);
4094                         break;
4095                 default:
4096                         /* Do nothing */
4097                         break;
4098                 }
4099                 switch (temp) {
4100                 case ID_LED_DEF1_ON2:
4101                 case ID_LED_ON1_ON2:
4102                 case ID_LED_OFF1_ON2:
4103                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4104                         mac->ledctl_mode2 |= (ledctl_on << shift);
4105                         break;
4106                 case ID_LED_DEF1_OFF2:
4107                 case ID_LED_ON1_OFF2:
4108                 case ID_LED_OFF1_OFF2:
4109                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4110                         mac->ledctl_mode2 |= (ledctl_off << shift);
4111                         break;
4112                 default:
4113                         /* Do nothing */
4114                         break;
4115                 }
4116         }
4117
4118         return E1000_SUCCESS;
4119 }
4120
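/* Illustrative sketch, not part of the shared base code: the NVM ID LED word
 * used above packs four LEDs as 4-bit modes (LED i at bit i * 4), while the
 * PHY LED configuration image packs each LED as a 5-bit field (LED i at bit
 * i * 5).  The guard macro and helper name are hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static void e1000_id_led_fields_example(u16 nvm_word, u32 ledctl, u16 i,
                                        u16 *nvm_mode, u32 *phy_field)
{
        /* 4-bit NVM mode for LED i, as extracted by the loop above. */
        *nvm_mode = (nvm_word >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;

        /* 5-bit PHY LED field for LED i in the ledctl_mode1/2 image. */
        *phy_field = (ledctl >> (i * 5)) & E1000_PHY_LED0_MASK;
}
#endif /* E1000_EXAMPLE_SKETCHES */
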
4121 /**
4122  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4123  *  @hw: pointer to the HW structure
4124  *
4125  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4126  *  register, so the bus width is hard coded.
4127  **/
4128 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4129 {
4130         struct e1000_bus_info *bus = &hw->bus;
4131         s32 ret_val;
4132
4133         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4134
4135         ret_val = e1000_get_bus_info_pcie_generic(hw);
4136
4137         /* ICH devices are "PCI Express"-ish.  They have
4138          * a configuration space, but do not contain
4139          * PCI Express Capability registers, so bus width
4140          * must be hardcoded.
4141          */
4142         if (bus->width == e1000_bus_width_unknown)
4143                 bus->width = e1000_bus_width_pcie_x1;
4144
4145         return ret_val;
4146 }
4147
4148 /**
4149  *  e1000_reset_hw_ich8lan - Reset the hardware
4150  *  @hw: pointer to the HW structure
4151  *
4152  *  Does a full reset of the hardware which includes a reset of the PHY and
4153  *  MAC.
4154  **/
4155 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4156 {
4157         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4158         u16 kum_cfg;
4159         u32 ctrl, reg;
4160         s32 ret_val;
4161
4162         DEBUGFUNC("e1000_reset_hw_ich8lan");
4163
4164         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4165          * on the last TLP read/write transaction when MAC is reset.
4166          */
4167         ret_val = e1000_disable_pcie_master_generic(hw);
4168         if (ret_val)
4169                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4170
4171         DEBUGOUT("Masking off all interrupts\n");
4172         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4173
4174         /* Disable the Transmit and Receive units.  Then delay to allow
4175          * any pending transactions to complete before we hit the MAC
4176          * with the global reset.
4177          */
4178         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4179         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4180         E1000_WRITE_FLUSH(hw);
4181
4182         msec_delay(10);
4183
4184         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4185         if (hw->mac.type == e1000_ich8lan) {
4186                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4187                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4188                 /* Set Packet Buffer Size to 16k. */
4189                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4190         }
4191
4192         if (hw->mac.type == e1000_pchlan) {
4193                 /* Save the NVM K1 bit setting */
4194                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4195                 if (ret_val)
4196                         return ret_val;
4197
4198                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4199                         dev_spec->nvm_k1_enabled = true;
4200                 else
4201                         dev_spec->nvm_k1_enabled = false;
4202         }
4203
4204         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4205
4206         if (!hw->phy.ops.check_reset_block(hw)) {
4207                 /* Full-chip reset requires MAC and PHY reset at the same
4208                  * time to make sure the interface between MAC and the
4209                  * external PHY is reset.
4210                  */
4211                 ctrl |= E1000_CTRL_PHY_RST;
4212
4213                 /* Gate automatic PHY configuration by hardware on
4214                  * non-managed 82579
4215                  */
4216                 if ((hw->mac.type == e1000_pch2lan) &&
4217                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4218                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4219         }
4220         ret_val = e1000_acquire_swflag_ich8lan(hw);
4221         DEBUGOUT("Issuing a global reset to ich8lan\n");
4222         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4223         /* cannot issue a flush here because it hangs the hardware */
4224         msec_delay(20);
4225
4226         /* Set Phy Config Counter to 50msec */
4227         if (hw->mac.type == e1000_pch2lan) {
4228                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4229                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4230                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4231                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4232         }
4233
4234         if (!ret_val)
4235                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4236
4237         if (ctrl & E1000_CTRL_PHY_RST) {
4238                 ret_val = hw->phy.ops.get_cfg_done(hw);
4239                 if (ret_val)
4240                         return ret_val;
4241
4242                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4243                 if (ret_val)
4244                         return ret_val;
4245         }
4246
4247         /* For PCH, this write will make sure that any noise
4248          * will be detected as a CRC error and be dropped rather than show up
4249          * as a bad packet to the DMA engine.
4250          */
4251         if (hw->mac.type == e1000_pchlan)
4252                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4253
4254         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4255         E1000_READ_REG(hw, E1000_ICR);
4256
4257         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4258         reg |= E1000_KABGTXD_BGSQLBIAS;
4259         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4260
4261         return E1000_SUCCESS;
4262 }
4263
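/* Illustrative usage sketch, not part of the shared base code: callers are
 * expected to follow the full reset above with MAC re-initialization through
 * the ops table, which is populated elsewhere in this file.  The guard macro
 * and helper name are hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static s32 e1000_reset_then_init_example(struct e1000_hw *hw)
{
        s32 ret_val;

        /* e1000_reset_hw_ich8lan() is expected behind mac.ops.reset_hw. */
        ret_val = hw->mac.ops.reset_hw(hw);
        if (ret_val)
                return ret_val;

        /* e1000_init_hw_ich8lan() is expected behind mac.ops.init_hw. */
        return hw->mac.ops.init_hw(hw);
}
#endif /* E1000_EXAMPLE_SKETCHES */
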
4264 /**
4265  *  e1000_init_hw_ich8lan - Initialize the hardware
4266  *  @hw: pointer to the HW structure
4267  *
4268  *  Prepares the hardware for transmit and receive by doing the following:
4269  *   - initialize hardware bits
4270  *   - initialize LED identification
4271  *   - setup receive address registers
4272  *   - setup flow control
4273  *   - setup transmit descriptors
4274  *   - clear statistics
4275  **/
4276 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4277 {
4278         struct e1000_mac_info *mac = &hw->mac;
4279         u32 ctrl_ext, txdctl, snoop;
4280         s32 ret_val;
4281         u16 i;
4282
4283         DEBUGFUNC("e1000_init_hw_ich8lan");
4284
4285         e1000_initialize_hw_bits_ich8lan(hw);
4286
4287         /* Initialize identification LED */
4288         ret_val = mac->ops.id_led_init(hw);
4289         /* An error is not fatal and we should not stop init due to this */
4290         if (ret_val)
4291                 DEBUGOUT("Error initializing identification LED\n");
4292
4293         /* Setup the receive address. */
4294         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4295
4296         /* Zero out the Multicast HASH table */
4297         DEBUGOUT("Zeroing the MTA\n");
4298         for (i = 0; i < mac->mta_reg_count; i++)
4299                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4300
4301         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4302          * the ME.  Disable wakeup by clearing the host wakeup bit.
4303          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4304          */
4305         if (hw->phy.type == e1000_phy_82578) {
4306                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4307                 i &= ~BM_WUC_HOST_WU_BIT;
4308                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4309                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4310                 if (ret_val)
4311                         return ret_val;
4312         }
4313
4314         /* Setup link and flow control */
4315         ret_val = mac->ops.setup_link(hw);
4316
4317         /* Set the transmit descriptor write-back policy for both queues */
4318         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4319         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4320                   E1000_TXDCTL_FULL_TX_DESC_WB);
4321         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4322                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4323         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4324         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4325         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4326                   E1000_TXDCTL_FULL_TX_DESC_WB);
4327         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4328                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4329         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4330
4331         /* ICH8 has opposite polarity of no_snoop bits.
4332          * By default, we should use snoop behavior.
4333          */
4334         if (mac->type == e1000_ich8lan)
4335                 snoop = PCIE_ICH8_SNOOP_ALL;
4336         else
4337                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4338         e1000_set_pcie_no_snoop_generic(hw, snoop);
4339
4340         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4341         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4342         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4343
4344         /* Clear all of the statistics registers (clear on read).  It is
4345          * important that we do this after we have tried to establish link
4346          * because the symbol error count will increment wildly if there
4347          * is no link.
4348          */
4349         e1000_clear_hw_cntrs_ich8lan(hw);
4350
4351         return ret_val;
4352 }
4353
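/* Illustrative sketch, not part of the shared base code: the per-queue TXDCTL
 * write-back/prefetch programming duplicated above for queues 0 and 1,
 * factored into a single helper.  The guard macro and helper name are
 * hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static void e1000_set_txdctl_wb_example(struct e1000_hw *hw, int queue)
{
        u32 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(queue));

        /* Write back descriptors only when fully used and prefetch the
         * maximum number of descriptors, as init_hw does above.
         */
        txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
                 E1000_TXDCTL_FULL_TX_DESC_WB;
        txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
                 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
        E1000_WRITE_REG(hw, E1000_TXDCTL(queue), txdctl);
}
#endif /* E1000_EXAMPLE_SKETCHES */
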
4354 /**
4355  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4356  *  @hw: pointer to the HW structure
4357  *
4358  *  Sets/Clears required hardware bits necessary for correctly setting up the
4359  *  hardware for transmit and receive.
4360  **/
4361 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4362 {
4363         u32 reg;
4364
4365         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4366
4367         /* Extended Device Control */
4368         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4369         reg |= (1 << 22);
4370         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4371         if (hw->mac.type >= e1000_pchlan)
4372                 reg |= E1000_CTRL_EXT_PHYPDEN;
4373         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4374
4375         /* Transmit Descriptor Control 0 */
4376         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4377         reg |= (1 << 22);
4378         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4379
4380         /* Transmit Descriptor Control 1 */
4381         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4382         reg |= (1 << 22);
4383         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4384
4385         /* Transmit Arbitration Control 0 */
4386         reg = E1000_READ_REG(hw, E1000_TARC(0));
4387         if (hw->mac.type == e1000_ich8lan)
4388                 reg |= (1 << 28) | (1 << 29);
4389         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4390         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4391
4392         /* Transmit Arbitration Control 1 */
4393         reg = E1000_READ_REG(hw, E1000_TARC(1));
4394         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4395                 reg &= ~(1 << 28);
4396         else
4397                 reg |= (1 << 28);
4398         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4399         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4400
4401         /* Device Status */
4402         if (hw->mac.type == e1000_ich8lan) {
4403                 reg = E1000_READ_REG(hw, E1000_STATUS);
4404                 reg &= ~(1 << 31);
4405                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4406         }
4407
4408         /* Work around a descriptor data corruption issue during NFSv2 UDP
4409          * traffic by simply disabling the NFS filtering capability.
4410          */
4411         reg = E1000_READ_REG(hw, E1000_RFCTL);
4412         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4413
4414         /* Disable IPv6 extension header parsing because some malformed
4415          * IPv6 headers can hang the Rx.
4416          */
4417         if (hw->mac.type == e1000_ich8lan)
4418                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4419         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4420
4421         /* Enable ECC on Lynxpoint */
4422         if (hw->mac.type == e1000_pch_lpt) {
4423                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4424                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4425                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4426
4427                 reg = E1000_READ_REG(hw, E1000_CTRL);
4428                 reg |= E1000_CTRL_MEHE;
4429                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4430         }
4431
4432         return;
4433 }
4434
4435 /**
4436  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4437  *  @hw: pointer to the HW structure
4438  *
4439  *  Determines which flow control settings to use, then configures flow
4440  *  control.  Calls the appropriate media-specific link configuration
4441  *  function.  Assuming the adapter has a valid link partner, a valid link
4442  *  should be established.  Assumes the hardware has previously been reset
4443  *  and the transmitter and receiver are not enabled.
4444  **/
4445 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4446 {
4447         s32 ret_val;
4448
4449         DEBUGFUNC("e1000_setup_link_ich8lan");
4450
4451         if (hw->phy.ops.check_reset_block(hw))
4452                 return E1000_SUCCESS;
4453
4454         /* ICH parts do not have a word in the NVM to determine
4455          * the default flow control setting, so we explicitly
4456          * set it to full.
4457          */
4458         if (hw->fc.requested_mode == e1000_fc_default)
4459                 hw->fc.requested_mode = e1000_fc_full;
4460
4461         /* Save off the requested flow control mode for use later.  Depending
4462          * on the link partner's capabilities, we may or may not use this mode.
4463          */
4464         hw->fc.current_mode = hw->fc.requested_mode;
4465
4466         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4467                 hw->fc.current_mode);
4468
4469         /* Continue to configure the copper link. */
4470         ret_val = hw->mac.ops.setup_physical_interface(hw);
4471         if (ret_val)
4472                 return ret_val;
4473
4474         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4475         if ((hw->phy.type == e1000_phy_82578) ||
4476             (hw->phy.type == e1000_phy_82579) ||
4477             (hw->phy.type == e1000_phy_i217) ||
4478             (hw->phy.type == e1000_phy_82577)) {
4479                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4480
4481                 ret_val = hw->phy.ops.write_reg(hw,
4482                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4483                                              hw->fc.pause_time);
4484                 if (ret_val)
4485                         return ret_val;
4486         }
4487
4488         return e1000_set_fc_watermarks_generic(hw);
4489 }
4490
4491 /**
4492  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4493  *  @hw: pointer to the HW structure
4494  *
4495  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4496  *  when polling the PHY, then calls the generic setup_copper_link to finish
4497  *  configuring the copper link.
4498  **/
4499 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4500 {
4501         u32 ctrl;
4502         s32 ret_val;
4503         u16 reg_data;
4504
4505         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4506
4507         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4508         ctrl |= E1000_CTRL_SLU;
4509         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4510         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4511
4512         /* Set the mac to wait the maximum time between each iteration
4513          * and increase the max iterations when polling the phy;
4514          * this fixes erroneous timeouts at 10Mbps.
4515          */
4516         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4517                                                0xFFFF);
4518         if (ret_val)
4519                 return ret_val;
4520         ret_val = e1000_read_kmrn_reg_generic(hw,
4521                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4522                                               &reg_data);
4523         if (ret_val)
4524                 return ret_val;
4525         reg_data |= 0x3F;
4526         ret_val = e1000_write_kmrn_reg_generic(hw,
4527                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4528                                                reg_data);
4529         if (ret_val)
4530                 return ret_val;
4531
4532         switch (hw->phy.type) {
4533         case e1000_phy_igp_3:
4534                 ret_val = e1000_copper_link_setup_igp(hw);
4535                 if (ret_val)
4536                         return ret_val;
4537                 break;
4538         case e1000_phy_bm:
4539         case e1000_phy_82578:
4540                 ret_val = e1000_copper_link_setup_m88(hw);
4541                 if (ret_val)
4542                         return ret_val;
4543                 break;
4544         case e1000_phy_82577:
4545         case e1000_phy_82579:
4546                 ret_val = e1000_copper_link_setup_82577(hw);
4547                 if (ret_val)
4548                         return ret_val;
4549                 break;
4550         case e1000_phy_ife:
4551                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4552                                                &reg_data);
4553                 if (ret_val)
4554                         return ret_val;
4555
4556                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4557
4558                 switch (hw->phy.mdix) {
4559                 case 1:
4560                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4561                         break;
4562                 case 2:
4563                         reg_data |= IFE_PMC_FORCE_MDIX;
4564                         break;
4565                 case 0:
4566                 default:
4567                         reg_data |= IFE_PMC_AUTO_MDIX;
4568                         break;
4569                 }
4570                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4571                                                 reg_data);
4572                 if (ret_val)
4573                         return ret_val;
4574                 break;
4575         default:
4576                 break;
4577         }
4578
4579         return e1000_setup_copper_link_generic(hw);
4580 }
4581
4582 /**
4583  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4584  *  @hw: pointer to the HW structure
4585  *
4586  *  Calls the PHY specific link setup function and then calls the
4587  *  generic setup_copper_link to finish configuring the link for
4588  *  Lynxpoint PCH devices.
4589  **/
4590 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4591 {
4592         u32 ctrl;
4593         s32 ret_val;
4594
4595         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4596
4597         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4598         ctrl |= E1000_CTRL_SLU;
4599         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4600         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4601
4602         ret_val = e1000_copper_link_setup_82577(hw);
4603         if (ret_val)
4604                 return ret_val;
4605
4606         return e1000_setup_copper_link_generic(hw);
4607 }
4608
4609 /**
4610  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4611  *  @hw: pointer to the HW structure
4612  *  @speed: pointer to store current link speed
4613  *  @duplex: pointer to store the current link duplex
4614  *
4615  *  Calls the generic get_speed_and_duplex to retrieve the current link
4616  *  information and then calls the Kumeran lock loss workaround for links at
4617  *  gigabit speeds.
4618  **/
4619 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4620                                           u16 *duplex)
4621 {
4622         s32 ret_val;
4623
4624         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4625
4626         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4627         if (ret_val)
4628                 return ret_val;
4629
4630         if ((hw->mac.type == e1000_ich8lan) &&
4631             (hw->phy.type == e1000_phy_igp_3) &&
4632             (*speed == SPEED_1000)) {
4633                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4634         }
4635
4636         return ret_val;
4637 }
4638
4639 /**
4640  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4641  *  @hw: pointer to the HW structure
4642  *
4643  *  Work-around for 82566 Kumeran PCS lock loss:
4644  *  On link status change (i.e. PCI reset, speed change) and link is up and
4645  *  speed is gigabit-
4646  *  speed is gigabit:
4647  *    1) wait 1ms for Kumeran link to come up
4648  *    2) check Kumeran Diagnostic register PCS lock loss bit
4649  *    3) if not set the link is locked (all is good), otherwise...
4650  *    4) reset the PHY
4651  *    5) repeat up to 10 times
4652  *  Note: this is only called for IGP3 copper when speed is 1gb.
4653  **/
4654 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4655 {
4656         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4657         u32 phy_ctrl;
4658         s32 ret_val;
4659         u16 i, data;
4660         bool link;
4661
4662         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4663
4664         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4665                 return E1000_SUCCESS;
4666
4667         /* Make sure link is up before proceeding.  If not, just return.
4668          * Attempting this while the link is negotiating fouled up link
4669          * stability.
4670          */
4671         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4672         if (!link)
4673                 return E1000_SUCCESS;
4674
4675         for (i = 0; i < 10; i++) {
4676                 /* read once to clear */
4677                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4678                 if (ret_val)
4679                         return ret_val;
4680                 /* and again to get new status */
4681                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4682                 if (ret_val)
4683                         return ret_val;
4684
4685                 /* check for PCS lock */
4686                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4687                         return E1000_SUCCESS;
4688
4689                 /* Issue PHY reset */
4690                 hw->phy.ops.reset(hw);
4691                 msec_delay_irq(5);
4692         }
4693         /* Disable GigE link negotiation */
4694         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4695         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4696                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4697         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4698
4699         /* Call gig speed drop workaround on Gig disable before accessing
4700          * any PHY registers
4701          */
4702         e1000_gig_downshift_workaround_ich8lan(hw);
4703
4704         /* unable to acquire PCS lock */
4705         return -E1000_ERR_PHY;
4706 }
4707
4708 /**
4709  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4710  *  @hw: pointer to the HW structure
4711  *  @state: boolean value used to set the current Kumeran workaround state
4712  *
4713  *  If ICH8, set the current Kumeran workaround state (enabled = true,
4714  *  disabled = false).
4715  **/
4716 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4717                                                  bool state)
4718 {
4719         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4720
4721         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4722
4723         if (hw->mac.type != e1000_ich8lan) {
4724                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4725                 return;
4726         }
4727
4728         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4729
4730         return;
4731 }
4732
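/* Illustrative usage sketch, not part of the shared base code: a driver that
 * detects an ICH8 part with the IGP3 PHY would normally enable the Kumeran
 * lock loss workaround once at init so the early-out check in
 * e1000_kmrn_lock_loss_workaround_ich8lan() passes.  The guard macro and
 * helper name are hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static void e1000_enable_kmrn_workaround_example(struct e1000_hw *hw)
{
        if (hw->mac.type == e1000_ich8lan &&
            hw->phy.type == e1000_phy_igp_3)
                e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
}
#endif /* E1000_EXAMPLE_SKETCHES */
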
4733 /**
4734  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4735  *  @hw: pointer to the HW structure
4736  *
4737  *  Workaround for 82566 power-down on D3 entry:
4738  *    1) disable gigabit link
4739  *    2) write VR power-down enable
4740  *    3) read it back
4741  *  Continue if successful, else issue LCD reset and repeat
4742  **/
4743 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4744 {
4745         u32 reg;
4746         u16 data;
4747         u8  retry = 0;
4748
4749         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4750
4751         if (hw->phy.type != e1000_phy_igp_3)
4752                 return;
4753
4754         /* Try the workaround twice (if needed) */
4755         do {
4756                 /* Disable link */
4757                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4758                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4759                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4760                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4761
4762                 /* Call gig speed drop workaround on Gig disable before
4763                  * accessing any PHY registers
4764                  */
4765                 if (hw->mac.type == e1000_ich8lan)
4766                         e1000_gig_downshift_workaround_ich8lan(hw);
4767
4768                 /* Write VR power-down enable */
4769                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4770                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4771                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4772                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4773
4774                 /* Read it back and test */
4775                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4776                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4777                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4778                         break;
4779
4780                 /* Issue PHY reset and repeat at most one more time */
4781                 reg = E1000_READ_REG(hw, E1000_CTRL);
4782                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4783                 retry++;
4784         } while (retry);
4785 }
4786
4787 /**
4788  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4789  *  @hw: pointer to the HW structure
4790  *
4791  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4792  *  LPLU, Gig disable, MDIC PHY reset):
4793  *    1) Set Kumeran Near-end loopback
4794  *    2) Clear Kumeran Near-end loopback
4795  *  Should only be called for ICH8[m] devices with any 1G Phy.
4796  **/
4797 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4798 {
4799         s32 ret_val;
4800         u16 reg_data;
4801
4802         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4803
4804         if ((hw->mac.type != e1000_ich8lan) ||
4805             (hw->phy.type == e1000_phy_ife))
4806                 return;
4807
4808         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4809                                               &reg_data);
4810         if (ret_val)
4811                 return;
4812         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4813         ret_val = e1000_write_kmrn_reg_generic(hw,
4814                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4815                                                reg_data);
4816         if (ret_val)
4817                 return;
4818         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4819         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4820                                      reg_data);
4821 }
4822
4823 /**
4824  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4825  *  @hw: pointer to the HW structure
4826  *
4827  *  During S0 to Sx transition, it is possible the link remains at gig
4828  *  instead of negotiating to a lower speed.  Before going to Sx, set
4829  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4830  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4831  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4832  *  needs to be written.
4833  *  Parts that support (and are linked to a partner which supports) EEE in
4834  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4835  *  than 10Mbps w/o EEE.
4836  **/
4837 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4838 {
4839         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4840         u32 phy_ctrl;
4841         s32 ret_val;
4842
4843         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4844
4845         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4846         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4847
4848         if (hw->phy.type == e1000_phy_i217) {
4849                 u16 phy_reg, device_id = hw->device_id;
4850
4851                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4852                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4853                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4854                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4855                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4856
4857                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4858                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4859                 }
4860
4861                 ret_val = hw->phy.ops.acquire(hw);
4862                 if (ret_val)
4863                         goto out;
4864
4865                 if (!dev_spec->eee_disable) {
4866                         u16 eee_advert;
4867
4868                         ret_val =
4869                             e1000_read_emi_reg_locked(hw,
4870                                                       I217_EEE_ADVERTISEMENT,
4871                                                       &eee_advert);
4872                         if (ret_val)
4873                                 goto release;
4874
4875                         /* Disable LPLU if both link partners support 100BaseT
4876                          * EEE and 100Full is advertised on both ends of the
4877                          * link, and enable Auto Enable LPI since there will
4878                          * be no driver to enable LPI while in Sx.
4879                          */
4880                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4881                             (dev_spec->eee_lp_ability &
4882                              I82579_EEE_100_SUPPORTED) &&
4883                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4884                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4885                                               E1000_PHY_CTRL_NOND0A_LPLU);
4886
4887                                 /* Set Auto Enable LPI after link up */
4888                                 hw->phy.ops.read_reg_locked(hw,
4889                                                             I217_LPI_GPIO_CTRL,
4890                                                             &phy_reg);
4891                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4892                                 hw->phy.ops.write_reg_locked(hw,
4893                                                              I217_LPI_GPIO_CTRL,
4894                                                              phy_reg);
4895                         }
4896                 }
4897
4898                 /* For i217 Intel Rapid Start Technology support,
4899                  * when the system is going into Sx and no manageability engine
4900                  * is present, the driver must configure proxy to reset only on
4901                  * power good.  LPI (Low Power Idle) state must also reset only
4902                  * on power good, as well as the MTA (Multicast table array).
4903                  * The SMBus release must also be disabled on LCD reset.
4904                  */
4905                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4906                       E1000_ICH_FWSM_FW_VALID)) {
4907                         /* Enable proxy to reset only on power good. */
4908                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4909                                                     &phy_reg);
4910                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4911                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4912                                                      phy_reg);
4913
4914                         /* Set bit enable LPI (EEE) to reset only on
4915                          * power good.
4916                          */
4917                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4918                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4919                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4920
4921                         /* Disable the SMB release on LCD reset. */
4922                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4923                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4924                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4925                 }
4926
4927                 /* Enable MTA to reset for Intel Rapid Start Technology
4928                  * Support
4929                  */
4930                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4931                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4932                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4933
4934 release:
4935                 hw->phy.ops.release(hw);
4936         }
4937 out:
4938         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4939
4940         if (hw->mac.type == e1000_ich8lan)
4941                 e1000_gig_downshift_workaround_ich8lan(hw);
4942
4943         if (hw->mac.type >= e1000_pchlan) {
4944                 e1000_oem_bits_config_ich8lan(hw, false);
4945
4946                 /* Reset PHY to activate OEM bits on 82577/8 */
4947                 if (hw->mac.type == e1000_pchlan)
4948                         e1000_phy_hw_reset_generic(hw);
4949
4950                 ret_val = hw->phy.ops.acquire(hw);
4951                 if (ret_val)
4952                         return;
4953                 e1000_write_smbus_addr(hw);
4954                 hw->phy.ops.release(hw);
4955         }
4956
4957         return;
4958 }
4959
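/* Illustrative usage sketch, not part of the shared base code: the suspend
 * workarounds above pair with e1000_resume_workarounds_pchlan() below around
 * the actual power transition.  The guard macro and helper name are
 * hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static s32 e1000_sx_cycle_example(struct e1000_hw *hw)
{
        /* Before entering Sx: force link speed down and program the OEM,
         * LPLU and Rapid Start proxy bits.
         */
        e1000_suspend_workarounds_ich8lan(hw);

        /* ... the platform enters and later leaves Sx here ... */

        /* Back in S0: re-run the PHY init flow and undo the Intel Rapid
         * Start Technology configuration.
         */
        return (s32)e1000_resume_workarounds_pchlan(hw);
}
#endif /* E1000_EXAMPLE_SKETCHES */
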
4960 /**
4961  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4962  *  @hw: pointer to the HW structure
4963  *
4964  *  During Sx to S0 transitions on non-managed devices or managed devices
4965  *  on which PHY resets are not blocked, if the PHY registers cannot be
4966  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4967  *  the PHY.
4968  *  On i217, setup Intel Rapid Start Technology.
4969  **/
4970 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4971 {
4972         s32 ret_val;
4973
4974         DEBUGFUNC("e1000_resume_workarounds_pchlan");
4975         if (hw->mac.type < e1000_pch2lan)
4976                 return E1000_SUCCESS;
4977
4978         ret_val = e1000_init_phy_workarounds_pchlan(hw);
4979         if (ret_val) {
4980                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4981                 return ret_val;
4982         }
4983
4984         /* For i217 Intel Rapid Start Technology support, when the system
4985          * is transitioning from Sx and no manageability engine is present,
4986          * configure SMBus to restore on reset, disable proxy, and enable
4987          * the reset on MTA (Multicast table array).
4988          */
4989         if (hw->phy.type == e1000_phy_i217) {
4990                 u16 phy_reg;
4991
4992                 ret_val = hw->phy.ops.acquire(hw);
4993                 if (ret_val) {
4994                         DEBUGOUT("Failed to setup iRST\n");
4995                         return ret_val;
4996                 }
4997
4998                 /* Clear Auto Enable LPI after link up */
4999                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5000                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5001                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5002
5003                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5004                     E1000_ICH_FWSM_FW_VALID)) {
5005                         /* Restore clear on SMB if no manageability engine
5006                          * is present
5007                          */
5008                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5009                                                               &phy_reg);
5010                         if (ret_val)
5011                                 goto release;
5012                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5013                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5014
5015                         /* Disable Proxy */
5016                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5017                 }
5018                 /* Enable reset on MTA */
5019                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5020                                                       &phy_reg);
5021                 if (ret_val)
5022                         goto release;
5023                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5024                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5025 release:
5026                 if (ret_val)
5027                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5028                 hw->phy.ops.release(hw);
5029                 return ret_val;
5030         }
5031         return E1000_SUCCESS;
5032 }
5033
5034 /**
5035  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5036  *  @hw: pointer to the HW structure
5037  *
5038  *  Return the LED back to the default configuration.
5039  **/
5040 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5041 {
5042         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5043
5044         if (hw->phy.type == e1000_phy_ife)
5045                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5046                                              0);
5047
5048         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5049         return E1000_SUCCESS;
5050 }
5051
5052 /**
5053  *  e1000_led_on_ich8lan - Turn LEDs on
5054  *  @hw: pointer to the HW structure
5055  *
5056  *  Turn on the LEDs.
5057  **/
5058 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5059 {
5060         DEBUGFUNC("e1000_led_on_ich8lan");
5061
5062         if (hw->phy.type == e1000_phy_ife)
5063                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5064                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5065
5066         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5067         return E1000_SUCCESS;
5068 }
5069
5070 /**
5071  *  e1000_led_off_ich8lan - Turn LEDs off
5072  *  @hw: pointer to the HW structure
5073  *
5074  *  Turn off the LEDs.
5075  **/
5076 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5077 {
5078         DEBUGFUNC("e1000_led_off_ich8lan");
5079
5080         if (hw->phy.type == e1000_phy_ife)
5081                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5082                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5083
5084         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5085         return E1000_SUCCESS;
5086 }
5087
5088 /**
5089  *  e1000_setup_led_pchlan - Configures SW controllable LED
5090  *  @hw: pointer to the HW structure
5091  *
5092  *  This prepares the SW controllable LED for use.
5093  **/
5094 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5095 {
5096         DEBUGFUNC("e1000_setup_led_pchlan");
5097
5098         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5099                                      (u16)hw->mac.ledctl_mode1);
5100 }
5101
5102 /**
5103  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5104  *  @hw: pointer to the HW structure
5105  *
5106  *  Return the LED back to the default configuration.
5107  **/
5108 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5109 {
5110         DEBUGFUNC("e1000_cleanup_led_pchlan");
5111
5112         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5113                                      (u16)hw->mac.ledctl_default);
5114 }
5115
5116 /**
5117  *  e1000_led_on_pchlan - Turn LEDs on
5118  *  @hw: pointer to the HW structure
5119  *
5120  *  Turn on the LEDs.
5121  **/
5122 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5123 {
5124         u16 data = (u16)hw->mac.ledctl_mode2;
5125         u32 i, led;
5126
5127         DEBUGFUNC("e1000_led_on_pchlan");
5128
5129         /* If no link, then turn LED on by setting the invert bit
5130          * for each LED whose mode is "link_up" in ledctl_mode2.
5131          */
5132         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5133                 for (i = 0; i < 3; i++) {
5134                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5135                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5136                             E1000_LEDCTL_MODE_LINK_UP)
5137                                 continue;
5138                         if (led & E1000_PHY_LED0_IVRT)
5139                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5140                         else
5141                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5142                 }
5143         }
5144
5145         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5146 }
5147
5148 /**
5149  *  e1000_led_off_pchlan - Turn LEDs off
5150  *  @hw: pointer to the HW structure
5151  *
5152  *  Turn off the LEDs.
5153  **/
5154 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5155 {
5156         u16 data = (u16)hw->mac.ledctl_mode1;
5157         u32 i, led;
5158
5159         DEBUGFUNC("e1000_led_off_pchlan");
5160
5161         /* If no link, then turn LED off by clearing the invert bit
5162          * for each LED whose mode is "link_up" in ledctl_mode1.
5163          */
5164         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5165                 for (i = 0; i < 3; i++) {
5166                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5167                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5168                             E1000_LEDCTL_MODE_LINK_UP)
5169                                 continue;
5170                         if (led & E1000_PHY_LED0_IVRT)
5171                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5172                         else
5173                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5174                 }
5175         }
5176
5177         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5178 }
5179
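/* Illustrative sketch, not part of the shared base code: how the two PCH LED
 * routines above force a single LED when there is no link.  Each LED occupies
 * a 5-bit field in HV_LED_CONFIG, and an LED whose mode is "link_up" is
 * forced by flipping its invert bit.  The guard macro and helper name are
 * hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static u16 e1000_flip_led_invert_example(u16 data, u32 i)
{
        u32 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;

        /* Only LEDs programmed to follow link state are forced. */
        if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP)
                return data;

        /* Flip the invert bit, exactly as led_on/led_off do above. */
        if (led & E1000_PHY_LED0_IVRT)
                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
        else
                data |= (E1000_PHY_LED0_IVRT << (i * 5));

        return data;
}
#endif /* E1000_EXAMPLE_SKETCHES */
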
5180 /**
5181  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5182  *  @hw: pointer to the HW structure
5183  *
5184  *  Read appropriate register for the config done bit for completion status
5185  *  and configure the PHY through s/w for EEPROM-less parts.
5186  *
5187  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5188  *  config done bit, so only an error is logged and execution continues.  If we were
5189  *  to return with error, EEPROM-less silicon would not be able to be reset
5190  *  or change link.
5191  **/
5192 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5193 {
5194         s32 ret_val = E1000_SUCCESS;
5195         u32 bank = 0;
5196         u32 status;
5197
5198         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5199
5200         e1000_get_cfg_done_generic(hw);
5201
5202         /* Wait for indication from h/w that it has completed basic config */
5203         if (hw->mac.type >= e1000_ich10lan) {
5204                 e1000_lan_init_done_ich8lan(hw);
5205         } else {
5206                 ret_val = e1000_get_auto_rd_done_generic(hw);
5207                 if (ret_val) {
5208                         /* When auto config read does not complete, do not
5209                          * return with an error. This can happen in situations
5210                          * where there is no eeprom; returning an error would prevent link.
5211                          */
5212                         DEBUGOUT("Auto Read Done did not complete\n");
5213                         ret_val = E1000_SUCCESS;
5214                 }
5215         }
5216
5217         /* Clear PHY Reset Asserted bit */
5218         status = E1000_READ_REG(hw, E1000_STATUS);
5219         if (status & E1000_STATUS_PHYRA)
5220                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5221         else
5222                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5223
5224         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5225         if (hw->mac.type <= e1000_ich9lan) {
5226                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5227                     (hw->phy.type == e1000_phy_igp_3)) {
5228                         e1000_phy_init_script_igp3(hw);
5229                 }
5230         } else {
5231                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5232                         /* Maybe we should do a basic PHY config */
5233                         DEBUGOUT("EEPROM not present\n");
5234                         ret_val = -E1000_ERR_CONFIG;
5235                 }
5236         }
5237
5238         return ret_val;
5239 }
5240
5241 /**
5242  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5243  * @hw: pointer to the HW structure
5244  *
5245  * In the case of a PHY power down to save power, to turn off link during a
5246  * driver unload, or when wake on LAN is not enabled, remove the link.
5247  **/
5248 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5249 {
5250         /* If the management interface is not enabled, then power down */
5251         if (!(hw->mac.ops.check_mng_mode(hw) ||
5252               hw->phy.ops.check_reset_block(hw)))
5253                 e1000_power_down_phy_copper(hw);
5254
5255         return;
5256 }
5257
5258 /**
5259  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5260  *  @hw: pointer to the HW structure
5261  *
5262  *  Clears hardware counters specific to the silicon family and calls
5263  *  clear_hw_cntrs_generic to clear all general purpose counters.
5264  **/
5265 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5266 {
5267         u16 phy_data;
5268         s32 ret_val;
5269
5270         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5271
5272         e1000_clear_hw_cntrs_base_generic(hw);
5273
5274         E1000_READ_REG(hw, E1000_ALGNERRC);
5275         E1000_READ_REG(hw, E1000_RXERRC);
5276         E1000_READ_REG(hw, E1000_TNCRS);
5277         E1000_READ_REG(hw, E1000_CEXTERR);
5278         E1000_READ_REG(hw, E1000_TSCTC);
5279         E1000_READ_REG(hw, E1000_TSCTFC);
5280
5281         E1000_READ_REG(hw, E1000_MGTPRC);
5282         E1000_READ_REG(hw, E1000_MGTPDC);
5283         E1000_READ_REG(hw, E1000_MGTPTC);
5284
5285         E1000_READ_REG(hw, E1000_IAC);
5286         E1000_READ_REG(hw, E1000_ICRXOC);
5287
5288         /* Clear PHY statistics registers */
5289         if ((hw->phy.type == e1000_phy_82578) ||
5290             (hw->phy.type == e1000_phy_82579) ||
5291             (hw->phy.type == e1000_phy_i217) ||
5292             (hw->phy.type == e1000_phy_82577)) {
5293                 ret_val = hw->phy.ops.acquire(hw);
5294                 if (ret_val)
5295                         return;
5296                 ret_val = hw->phy.ops.set_page(hw,
5297                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5298                 if (ret_val)
5299                         goto release;
5300                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5301                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5302                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5303                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5304                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5305                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5306                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5307                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5308                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5309                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5310                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5311                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5312                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5313                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5314 release:
5315                 hw->phy.ops.release(hw);
5316         }
5317 }
5318
5319 /**
5320  *  e1000_configure_k0s_lpt - Configure K0s power state
5321  *  @hw: pointer to the HW structure
5322  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
5323  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5324  *  @min_time: Minimum Tx idle period allowed  - valid values are 0 to 4.
5325  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5326  *
5327  *  Configure the K0s power state based on the provided parameters.
5328  *  Assumes semaphore already acquired.
5329  *
5330  *  Success returns 0, Failure returns:
5331  *      -E1000_ERR_PHY (-2) in case of access error
5332  *      -E1000_ERR_PARAM (-4) in case of parameters error
5333  **/
5334 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
5335 {
5336         s32 ret_val;
5337         u16 kmrn_reg = 0;
5338
5339         DEBUGFUNC("e1000_configure_k0s_lpt");
5340
5341         if (entry_latency > 3 || min_time > 4)
5342                 return -E1000_ERR_PARAM;
5343
5344         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5345                                              &kmrn_reg);
5346         if (ret_val)
5347                 return ret_val;
5348
5349         /* for now don't touch the latency */
5350         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
5351         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
5352
5353         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5354                                               kmrn_reg);
5355         if (ret_val)
5356                 return ret_val;
5357
5358         return E1000_SUCCESS;
5359 }
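
/* Illustrative sketch, not part of the shared base code: the timing encoding
 * documented for e1000_configure_k0s_lpt() above.  A value of 0 means 128 ns
 * and each increment doubles the period, i.e. period_ns = 128 << value.  The
 * guard macro and helper names are hypothetical.
 */
#ifdef E1000_EXAMPLE_SKETCHES
static u32 e1000_k0s_period_ns_example(u8 value)
{
        /* 0 -> 128 ns, 1 -> 256 ns, 2 -> 512 ns, ... */
        return 128u << value;
}

static s32 e1000_k0s_usage_example(struct e1000_hw *hw)
{
        /* Request the shortest allowed minimum Tx idle period (128 ns);
         * the semaphore noted above must already be held by the caller.
         */
        return e1000_configure_k0s_lpt(hw, 0, 0);
}
#endif /* E1000_EXAMPLE_SKETCHES */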