e1000/base: minor changes
[dpdk.git] / lib / librte_pmd_e1000 / e1000 / e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  */
66
67 #include "e1000_api.h"
68
69 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
70 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
71 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
72 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
73 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
74 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
75 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
76 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
77 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
78 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
79 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
80 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
81                                               u8 *mc_addr_list,
82                                               u32 mc_addr_count);
83 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
84 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
85 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
86 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
87 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
88                                             bool active);
89 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
90                                             bool active);
91 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
92                                    u16 words, u16 *data);
93 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94                                     u16 words, u16 *data);
95 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
96 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
97 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
98                                             u16 *data);
99 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
100 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
102 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
103 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
104 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
106 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
107                                            u16 *speed, u16 *duplex);
108 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
110 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
111 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
112 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
113 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
115 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
116 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
117 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
118 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
119 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
120 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
121                                           u32 offset, u8 *data);
122 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
123                                           u8 size, u16 *data);
124 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u16 *data);
126 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
127                                                  u32 offset, u8 byte);
128 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
129 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
130 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
131 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
132 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
133 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
134
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
        struct ich8_hsfsts {
                u16 flcdone:1; /* bit 0 Flash Cycle Done */
                u16 flcerr:1; /* bit 1 Flash Cycle Error */
                u16 dael:1; /* bit 2 Direct Access error Log */
                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
                u16 flcinprog:1; /* bit 5 flash cycle in Progress */
                u16 reserved1:2; /* bit 7:6 Reserved */
                u16 reserved2:6; /* bit 13:8 Reserved */
                u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
                u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
        } hsf_status;
        u16 regval; /* raw 16-bit image of the same register */
};
151
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
        struct ich8_hsflctl {
                u16 flcgo:1;   /* 0 Flash Cycle Go */
                u16 flcycle:2;   /* 2:1 Flash Cycle */
                u16 reserved:5;   /* 7:3 Reserved  */
                u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
                /* NOTE(review): field is named "flockdn" but the comment
                 * marks bits 15:10 as Reserved — name kept to match the
                 * Intel shared-code original; confirm against the ICH
                 * datasheet before relying on it as a lock-down field.
                 */
                u16 flockdn:6;   /* 15:10 Reserved */
        } hsf_ctrl;
        u16 regval; /* raw 16-bit image of the same register */
};
164
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
        struct ich8_flracc {
                u32 grra:8; /* 0:7 GbE region Read Access */
                u32 grwa:8; /* 8:15 GbE region Write Access */
                u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
                u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
        } hsf_flregacc;
        /* NOTE(review): regval is u16 while the bit-field view above spans
         * 32 bits, so regval aliases only part of the register.  This
         * matches the Intel shared-code original — confirm intent before
         * widening to u32.
         */
        u16 regval;
};
175
176 /**
177  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
178  *  @hw: pointer to the HW structure
179  *
180  *  Test access to the PHY registers by reading the PHY ID registers.  If
181  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
182  *  otherwise assume the read PHY ID is correct if it is valid.
183  *
184  *  Assumes the sw/fw/hw semaphore is already acquired.
185  **/
186 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
187 {
188         u16 phy_reg = 0;
189         u32 phy_id = 0;
190         s32 ret_val = 0;
191         u16 retry_count;
192         u32 mac_reg = 0;
193
194         for (retry_count = 0; retry_count < 2; retry_count++) {
195                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
196                 if (ret_val || (phy_reg == 0xFFFF))
197                         continue;
198                 phy_id = (u32)(phy_reg << 16);
199
200                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
201                 if (ret_val || (phy_reg == 0xFFFF)) {
202                         phy_id = 0;
203                         continue;
204                 }
205                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
206                 break;
207         }
208
209         if (hw->phy.id) {
210                 if  (hw->phy.id == phy_id)
211                         goto out;
212         } else if (phy_id) {
213                 hw->phy.id = phy_id;
214                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
215                 goto out;
216         }
217
218         /* In case the PHY needs to be in mdio slow mode,
219          * set slow mode and try to get the PHY id again.
220          */
221         if (hw->mac.type < e1000_pch_lpt) {
222                 hw->phy.ops.release(hw);
223                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
224                 if (!ret_val)
225                         ret_val = e1000_get_phy_id(hw);
226                 hw->phy.ops.acquire(hw);
227         }
228
229         if (ret_val)
230                 return false;
231 out:
232         if (hw->mac.type == e1000_pch_lpt) {
233                 /* Unforce SMBus mode in PHY */
234                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
235                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
236                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
237
238                 /* Unforce SMBus mode in MAC */
239                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
240                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
241                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
242         }
243
244         return true;
245 }
246
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
        u32 mac_reg;

        DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

        /* Set Phy Config Counter to 50msec */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
        E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

        /* Toggle LANPHYPC Value bit: take software control of the pin,
         * drive it low for 10us, then release the override so the pin
         * returns to hardware control.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL);
        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);
        usec_delay(10);
        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);

        if (hw->mac.type < e1000_pch_lpt) {
                /* Pre-LPT parts: fixed wait for the PHY to power back up */
                msec_delay(50);
        } else {
                /* LPT and newer: poll CTRL_EXT.LPCD (presumably "LANPHYPC
                 * Done" — confirm against datasheet) for up to 20 * 5ms,
                 * then allow an extra 30ms settling delay.
                 */
                u16 count = 20;

                do {
                        msec_delay(5);
                } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                           E1000_CTRL_EXT_LPCD) && count--);

                msec_delay(30);
        }
}
290
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
        u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

        /* Gate automatic PHY configuration by hardware on managed and
         * non-managed 82579 and newer adapters.
         */
        e1000_gate_hw_phy_config_ich8lan(hw, true);

#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
        /* It is not possible to be certain of the current state of ULP
         * so forcibly disable it.
         */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;

#endif /* NAHUM6LP_HW && ULP_SUPPORT */
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                DEBUGOUT("Failed to initialize PHY flow\n");
                goto out;
        }

        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
         * inaccessible and resetting the PHY is not blocked, toggle the
         * LANPHYPC Value bit to force the interconnect to PCIe mode.
         *
         * The cases below intentionally fall through: each newer MAC type
         * first tries less invasive recovery before reaching the shared
         * LANPHYPC toggle at the bottom.
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* Before toggling LANPHYPC, see if PHY is accessible by
                 * forcing MAC to SMBus mode first.
                 */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                /* Wait 50 milliseconds for MAC to finish any retries
                 * that it might be trying to perform from previous
                 * attempts to acknowledge any phy read requests.
                 */
                msec_delay(50);

                /* fall-through */
        case e1000_pch2lan:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* fall-through */
        case e1000_pchlan:
                /* On original PCH parts with valid firmware, management
                 * owns the PHY and the LANPHYPC toggle must be skipped.
                 */
                if ((hw->mac.type == e1000_pchlan) &&
                    (fwsm & E1000_ICH_FWSM_FW_VALID))
                        break;

                if (hw->phy.ops.check_reset_block(hw)) {
                        DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
                        ret_val = -E1000_ERR_PHY;
                        break;
                }

                /* Toggle LANPHYPC Value bit */
                e1000_toggle_lanphypc_pch_lpt(hw);
                if (hw->mac.type >= e1000_pch_lpt) {
                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
                         * so ensure that the MAC is also out of SMBus mode
                         */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        ret_val = -E1000_ERR_PHY;
                }
                break;
        default:
                break;
        }

        hw->phy.ops.release(hw);
        if (!ret_val) {

                /* Check to see if able to reset PHY.  Print error if not */
                if (hw->phy.ops.check_reset_block(hw)) {
                        ERROR_REPORT("Reset blocked by ME\n");
                        goto out;
                }

                /* Reset the PHY before any access to it.  Doing so, ensures
                 * that the PHY is in a known good state before we read/write
                 * PHY registers.  The generic reset is sufficient here,
                 * because we haven't determined the PHY type yet.
                 */
                ret_val = e1000_phy_hw_reset_generic(hw);
                if (ret_val)
                        goto out;

                /* On a successful reset, possibly need to wait for the PHY
                 * to quiesce to an accessible state before returning control
                 * to the calling function.  If the PHY does not quiesce, then
                 * return E1000E_BLK_PHY_RESET, as this is the condition that
                 * the PHY is in.
                 */
                ret_val = hw->phy.ops.check_reset_block(hw);
                if (ret_val)
                        ERROR_REPORT("ME blocked access to PHY after reset\n");
        }

out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
        if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                msec_delay(10);
                e1000_gate_hw_phy_config_ich8lan(hw, false);
        }

        return ret_val;
}
424
425 /**
426  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
427  *  @hw: pointer to the HW structure
428  *
429  *  Initialize family-specific PHY parameters and function pointers.
430  **/
431 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
432 {
433         struct e1000_phy_info *phy = &hw->phy;
434         s32 ret_val;
435
436         DEBUGFUNC("e1000_init_phy_params_pchlan");
437
438         phy->addr               = 1;
439         phy->reset_delay_us     = 100;
440
441         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
442         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
443         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
444         phy->ops.set_page       = e1000_set_page_igp;
445         phy->ops.read_reg       = e1000_read_phy_reg_hv;
446         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
447         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
448         phy->ops.release        = e1000_release_swflag_ich8lan;
449         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
450         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
451         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
452         phy->ops.write_reg      = e1000_write_phy_reg_hv;
453         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
454         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
455         phy->ops.power_up       = e1000_power_up_phy_copper;
456         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
457         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
458
459         phy->id = e1000_phy_unknown;
460
461         ret_val = e1000_init_phy_workarounds_pchlan(hw);
462         if (ret_val)
463                 return ret_val;
464
465         if (phy->id == e1000_phy_unknown)
466                 switch (hw->mac.type) {
467                 default:
468                         ret_val = e1000_get_phy_id(hw);
469                         if (ret_val)
470                                 return ret_val;
471                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
472                                 break;
473                         /* fall-through */
474                 case e1000_pch2lan:
475                 case e1000_pch_lpt:
476                         /* In case the PHY needs to be in mdio slow mode,
477                          * set slow mode and try to get the PHY id again.
478                          */
479                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
480                         if (ret_val)
481                                 return ret_val;
482                         ret_val = e1000_get_phy_id(hw);
483                         if (ret_val)
484                                 return ret_val;
485                         break;
486                 }
487         phy->type = e1000_get_phy_type_from_id(phy->id);
488
489         switch (phy->type) {
490         case e1000_phy_82577:
491         case e1000_phy_82579:
492         case e1000_phy_i217:
493                 phy->ops.check_polarity = e1000_check_polarity_82577;
494                 phy->ops.force_speed_duplex =
495                         e1000_phy_force_speed_duplex_82577;
496                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
497                 phy->ops.get_info = e1000_get_phy_info_82577;
498                 phy->ops.commit = e1000_phy_sw_reset_generic;
499                 break;
500         case e1000_phy_82578:
501                 phy->ops.check_polarity = e1000_check_polarity_m88;
502                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
503                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
504                 phy->ops.get_info = e1000_get_phy_info_m88;
505                 break;
506         default:
507                 ret_val = -E1000_ERR_PHY;
508                 break;
509         }
510
511         return ret_val;
512 }
513
514 /**
515  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
516  *  @hw: pointer to the HW structure
517  *
518  *  Initialize family-specific PHY parameters and function pointers.
519  **/
520 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
521 {
522         struct e1000_phy_info *phy = &hw->phy;
523         s32 ret_val;
524         u16 i = 0;
525
526         DEBUGFUNC("e1000_init_phy_params_ich8lan");
527
528         phy->addr               = 1;
529         phy->reset_delay_us     = 100;
530
531         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
532         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
533         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
534         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
535         phy->ops.read_reg       = e1000_read_phy_reg_igp;
536         phy->ops.release        = e1000_release_swflag_ich8lan;
537         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
538         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
539         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
540         phy->ops.write_reg      = e1000_write_phy_reg_igp;
541         phy->ops.power_up       = e1000_power_up_phy_copper;
542         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
543
544         /* We may need to do this twice - once for IGP and if that fails,
545          * we'll set BM func pointers and try again
546          */
547         ret_val = e1000_determine_phy_address(hw);
548         if (ret_val) {
549                 phy->ops.write_reg = e1000_write_phy_reg_bm;
550                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
551                 ret_val = e1000_determine_phy_address(hw);
552                 if (ret_val) {
553                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
554                         return ret_val;
555                 }
556         }
557
558         phy->id = 0;
559         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
560                (i++ < 100)) {
561                 msec_delay(1);
562                 ret_val = e1000_get_phy_id(hw);
563                 if (ret_val)
564                         return ret_val;
565         }
566
567         /* Verify phy id */
568         switch (phy->id) {
569         case IGP03E1000_E_PHY_ID:
570                 phy->type = e1000_phy_igp_3;
571                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
572                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
573                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
574                 phy->ops.get_info = e1000_get_phy_info_igp;
575                 phy->ops.check_polarity = e1000_check_polarity_igp;
576                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
577                 break;
578         case IFE_E_PHY_ID:
579         case IFE_PLUS_E_PHY_ID:
580         case IFE_C_E_PHY_ID:
581                 phy->type = e1000_phy_ife;
582                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
583                 phy->ops.get_info = e1000_get_phy_info_ife;
584                 phy->ops.check_polarity = e1000_check_polarity_ife;
585                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
586                 break;
587         case BME1000_E_PHY_ID:
588                 phy->type = e1000_phy_bm;
589                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
590                 phy->ops.read_reg = e1000_read_phy_reg_bm;
591                 phy->ops.write_reg = e1000_write_phy_reg_bm;
592                 phy->ops.commit = e1000_phy_sw_reset_generic;
593                 phy->ops.get_info = e1000_get_phy_info_m88;
594                 phy->ops.check_polarity = e1000_check_polarity_m88;
595                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
596                 break;
597         default:
598                 return -E1000_ERR_PHY;
599                 break;
600         }
601
602         return E1000_SUCCESS;
603 }
604
605 /**
606  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
607  *  @hw: pointer to the HW structure
608  *
609  *  Initialize family-specific NVM parameters and function
610  *  pointers.
611  **/
612 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
613 {
614         struct e1000_nvm_info *nvm = &hw->nvm;
615         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
616         u32 gfpreg, sector_base_addr, sector_end_addr;
617         u16 i;
618
619         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
620
621         /* Can't read flash registers if the register set isn't mapped. */
622         if (!hw->flash_address) {
623                 DEBUGOUT("ERROR: Flash registers not mapped\n");
624                 return -E1000_ERR_CONFIG;
625         }
626
627         nvm->type = e1000_nvm_flash_sw;
628
629         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
630
631         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
632          * Add 1 to sector_end_addr since this sector is included in
633          * the overall size.
634          */
635         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
636         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
637
638         /* flash_base_addr is byte-aligned */
639         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
640
641         /* find total size of the NVM, then cut in half since the total
642          * size represents two separate NVM banks.
643          */
644         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
645                                 << FLASH_SECTOR_ADDR_SHIFT);
646         nvm->flash_bank_size /= 2;
647         /* Adjust to word count */
648         nvm->flash_bank_size /= sizeof(u16);
649
650         nvm->word_size = E1000_SHADOW_RAM_WORDS;
651
652         /* Clear shadow ram */
653         for (i = 0; i < nvm->word_size; i++) {
654                 dev_spec->shadow_ram[i].modified = false;
655                 dev_spec->shadow_ram[i].value    = 0xFFFF;
656         }
657
658         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
659         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
660
661         /* Function Pointers */
662         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
663         nvm->ops.release        = e1000_release_nvm_ich8lan;
664         nvm->ops.read           = e1000_read_nvm_ich8lan;
665         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
666         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
667         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
668         nvm->ops.write          = e1000_write_nvm_ich8lan;
669
670         return E1000_SUCCESS;
671 }
672
673 /**
674  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
675  *  @hw: pointer to the HW structure
676  *
677  *  Initialize family-specific MAC parameters and function
678  *  pointers.
679  **/
STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
	u16 pci_cfg;
#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 has one less receive address register than the other parts */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
#endif
		/* fall-through: pch_lpt shares the pchlan LED/management ops
		 * below; its RAR count and rar_set/setup_physical_interface
		 * ops are then overridden after this switch.
		 */
	case e1000_pchlan:
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
		/* save PCH revision_id */
		e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
		/* only the low 4 bits of the PCI revision are kept */
		hw->revision_id = (u8)(pci_cfg &= 0x000F);
#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	/* pch_lpt overrides set up after the shared pchlan case above */
	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	return E1000_SUCCESS;
}
793
794 /**
795  *  __e1000_access_emi_reg_locked - Read/write EMI register
796  *  @hw: pointer to the HW structure
797  *  @addr: EMI address to program
798  *  @data: pointer to value to read/write from/to the EMI address
799  *  @read: boolean flag to indicate read or write
800  *
801  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
802  **/
803 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
804                                          u16 *data, bool read)
805 {
806         s32 ret_val;
807
808         DEBUGFUNC("__e1000_access_emi_reg_locked");
809
810         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
811         if (ret_val)
812                 return ret_val;
813
814         if (read)
815                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
816                                                       data);
817         else
818                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
819                                                        *data);
820
821         return ret_val;
822 }
823
824 /**
825  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
826  *  @hw: pointer to the HW structure
827  *  @addr: EMI address to program
828  *  @data: value to be read from the EMI address
829  *
830  *  Assumes the SW/FW/HW Semaphore is already acquired.
831  **/
832 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
833 {
834         DEBUGFUNC("e1000_read_emi_reg_locked");
835
836         return __e1000_access_emi_reg_locked(hw, addr, data, true);
837 }
838
839 /**
840  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
841  *  @hw: pointer to the HW structure
842  *  @addr: EMI address to program
843  *  @data: value to be written to the EMI address
844  *
845  *  Assumes the SW/FW/HW Semaphore is already acquired.
846  **/
847 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
848 {
849         DEBUGFUNC("e1000_read_emi_reg_locked");
850
851         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
852 }
853
854 /**
855  *  e1000_set_eee_pchlan - Enable/disable EEE support
856  *  @hw: pointer to the HW structure
857  *
858  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
859  *  the link and the EEE capabilities of the link partner.  The LPI Control
860  *  register bits will remain set only if/when link is up.
861  *
862  *  EEE LPI must not be asserted earlier than one second after link is up.
863  *  On 82579, EEE LPI should not be enabled until such time otherwise there
864  *  can be link issues with some switches.  Other devices can have EEE LPI
865  *  enabled immediately upon link up since they have a timer in hardware which
866  *  prevents LPI from being asserted too early.
867  **/
868 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
869 {
870         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
871         s32 ret_val;
872         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
873
874         DEBUGFUNC("e1000_set_eee_pchlan");
875
876         switch (hw->phy.type) {
877         case e1000_phy_82579:
878                 lpa = I82579_EEE_LP_ABILITY;
879                 pcs_status = I82579_EEE_PCS_STATUS;
880                 adv_addr = I82579_EEE_ADVERTISEMENT;
881                 break;
882         case e1000_phy_i217:
883                 lpa = I217_EEE_LP_ABILITY;
884                 pcs_status = I217_EEE_PCS_STATUS;
885                 adv_addr = I217_EEE_ADVERTISEMENT;
886                 break;
887         default:
888                 return E1000_SUCCESS;
889         }
890
891         ret_val = hw->phy.ops.acquire(hw);
892         if (ret_val)
893                 return ret_val;
894
895         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
896         if (ret_val)
897                 goto release;
898
899         /* Clear bits that enable EEE in various speeds */
900         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
901
902         /* Enable EEE if not disabled by user */
903         if (!dev_spec->eee_disable) {
904                 /* Save off link partner's EEE ability */
905                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
906                                                     &dev_spec->eee_lp_ability);
907                 if (ret_val)
908                         goto release;
909
910                 /* Read EEE advertisement */
911                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
912                 if (ret_val)
913                         goto release;
914
915                 /* Enable EEE only for speeds in which the link partner is
916                  * EEE capable and for which we advertise EEE.
917                  */
918                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
919                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
920
921                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
922                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
923                         if (data & NWAY_LPAR_100TX_FD_CAPS)
924                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
925                         else
926                                 /* EEE is not supported in 100Half, so ignore
927                                  * partner's EEE in 100 ability if full-duplex
928                                  * is not advertised.
929                                  */
930                                 dev_spec->eee_lp_ability &=
931                                     ~I82579_EEE_100_SUPPORTED;
932                 }
933         }
934
935         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
936         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
937         if (ret_val)
938                 goto release;
939
940         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
941 release:
942         hw->phy.ops.release(hw);
943
944         return ret_val;
945 }
946
947 /**
948  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
949  *  @hw:   pointer to the HW structure
950  *  @link: link up bool flag
951  *
952  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
953  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
955  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
956  *  speeds in order to avoid Tx hangs.
957  **/
STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		/* 1Gbps link up: temporarily disable K1 while setting the
		 * PLL clock request bit in FEXTNVM6, then restore the saved
		 * K1 configuration.  Kumeran accesses require the PHY
		 * semaphore.
		 */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		/* Disable K1 before touching the clock request bit */
		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		/* Keep the PLL clock requested while in 1Gbps mode */
		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		/* Restore the original K1 configuration */
		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		/* No inband Tx timeout tuning needed when link is down or
		 * at 100 Mbps full-duplex; just write FEXTNVM6 back.
		 */
		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}
1035
1036 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
1037 /**
1038  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1039  *  @hw: pointer to the HW structure
1040  *  @to_sx: boolean indicating a system power state transition to Sx
1041  *
1042  *  When link is down, configure ULP mode to significantly reduce the power
1043  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1044  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1045  *  system, configure the ULP mode by software.
1046  */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;

	/* ULP applies only to LynxPoint-LP parts (not I217 LPT); nothing
	 * to do if ULP is already on.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			/* 100 iterations x 50 msec = 5 second timeout */
			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
	}

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	/* No ME firmware present: configure ULP mode by software below. */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* During S0 Idle keep the phy in PCI-E mode */
	if (hw->dev_spec.ich8lan.smbus_disable)
		goto skip_smbus;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

skip_smbus:
	if (!to_sx) {
		/* Change the 'Link Status Change' interrupt to trigger
		 * on 'Cable Status Change'
		 */
		ret_val = e1000_read_kmrn_reg_locked(hw,
						     E1000_KMRNCTRLSTA_OP_MODES,
						     &phy_reg);
		if (ret_val)
			goto release;
		phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
		e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
					    phy_reg);
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		/* Going to Sx: optionally wake host on link change and make
		 * ULP sticky across the power transition.
		 */
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
	} else {
		/* Staying in S0: allow in-band (cable plug) ULP exit */
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if (!to_sx) {
		/* Disable Tx so that the MAC doesn't send any (buffered)
		 * packets to the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_TCTL);
		mac_reg &= ~E1000_TCTL_EN;
		E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
	}
release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}
1169
1170 /**
1171  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1172  *  @hw: pointer to the HW structure
1173  *  @force: boolean indicating whether or not to force disabling ULP
1174  *
1175  *  Un-configure ULP mode when link is up, the system is transitioned from
1176  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1177  *  system, poll for an indication from ME that ULP has been un-configured.
1178  *  If not on an ME enabled system, un-configure the ULP mode by software.
1179  *
1180  *  During nominal operation, this function is called when link is acquired
1181  *  to disable ULP mode (force=false); otherwise, for example when unloading
1182  *  the driver or during Sx->S0 transitions, this is called with force=true
1183  *  to forcibly disable ULP.
 *
1185  *  When the cable is plugged in while the device is in D0, a Cable Status
1186  *  Change interrupt is generated which causes this function to be called
1187  *  to partially disable ULP mode and restart autonegotiation.  This function
1188  *  is then called again due to the resulting Link Status Change interrupt
1189  *  to finish cleaning up after the ULP flow.
1190  */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	/* ULP applies only to LynxPoint-LP parts (not I217 LPT); nothing
	 * to do if ULP is already off.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* ME firmware owns ULP configuration */
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			/* 10 iterations x 10 msec = 100 msec timeout */
			if (i++ == 10) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

			/* Restore link speed advertisements and restart
			 * Auto-negotiation
			 */
			ret_val = e1000_phy_setup_autoneg(hw);
			if (ret_val)
				goto out;

			ret_val = e1000_oem_bits_config_ich8lan(hw, true);
		}

		goto out;
	}

	/* No ME firmware present: un-configure ULP by software below. */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Revert the change to the 'Link Status Change'
	 * interrupt to trigger on 'Cable Status Change'
	 */
	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
					     &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
	e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		/* Retry the PHY read now that SMBus mode is forced */
		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	/* CSC interrupt received due to ULP Indication */
	if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
		phy_reg &= ~(I218_ULP_CONFIG1_IND |
			     I218_ULP_CONFIG1_STICKY_ULP |
			     I218_ULP_CONFIG1_RESET_TO_SMBUS |
			     I218_ULP_CONFIG1_WOL_HOST |
			     I218_ULP_CONFIG1_INBAND_EXIT |
			     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
		e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

		/* Commit ULP changes by starting auto ULP configuration */
		phy_reg |= I218_ULP_CONFIG1_START;
		e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

		/* Clear Disable SMBus Release on PERST# in MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
		mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
		E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

		if (!force) {
			hw->phy.ops.release(hw);

			if (hw->mac.autoneg)
				e1000_phy_setup_autoneg(hw);

			e1000_sw_lcd_config_ich8lan(hw);

			e1000_oem_bits_config_ich8lan(hw, true);

			/* Set ULP state to unknown and return non-zero to
			 * indicate no link (yet) and re-enter on the next LSC
			 * to finish disabling ULP flow.
			 */
			hw->dev_spec.ich8lan.ulp_state =
			    e1000_ulp_state_unknown;

			return 1;
		}
	}

	/* Re-enable Tx */
	mac_reg = E1000_READ_REG(hw, E1000_TCTL);
	mac_reg |= E1000_TCTL_EN;
	E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		/* Forced disable: full PHY reset to leave a clean state */
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}
1363
1364 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
1365 /**
1366  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1367  *  @hw: pointer to the HW structure
1368  *
1369  *  Checks to see of the link status of the hardware has changed.  If a
1370  *  change in link status has been detected, then we read the PHY registers
1371  *  to get the current speed/duplex if link exists.
1372  **/
1373 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1374 {
1375         struct e1000_mac_info *mac = &hw->mac;
1376         s32 ret_val;
1377         bool link = false;
1378         u16 phy_reg;
1379
1380         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1381
1382         /* We only want to go out to the PHY registers to see if Auto-Neg
1383          * has completed and/or if our link status has changed.  The
1384          * get_link_status flag is set upon receiving a Link Status
1385          * Change or Rx Sequence Error interrupt.
1386          */
1387         if (!mac->get_link_status)
1388                 return E1000_SUCCESS;
1389
1390         if ((hw->mac.type < e1000_pch_lpt) ||
1391             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1392             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1393                 /* First we want to see if the MII Status Register reports
1394                  * link.  If so, then we want to get the current speed/duplex
1395                  * of the PHY.
1396                  */
1397                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1398                 if (ret_val)
1399                         return ret_val;
1400         } else {
1401                 /* Check the MAC's STATUS register to determine link state
1402                  * since the PHY could be inaccessible while in ULP mode.
1403                  */
1404                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1405                 if (link)
1406                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1407                 else
1408                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1409
1410                 if (ret_val)
1411                         return ret_val;
1412         }
1413
1414         if (hw->mac.type == e1000_pchlan) {
1415                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1416                 if (ret_val)
1417                         return ret_val;
1418         }
1419
1420         /* When connected at 10Mbps half-duplex, 82579 parts are excessively
1421          * aggressive resulting in many collisions. To avoid this, increase
1422          * the IPG and reduce Rx latency in the PHY.
1423          */
1424         if ((hw->mac.type == e1000_pch2lan) && link) {
1425                 u32 reg;
1426                 reg = E1000_READ_REG(hw, E1000_STATUS);
1427                 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1428                         reg = E1000_READ_REG(hw, E1000_TIPG);
1429                         reg &= ~E1000_TIPG_IPGT_MASK;
1430                         reg |= 0xFF;
1431                         E1000_WRITE_REG(hw, E1000_TIPG, reg);
1432
1433                         /* Reduce Rx latency in analog PHY */
1434                         ret_val = hw->phy.ops.acquire(hw);
1435                         if (ret_val)
1436                                 return ret_val;
1437
1438                         ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
1439
1440                         hw->phy.ops.release(hw);
1441
1442                         if (ret_val)
1443                                 return ret_val;
1444                 }
1445         }
1446
1447         /* Work-around I218 hang issue */
1448         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1449             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
1450                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1451                 if (ret_val)
1452                         return ret_val;
1453         }
1454
1455         /* Clear link partner's EEE ability */
1456         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1457
1458         if (!link)
1459                 return E1000_SUCCESS; /* No link detected */
1460
1461         mac->get_link_status = false;
1462
1463         switch (hw->mac.type) {
1464         case e1000_pch2lan:
1465                 ret_val = e1000_k1_workaround_lv(hw);
1466                 if (ret_val)
1467                         return ret_val;
1468                 /* fall-thru */
1469         case e1000_pchlan:
1470                 if (hw->phy.type == e1000_phy_82578) {
1471                         ret_val = e1000_link_stall_workaround_hv(hw);
1472                         if (ret_val)
1473                                 return ret_val;
1474                 }
1475
1476                 /* Workaround for PCHx parts in half-duplex:
1477                  * Set the number of preambles removed from the packet
1478                  * when it is passed from the PHY to the MAC to prevent
1479                  * the MAC from misinterpreting the packet type.
1480                  */
1481                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1482                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1483
1484                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1485                     E1000_STATUS_FD)
1486                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1487
1488                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1489                 break;
1490         default:
1491                 break;
1492         }
1493
1494         /* Check if there was DownShift, must be checked
1495          * immediately after link-up
1496          */
1497         e1000_check_downshift_generic(hw);
1498
1499         /* Enable/Disable EEE after link up */
1500         if (hw->phy.type > e1000_phy_82579) {
1501                 ret_val = e1000_set_eee_pchlan(hw);
1502                 if (ret_val)
1503                         return ret_val;
1504         }
1505
1506         /* If we are forcing speed/duplex, then we simply return since
1507          * we have already determined whether we have link or not.
1508          */
1509         if (!mac->autoneg)
1510                 return -E1000_ERR_CONFIG;
1511
1512         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1513          * of MAC speed/duplex configuration.  So we only need to
1514          * configure Collision Distance in the MAC.
1515          */
1516         mac->ops.config_collision_dist(hw);
1517
1518         /* Configure Flow Control now that Auto-Neg has completed.
1519          * First, we need to restore the desired flow control
1520          * settings because we may have had to re-autoneg with a
1521          * different link partner.
1522          */
1523         ret_val = e1000_config_fc_after_link_up_generic(hw);
1524         if (ret_val)
1525                 DEBUGOUT("Error configuring flow control\n");
1526
1527         return ret_val;
1528 }
1529
1530 /**
1531  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1532  *  @hw: pointer to the HW structure
1533  *
1534  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1535  **/
1536 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1537 {
1538         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1539
1540         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1541         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1542         switch (hw->mac.type) {
1543         case e1000_ich8lan:
1544         case e1000_ich9lan:
1545         case e1000_ich10lan:
1546                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1547                 break;
1548         case e1000_pchlan:
1549         case e1000_pch2lan:
1550         case e1000_pch_lpt:
1551                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1552                 break;
1553         default:
1554                 break;
1555         }
1556 }
1557
1558 /**
1559  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1560  *  @hw: pointer to the HW structure
1561  *
1562  *  Acquires the mutex for performing NVM operations.
1563  **/
1564 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1565 {
1566         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1567
1568         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1569
1570         return E1000_SUCCESS;
1571 }
1572
1573 /**
1574  *  e1000_release_nvm_ich8lan - Release NVM mutex
1575  *  @hw: pointer to the HW structure
1576  *
1577  *  Releases the mutex used while performing NVM operations.
1578  **/
1579 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1580 {
1581         DEBUGFUNC("e1000_release_nvm_ich8lan");
1582
1583         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1584
1585         return;
1586 }
1587
/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.  Returns E1000_SUCCESS with the flag (and the
 *  local swflag mutex) held, or -E1000_ERR_CONFIG on timeout with
 *  the mutex released so callers need not clean up.
 **/
STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	/* Serialize software contexts first; the hardware flag below
	 * arbitrates with firmware/hardware.
	 */
	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	/* Wait (up to PHY_CFG_TIMEOUT ms) for any current software owner
	 * to drop the flag in EXTCNF_CTRL.
	 */
	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	/* Request ownership, then wait for hardware to latch the bit;
	 * it stays clear while FW/HW holds the semaphore.
	 */
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		/* FW or HW owns the semaphore - withdraw our request. */
		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	/* On failure release the mutex here; on success the caller
	 * releases it via e1000_release_swflag_ich8lan().
	 */
	if (ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	return ret_val;
}
1648
1649 /**
1650  *  e1000_release_swflag_ich8lan - Release software control flag
1651  *  @hw: pointer to the HW structure
1652  *
1653  *  Releases the software control flag for performing PHY and select
1654  *  MAC CSR accesses.
1655  **/
1656 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1657 {
1658         u32 extcnf_ctrl;
1659
1660         DEBUGFUNC("e1000_release_swflag_ich8lan");
1661
1662         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1663
1664         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1665                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1666                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1667         } else {
1668                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1669         }
1670
1671         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1672
1673         return;
1674 }
1675
1676 /**
1677  *  e1000_check_mng_mode_ich8lan - Checks management mode
1678  *  @hw: pointer to the HW structure
1679  *
1680  *  This checks if the adapter has any manageability enabled.
1681  *  This is a function pointer entry point only called by read/write
1682  *  routines for the PHY and NVM parts.
1683  **/
1684 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1685 {
1686         u32 fwsm;
1687
1688         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1689
1690         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1691
1692         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1693                ((fwsm & E1000_FWSM_MODE_MASK) ==
1694                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1695 }
1696
1697 /**
1698  *  e1000_check_mng_mode_pchlan - Checks management mode
1699  *  @hw: pointer to the HW structure
1700  *
1701  *  This checks if the adapter has iAMT enabled.
1702  *  This is a function pointer entry point only called by read/write
1703  *  routines for the PHY and NVM parts.
1704  **/
1705 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1706 {
1707         u32 fwsm;
1708
1709         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1710
1711         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1712
1713         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1714                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1715 }
1716
/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes, network byte order)
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 **/
STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] always holds the primary MAC address; write it directly. */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count - 6)) {
		s32 ret_val;

		/* SHRA writes require the HW software semaphore. */
		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates - ME may silently discard
		 * writes to an SHRA it has locked
		 */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return;

		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}
1784
/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes, network byte order)
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr. For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address. SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 **/
STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] always holds the primary MAC address; write it directly. */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		/* WLOCK_MAC in FWSM encodes how many SHRA registers ME has
		 * locked (0 = none, 1 = all; otherwise entries below the
		 * value remain writable).
		 */
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			/* SHRA writes require the HW software semaphore. */
			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
					rar_low);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
					rar_high);
			E1000_WRITE_FLUSH(hw);

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates - ME may silently
			 * discard writes to an SHRA it has locked
			 */
			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
				return;
		}
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}
1862
1863 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
/**
 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 **/
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count)
{
	u16 phy_reg = 0;
	int i;
	s32 ret_val;

	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

	/* Program the MAC MTA first; the PHY copy below is mirrored from
	 * hw->mac.mta_shadow.
	 */
	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	/* Open the wakeup-register access window; phy_reg preserves the
	 * state restored by the matching disable call below.
	 */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy each 32-bit MTA shadow entry as two 16-bit PHY writes. */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(hw->mac.mta_shadow[i] &
						 0xFFFF));
		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
					   (u16)((hw->mac.mta_shadow[i] >> 16) &
						 0xFFFF));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
1907
1908 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1909 /**
1910  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1911  *  @hw: pointer to the HW structure
1912  *
1913  *  Checks if firmware is blocking the reset of the PHY.
1914  *  This is a function pointer entry point only called by
1915  *  reset routines.
1916  **/
1917 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1918 {
1919         u32 fwsm;
1920         bool blocked = false;
1921         int i = 0;
1922
1923         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1924
1925         do {
1926                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1927                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1928                         blocked = true;
1929                         msec_delay(10);
1930                         continue;
1931                 }
1932                 blocked = false;
1933         } while (blocked && (i++ < 10));
1934         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1935 }
1936
/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 *  @hw: pointer to the HW structure
 *
 *  Copies the SMBus address from the MAC STRAP register into the PHY's
 *  HV_SMB_ADDR register and, on i217, restores the SMBus frequency
 *  from the strap as well.
 *
 *  Assumes semaphore already acquired.
 *
 **/
STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
		E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Replace the PHY's SMBus address with the strapped one and mark
	 * it valid with PEC (packet error checking) enabled.
	 */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		if (freq--) {
			/* NOTE: freq == 0 means the strap carries no usable
			 * frequency; otherwise the post-decrement maps the
			 * strap encoding onto the two PHY frequency bits.
			 */
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
				HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			DEBUGOUT("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}
1977
/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw:   pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 **/
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		/* Only IGP3 PHYs on specific ICH8 devices need this. */
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* NVM must have flagged that SW owns the LCD configuration. */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
			goto release;

	/* Extract the length (in dwords) of the extended config region. */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each config entry is a (data, address) pair of NVM words. */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto release;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2099
/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
 *  If link is down, the function will restore the default K1 setting located
 *  in the NVM.
 **/
STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default to the K1 setting autoloaded from the NVM. */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	/* Workaround applies to first-generation PCH (82577/82578) only. */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Keep only the bits that identify a resolved
			 * 1000 Mbps link.
			 */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2181
/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k1_ich8lan");

	/* Read-modify-write the K1 enable bit in the KMRN K1 config. */
	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	usec_delay(20);
	/* Save current CTRL/CTRL_EXT so they can be restored below. */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

	/* Temporarily force 10 Mbps with speed-detection bypass so the
	 * new K1 setting takes effect; the 20 us delays bracket the
	 * forced-speed window.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);
	/* Restore the original register values. */
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);

	return E1000_SUCCESS;
}
2235
/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw:       pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
 **/
STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	/* OEM bits exist on PCH and newer parts only. */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On first-generation PCH, skip if HW owns the OEM configuration. */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* NVM must have flagged that SW owns the configuration. */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Mirror the MAC's PHY_CTRL power-state bits into the PHY OEM
	 * bits; D0 and non-D0 states use different PHY_CTRL source bits.
	 */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
		    E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2306
2307
2308 /**
2309  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2310  *  @hw:   pointer to the HW structure
2311  **/
2312 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2313 {
2314         s32 ret_val;
2315         u16 data;
2316
2317         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2318
2319         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2320         if (ret_val)
2321                 return ret_val;
2322
2323         data |= HV_KMRN_MDIO_SLOW;
2324
2325         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2326
2327         return ret_val;
2328 }
2329
2330 /**
2331  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2332  *  done after every PHY reset.
2333  **/
2334 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2335 {
2336         s32 ret_val = E1000_SUCCESS;
2337         u16 phy_data;
2338
2339         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2340
2341         if (hw->mac.type != e1000_pchlan)
2342                 return E1000_SUCCESS;
2343
2344         /* Set MDIO slow mode before any other MDIO access */
2345         if (hw->phy.type == e1000_phy_82577) {
2346                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2347                 if (ret_val)
2348                         return ret_val;
2349         }
2350
2351         if (((hw->phy.type == e1000_phy_82577) &&
2352              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2353             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2354                 /* Disable generation of early preamble */
2355                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2356                 if (ret_val)
2357                         return ret_val;
2358
2359                 /* Preamble tuning for SSC */
2360                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2361                                                 0xA204);
2362                 if (ret_val)
2363                         return ret_val;
2364         }
2365
2366         if (hw->phy.type == e1000_phy_82578) {
2367                 /* Return registers to default by doing a soft reset then
2368                  * writing 0x3140 to the control register.
2369                  */
2370                 if (hw->phy.revision < 2) {
2371                         e1000_phy_sw_reset_generic(hw);
2372                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2373                                                         0x3140);
2374                 }
2375         }
2376
2377         /* Select page 0 */
2378         ret_val = hw->phy.ops.acquire(hw);
2379         if (ret_val)
2380                 return ret_val;
2381
2382         hw->phy.addr = 1;
2383         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2384         hw->phy.ops.release(hw);
2385         if (ret_val)
2386                 return ret_val;
2387
2388         /* Configure the K1 Si workaround during phy reset assuming there is
2389          * link so that it disables K1 if link is in 1Gbps.
2390          */
2391         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2392         if (ret_val)
2393                 return ret_val;
2394
2395         /* Workaround for link disconnects on a busy hub in half duplex */
2396         ret_val = hw->phy.ops.acquire(hw);
2397         if (ret_val)
2398                 return ret_val;
2399         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2400         if (ret_val)
2401                 goto release;
2402         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2403                                                phy_data & 0x00FF);
2404         if (ret_val)
2405                 goto release;
2406
2407         /* set MSE higher to enable link to stay up when noise is high */
2408         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2409 release:
2410         hw->phy.ops.release(hw);
2411
2412         return ret_val;
2413 }
2414
2415 /**
2416  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2417  *  @hw:   pointer to the HW structure
2418  **/
2419 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2420 {
2421         u32 mac_reg;
2422         u16 i, phy_reg = 0;
2423         s32 ret_val;
2424
2425         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2426
2427         ret_val = hw->phy.ops.acquire(hw);
2428         if (ret_val)
2429                 return;
2430         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2431         if (ret_val)
2432                 goto release;
2433
2434         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2435         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2436                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2437                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2438                                            (u16)(mac_reg & 0xFFFF));
2439                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2440                                            (u16)((mac_reg >> 16) & 0xFFFF));
2441
2442                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2443                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2444                                            (u16)(mac_reg & 0xFFFF));
2445                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2446                                            (u16)((mac_reg & E1000_RAH_AV)
2447                                                  >> 16));
2448         }
2449
2450         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2451
2452 release:
2453         hw->phy.ops.release(hw);
2454 }
2455
2456 #ifndef CRC32_OS_SUPPORT
2457 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2458 {
2459         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2460         u32 i, j, mask, crc;
2461
2462         DEBUGFUNC("e1000_calc_rx_da_crc");
2463
2464         crc = 0xffffffff;
2465         for (i = 0; i < 6; i++) {
2466                 crc = crc ^ mac[i];
2467                 for (j = 8; j > 0; j--) {
2468                         mask = (crc & 1) * (-1);
2469                         crc = (crc >> 1) ^ (poly & mask);
2470                 }
2471         }
2472         return ~crc;
2473 }
2474
2475 #endif /* CRC32_OS_SUPPORT */
2476 /**
2477  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2478  *  with 82579 PHY
2479  *  @hw: pointer to the HW structure
2480  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2481  **/
2482 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2483 {
2484         s32 ret_val = E1000_SUCCESS;
2485         u16 phy_reg, data;
2486         u32 mac_reg;
2487         u16 i;
2488
2489         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2490
2491         if (hw->mac.type < e1000_pch2lan)
2492                 return E1000_SUCCESS;
2493
2494         /* disable Rx path while enabling/disabling workaround */
2495         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2496         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2497                                         phy_reg | (1 << 14));
2498         if (ret_val)
2499                 return ret_val;
2500
2501         if (enable) {
2502                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2503                  * SHRAL/H) and initial CRC values to the MAC
2504                  */
2505                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2506                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2507                         u32 addr_high, addr_low;
2508
2509                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2510                         if (!(addr_high & E1000_RAH_AV))
2511                                 continue;
2512                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2513                         mac_addr[0] = (addr_low & 0xFF);
2514                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2515                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2516                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2517                         mac_addr[4] = (addr_high & 0xFF);
2518                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2519
2520 #ifndef CRC32_OS_SUPPORT
2521                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2522                                         e1000_calc_rx_da_crc(mac_addr));
2523 #else /* CRC32_OS_SUPPORT */
2524                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2525                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2526 #endif /* CRC32_OS_SUPPORT */
2527                 }
2528
2529                 /* Write Rx addresses to the PHY */
2530                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2531
2532                 /* Enable jumbo frame workaround in the MAC */
2533                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2534                 mac_reg &= ~(1 << 14);
2535                 mac_reg |= (7 << 15);
2536                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2537
2538                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2539                 mac_reg |= E1000_RCTL_SECRC;
2540                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2541
2542                 ret_val = e1000_read_kmrn_reg_generic(hw,
2543                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2544                                                 &data);
2545                 if (ret_val)
2546                         return ret_val;
2547                 ret_val = e1000_write_kmrn_reg_generic(hw,
2548                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2549                                                 data | (1 << 0));
2550                 if (ret_val)
2551                         return ret_val;
2552                 ret_val = e1000_read_kmrn_reg_generic(hw,
2553                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2554                                                 &data);
2555                 if (ret_val)
2556                         return ret_val;
2557                 data &= ~(0xF << 8);
2558                 data |= (0xB << 8);
2559                 ret_val = e1000_write_kmrn_reg_generic(hw,
2560                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2561                                                 data);
2562                 if (ret_val)
2563                         return ret_val;
2564
2565                 /* Enable jumbo frame workaround in the PHY */
2566                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2567                 data &= ~(0x7F << 5);
2568                 data |= (0x37 << 5);
2569                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2570                 if (ret_val)
2571                         return ret_val;
2572                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2573                 data &= ~(1 << 13);
2574                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2575                 if (ret_val)
2576                         return ret_val;
2577                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2578                 data &= ~(0x3FF << 2);
2579                 data |= (0x1A << 2);
2580                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2581                 if (ret_val)
2582                         return ret_val;
2583                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2584                 if (ret_val)
2585                         return ret_val;
2586                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2587                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2588                                                 (1 << 10));
2589                 if (ret_val)
2590                         return ret_val;
2591         } else {
2592                 /* Write MAC register values back to h/w defaults */
2593                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2594                 mac_reg &= ~(0xF << 14);
2595                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2596
2597                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2598                 mac_reg &= ~E1000_RCTL_SECRC;
2599                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2600
2601                 ret_val = e1000_read_kmrn_reg_generic(hw,
2602                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2603                                                 &data);
2604                 if (ret_val)
2605                         return ret_val;
2606                 ret_val = e1000_write_kmrn_reg_generic(hw,
2607                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2608                                                 data & ~(1 << 0));
2609                 if (ret_val)
2610                         return ret_val;
2611                 ret_val = e1000_read_kmrn_reg_generic(hw,
2612                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2613                                                 &data);
2614                 if (ret_val)
2615                         return ret_val;
2616                 data &= ~(0xF << 8);
2617                 data |= (0xB << 8);
2618                 ret_val = e1000_write_kmrn_reg_generic(hw,
2619                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2620                                                 data);
2621                 if (ret_val)
2622                         return ret_val;
2623
2624                 /* Write PHY register values back to h/w defaults */
2625                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2626                 data &= ~(0x7F << 5);
2627                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2628                 if (ret_val)
2629                         return ret_val;
2630                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2631                 data |= (1 << 13);
2632                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2633                 if (ret_val)
2634                         return ret_val;
2635                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2636                 data &= ~(0x3FF << 2);
2637                 data |= (0x8 << 2);
2638                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2639                 if (ret_val)
2640                         return ret_val;
2641                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2642                 if (ret_val)
2643                         return ret_val;
2644                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2645                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2646                                                 ~(1 << 10));
2647                 if (ret_val)
2648                         return ret_val;
2649         }
2650
2651         /* re-enable Rx path after enabling/disabling workaround */
2652         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2653                                      ~(1 << 14));
2654 }
2655
2656 /**
2657  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2658  *  done after every PHY reset.
2659  **/
2660 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2661 {
2662         s32 ret_val = E1000_SUCCESS;
2663
2664         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2665
2666         if (hw->mac.type != e1000_pch2lan)
2667                 return E1000_SUCCESS;
2668
2669         /* Set MDIO slow mode before any other MDIO access */
2670         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2671         if (ret_val)
2672                 return ret_val;
2673
2674         ret_val = hw->phy.ops.acquire(hw);
2675         if (ret_val)
2676                 return ret_val;
2677         /* set MSE higher to enable link to stay up when noise is high */
2678         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2679         if (ret_val)
2680                 goto release;
2681         /* drop link after 5 times MSE threshold was reached */
2682         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2683 release:
2684         hw->phy.ops.release(hw);
2685
2686         return ret_val;
2687 }
2688
2689 /**
2690  *  e1000_k1_gig_workaround_lv - K1 Si workaround
2691  *  @hw:   pointer to the HW structure
2692  *
2693  *  Workaround to set the K1 beacon duration for 82579 parts
2694  **/
2695 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2696 {
2697         s32 ret_val = E1000_SUCCESS;
2698         u16 status_reg = 0;
2699         u32 mac_reg;
2700         u16 phy_reg;
2701
2702         DEBUGFUNC("e1000_k1_workaround_lv");
2703
2704         if (hw->mac.type != e1000_pch2lan)
2705                 return E1000_SUCCESS;
2706
2707         /* Set K1 beacon duration based on 1Gbps speed or otherwise */
2708         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2709         if (ret_val)
2710                 return ret_val;
2711
2712         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2713             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2714                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2715                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2716
2717                 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2718                 if (ret_val)
2719                         return ret_val;
2720
2721                 if (status_reg & HV_M_STATUS_SPEED_1000) {
2722                         u16 pm_phy_reg;
2723
2724                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2725                         phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2726                         /* LV 1G Packet drop issue wa  */
2727                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2728                                                        &pm_phy_reg);
2729                         if (ret_val)
2730                                 return ret_val;
2731                         pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2732                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2733                                                         pm_phy_reg);
2734                         if (ret_val)
2735                                 return ret_val;
2736                 } else {
2737                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2738                         phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2739                 }
2740                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2741                 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2742         }
2743
2744         return ret_val;
2745 }
2746
2747 /**
2748  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2749  *  @hw:   pointer to the HW structure
2750  *  @gate: boolean set to true to gate, false to ungate
2751  *
2752  *  Gate/ungate the automatic PHY configuration via hardware; perform
2753  *  the configuration via software instead.
2754  **/
2755 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2756 {
2757         u32 extcnf_ctrl;
2758
2759         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2760
2761         if (hw->mac.type < e1000_pch2lan)
2762                 return;
2763
2764         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2765
2766         if (gate)
2767                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2768         else
2769                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2770
2771         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2772 }
2773
2774 /**
2775  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2776  *  @hw: pointer to the HW structure
2777  *
2778  *  Check the appropriate indication the MAC has finished configuring the
2779  *  PHY after a software reset.
2780  **/
2781 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2782 {
2783         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2784
2785         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2786
2787         /* Wait for basic configuration completes before proceeding */
2788         do {
2789                 data = E1000_READ_REG(hw, E1000_STATUS);
2790                 data &= E1000_STATUS_LAN_INIT_DONE;
2791                 usec_delay(100);
2792         } while ((!data) && --loop);
2793
2794         /* If basic configuration is incomplete before the above loop
2795          * count reaches 0, loading the configuration from NVM will
2796          * leave the PHY in a bad state possibly resulting in no link.
2797          */
2798         if (loop == 0)
2799                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2800
2801         /* Clear the Init Done bit for the next init event */
2802         data = E1000_READ_REG(hw, E1000_STATUS);
2803         data &= ~E1000_STATUS_LAN_INIT_DONE;
2804         E1000_WRITE_REG(hw, E1000_STATUS, data);
2805 }
2806
2807 /**
2808  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2809  *  @hw: pointer to the HW structure
2810  **/
2811 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2812 {
2813         s32 ret_val = E1000_SUCCESS;
2814         u16 reg;
2815
2816         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2817
2818         if (hw->phy.ops.check_reset_block(hw))
2819                 return E1000_SUCCESS;
2820
2821         /* Allow time for h/w to get to quiescent state after reset */
2822         msec_delay(10);
2823
2824         /* Perform any necessary post-reset workarounds */
2825         switch (hw->mac.type) {
2826         case e1000_pchlan:
2827                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2828                 if (ret_val)
2829                         return ret_val;
2830                 break;
2831         case e1000_pch2lan:
2832                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2833                 if (ret_val)
2834                         return ret_val;
2835                 break;
2836         default:
2837                 break;
2838         }
2839
2840         /* Clear the host wakeup bit after lcd reset */
2841         if (hw->mac.type >= e1000_pchlan) {
2842                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2843                 reg &= ~BM_WUC_HOST_WU_BIT;
2844                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2845         }
2846
2847         /* Configure the LCD with the extended configuration region in NVM */
2848         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2849         if (ret_val)
2850                 return ret_val;
2851
2852         /* Configure the LCD with the OEM bits in NVM */
2853         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2854
2855         if (hw->mac.type == e1000_pch2lan) {
2856                 /* Ungate automatic PHY configuration on non-managed 82579 */
2857                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2858                     E1000_ICH_FWSM_FW_VALID)) {
2859                         msec_delay(10);
2860                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2861                 }
2862
2863                 /* Set EEE LPI Update Timer to 200usec */
2864                 ret_val = hw->phy.ops.acquire(hw);
2865                 if (ret_val)
2866                         return ret_val;
2867                 ret_val = e1000_write_emi_reg_locked(hw,
2868                                                      I82579_LPI_UPDATE_TIMER,
2869                                                      0x1387);
2870                 hw->phy.ops.release(hw);
2871         }
2872
2873         return ret_val;
2874 }
2875
2876 /**
2877  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2878  *  @hw: pointer to the HW structure
2879  *
2880  *  Resets the PHY
2881  *  This is a function pointer entry point called by drivers
2882  *  or other shared routines.
2883  **/
2884 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2885 {
2886         s32 ret_val = E1000_SUCCESS;
2887
2888         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2889
2890         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2891         if ((hw->mac.type == e1000_pch2lan) &&
2892             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2893                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2894
2895         ret_val = e1000_phy_hw_reset_generic(hw);
2896         if (ret_val)
2897                 return ret_val;
2898
2899         return e1000_post_phy_reset_ich8lan(hw);
2900 }
2901
2902 /**
2903  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2904  *  @hw: pointer to the HW structure
2905  *  @active: true to enable LPLU, false to disable
2906  *
2907  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2908  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2909  *  the phy speed. This function will manually set the LPLU bit and restart
2910  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2911  *  since it configures the same bit.
2912  **/
2913 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2914 {
2915         s32 ret_val;
2916         u16 oem_reg;
2917
2918         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2919
2920         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2921         if (ret_val)
2922                 return ret_val;
2923
2924         if (active)
2925                 oem_reg |= HV_OEM_BITS_LPLU;
2926         else
2927                 oem_reg &= ~HV_OEM_BITS_LPLU;
2928
2929         if (!hw->phy.ops.check_reset_block(hw))
2930                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2931
2932         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2933 }
2934
2935 /**
2936  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2937  *  @hw: pointer to the HW structure
2938  *  @active: true to enable LPLU, false to disable
2939  *
2940  *  Sets the LPLU D0 state according to the active flag.  When
2941  *  activating LPLU this function also disables smart speed
2942  *  and vice versa.  LPLU will not be activated unless the
2943  *  device autonegotiation advertisement meets standards of
2944  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2945  *  This is a function pointer entry point only called by
2946  *  PHY setup routines.
2947  **/
2948 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2949 {
2950         struct e1000_phy_info *phy = &hw->phy;
2951         u32 phy_ctrl;
2952         s32 ret_val = E1000_SUCCESS;
2953         u16 data;
2954
2955         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2956
2957         if (phy->type == e1000_phy_ife)
2958                 return E1000_SUCCESS;
2959
2960         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2961
2962         if (active) {
2963                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2964                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2965
2966                 if (phy->type != e1000_phy_igp_3)
2967                         return E1000_SUCCESS;
2968
2969                 /* Call gig speed drop workaround on LPLU before accessing
2970                  * any PHY registers
2971                  */
2972                 if (hw->mac.type == e1000_ich8lan)
2973                         e1000_gig_downshift_workaround_ich8lan(hw);
2974
2975                 /* When LPLU is enabled, we should disable SmartSpeed */
2976                 ret_val = phy->ops.read_reg(hw,
2977                                             IGP01E1000_PHY_PORT_CONFIG,
2978                                             &data);
2979                 if (ret_val)
2980                         return ret_val;
2981                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2982                 ret_val = phy->ops.write_reg(hw,
2983                                              IGP01E1000_PHY_PORT_CONFIG,
2984                                              data);
2985                 if (ret_val)
2986                         return ret_val;
2987         } else {
2988                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2989                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2990
2991                 if (phy->type != e1000_phy_igp_3)
2992                         return E1000_SUCCESS;
2993
2994                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2995                  * during Dx states where the power conservation is most
2996                  * important.  During driver activity we should enable
2997                  * SmartSpeed, so performance is maintained.
2998                  */
2999                 if (phy->smart_speed == e1000_smart_speed_on) {
3000                         ret_val = phy->ops.read_reg(hw,
3001                                                     IGP01E1000_PHY_PORT_CONFIG,
3002                                                     &data);
3003                         if (ret_val)
3004                                 return ret_val;
3005
3006                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3007                         ret_val = phy->ops.write_reg(hw,
3008                                                      IGP01E1000_PHY_PORT_CONFIG,
3009                                                      data);
3010                         if (ret_val)
3011                                 return ret_val;
3012                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3013                         ret_val = phy->ops.read_reg(hw,
3014                                                     IGP01E1000_PHY_PORT_CONFIG,
3015                                                     &data);
3016                         if (ret_val)
3017                                 return ret_val;
3018
3019                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3020                         ret_val = phy->ops.write_reg(hw,
3021                                                      IGP01E1000_PHY_PORT_CONFIG,
3022                                                      data);
3023                         if (ret_val)
3024                                 return ret_val;
3025                 }
3026         }
3027
3028         return E1000_SUCCESS;
3029 }
3030
3031 /**
3032  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3033  *  @hw: pointer to the HW structure
3034  *  @active: true to enable LPLU, false to disable
3035  *
3036  *  Sets the LPLU D3 state according to the active flag.  When
3037  *  activating LPLU this function also disables smart speed
3038  *  and vice versa.  LPLU will not be activated unless the
3039  *  device autonegotiation advertisement meets standards of
3040  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3041  *  This is a function pointer entry point only called by
3042  *  PHY setup routines.
3043  **/
STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

	if (!active) {
		/* Clear the non-D0a LPLU bit in the MAC's PHY control reg. */
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* The SmartSpeed adjustment below only applies to igp_3
		 * PHYs; other PHY types are done at this point.
		 */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		}
		/* smart_speed == default: leave port config untouched. */
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Enable LPLU only when advertising one of the approved
		 * speed/duplex combinations; otherwise leave it disabled.
		 */
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw,
					    IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw,
					     IGP01E1000_PHY_PORT_CONFIG,
					     data);
	}

	return ret_val;
}
3124
3125 /**
3126  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3127  *  @hw: pointer to the HW structure
3128  *  @bank:  pointer to the variable that returns the active bank
3129  *
3130  *  Reads signature byte from the NVM using the flash access registers.
3131  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3132  **/
STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Byte distance between the two flash banks. */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* Byte offset of the high byte of the signature word (0x13). */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* On ICH8/ICH9 the EEC register may directly report which
		 * bank is valid; trust it only when the valid-indication
		 * bits are all set.
		 */
		eecd = E1000_READ_REG(hw, E1000_EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return E1000_SUCCESS;
		}
		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		/* Neither bank carries a valid signature. */
		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3190
3191 /**
3192  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3193  *  @hw: pointer to the HW structure
3194  *  @offset: The offset (in bytes) of the word(s) to read.
3195  *  @words: Size of data to read in words
3196  *  @data: Pointer to the word(s) to read at offset.
3197  *
3198  *  Reads a word(s) from the NVM using the flash access registers.
3199  **/
STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u16 i, word;

	DEBUGFUNC("e1000_read_nvm_ich8lan");

	/* Validate the requested range before touching hardware. */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		/* Fall back to bank 0 rather than failing the read. */
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	/* Translate the word offset into the active bank. */
	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	ret_val = E1000_SUCCESS;
	for (i = 0; i < words; i++) {
		/* Pending (uncommitted) writes live in the shadow RAM and
		 * take precedence over the flash contents.
		 */
		if (dev_spec->shadow_ram[offset+i].modified) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		DEBUGOUT1("NVM read error: %d\n", ret_val);

	return ret_val;
}
3252
3253 /**
3254  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3255  *  @hw: pointer to the HW structure
3256  *
3257  *  This function does initial flash setup so that a new read/write/erase cycle
3258  *  can be started.
3259  **/
STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = E1000_SUCCESS;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(1);
		}
		if (ret_val == E1000_SUCCESS) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
						hsfsts.regval);
		} else {
			/* Previous cycle never finished; caller cannot start
			 * a new one.
			 */
			DEBUGOUT("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3325
3326 /**
3327  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3328  *  @hw: pointer to the HW structure
3329  *  @timeout: maximum time to wait for completion
3330  *
3331  *  This function starts a flash cycle and waits for its completion.
3332  **/
STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	DEBUGFUNC("e1000_flash_cycle_ich8lan");

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;

	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		usec_delay(1);
	} while (i++ < timeout);

	/* Success requires the cycle to be done (FDONE) with no flash
	 * cycle error (FCERR); anything else is reported as an NVM error.
	 */
	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return E1000_SUCCESS;

	return -E1000_ERR_NVM;
}
3360
3361 /**
3362  *  e1000_read_flash_word_ich8lan - Read word from flash
3363  *  @hw: pointer to the HW structure
3364  *  @offset: offset to data location
3365  *  @data: pointer to the location for storing the data
3366  *
3367  *  Reads the flash word at offset into data.  Offset is converted
3368  *  to bytes before read.
3369  **/
3370 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3371                                          u16 *data)
3372 {
3373         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3374
3375         if (!data)
3376                 return -E1000_ERR_NVM;
3377
3378         /* Must convert offset into bytes. */
3379         offset <<= 1;
3380
3381         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3382 }
3383
3384 /**
3385  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3386  *  @hw: pointer to the HW structure
3387  *  @offset: The offset of the byte to read.
3388  *  @data: Pointer to a byte to store the value read.
3389  *
3390  *  Reads a single byte from the NVM using the flash access registers.
3391  **/
3392 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3393                                          u8 *data)
3394 {
3395         s32 ret_val;
3396         u16 word = 0;
3397
3398         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3399
3400         if (ret_val)
3401                 return ret_val;
3402
3403         *data = (u8)word;
3404
3405         return E1000_SUCCESS;
3406 }
3407
3408 /**
3409  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3410  *  @hw: pointer to the HW structure
3411  *  @offset: The offset (in bytes) of the byte or word to read.
3412  *  @size: Size of data to read, 1=byte 2=word
3413  *  @data: Pointer to the word to store the value read.
3414  *
3415  *  Reads a byte or word from the NVM using the flash access registers.
3416  **/
STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	DEBUGFUNC("e1000_read_flash_data_ich8lan");

	/* Only 1- and 2-byte reads are supported by this path. */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == E1000_SUCCESS) {
			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3485
3486 /**
3487  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3488  *  @hw: pointer to the HW structure
3489  *  @offset: The offset (in bytes) of the word(s) to write.
3490  *  @words: Size of data to write in words
3491  *  @data: Pointer to the word(s) to write at offset.
3492  *
3493  *  Writes a byte or word to the NVM using the flash access registers.
3494  **/
3495 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3496                                    u16 *data)
3497 {
3498         struct e1000_nvm_info *nvm = &hw->nvm;
3499         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3500         u16 i;
3501
3502         DEBUGFUNC("e1000_write_nvm_ich8lan");
3503
3504         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3505             (words == 0)) {
3506                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3507                 return -E1000_ERR_NVM;
3508         }
3509
3510         nvm->ops.acquire(hw);
3511
3512         for (i = 0; i < words; i++) {
3513                 dev_spec->shadow_ram[offset+i].modified = true;
3514                 dev_spec->shadow_ram[offset+i].value = data[i];
3515         }
3516
3517         nvm->ops.release(hw);
3518
3519         return E1000_SUCCESS;
3520 }
3521
3522 /**
3523  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3524  *  @hw: pointer to the HW structure
3525  *
3526  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3527  *  which writes the checksum to the shadow ram.  The changes in the shadow
3528  *  ram are then committed to the EEPROM by processing each bank at a time
3529  *  checking for the modified bit and writing only the pending changes.
3530  *  After a successful commit, the shadow ram is cleared and is ready for
3531  *  future writes.
3532  **/
STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");

	/* Recompute the checksum into the shadow RAM first. */
	ret_val = e1000_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* The commit sequence below only applies to SW-managed flash. */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		/* Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usec_delay(100);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usec_delay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							  act_offset + 1,
							  (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		DEBUGOUT("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	/* Clear bit 14 (0xBFFF mask) and rewrite only the high byte of
	 * the signature word.
	 */
	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's. We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		msec_delay(10);
	}

out:
	if (ret_val)
		DEBUGOUT1("NVM update error: %d\n", ret_val);

	return ret_val;
}
3678
3679 /**
3680  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3681  *  @hw: pointer to the HW structure
3682  *
3683  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3684  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
3685  *  calculated, in which case we need to calculate the checksum and set bit 6.
3686  **/
3687 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3688 {
3689         s32 ret_val;
3690         u16 data;
3691         u16 word;
3692         u16 valid_csum_mask;
3693
3694         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3695
3696         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3697          * the checksum needs to be fixed.  This bit is an indication that
3698          * the NVM was prepared by OEM software and did not calculate
3699          * the checksum...a likely scenario.
3700          */
3701         switch (hw->mac.type) {
3702         case e1000_pch_lpt:
3703                 word = NVM_COMPAT;
3704                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3705                 break;
3706         default:
3707                 word = NVM_FUTURE_INIT_WORD1;
3708                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3709                 break;
3710         }
3711
3712         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3713         if (ret_val)
3714                 return ret_val;
3715
3716         if (!(data & valid_csum_mask)) {
3717                 data |= valid_csum_mask;
3718                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3719                 if (ret_val)
3720                         return ret_val;
3721                 ret_val = hw->nvm.ops.update(hw);
3722                 if (ret_val)
3723                         return ret_val;
3724         }
3725
3726         return e1000_validate_nvm_checksum_generic(hw);
3727 }
3728
3729 /**
3730  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3731  *  @hw: pointer to the HW structure
3732  *  @offset: The offset (in bytes) of the byte/word to read.
3733  *  @size: Size of data to read, 1=byte 2=word
3734  *  @data: The byte(s) to write to the NVM.
3735  *
3736  *  Writes one/two bytes to the NVM using the flash access registers.
3737  **/
STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	DEBUGFUNC("e1000_write_ich8_data");

	/* Only 1- and 2-byte writes are supported; for a 1-byte write
	 * the data value must also fit in a byte (data > size * 0xff).
	 */
	if (size < 1 || size > 2 || data > size * 0xff ||
	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3805
3806 /**
3807  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3808  *  @hw: pointer to the HW structure
 *  @offset: The index of the byte to write.
3810  *  @data: The byte to write to the NVM.
3811  *
3812  *  Writes a single byte to the NVM using the flash access registers.
3813  **/
3814 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3815                                           u8 data)
3816 {
3817         u16 word = (u16)data;
3818
3819         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3820
3821         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3822 }
3823
3824 /**
3825  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3826  *  @hw: pointer to the HW structure
3827  *  @offset: The offset of the byte to write.
3828  *  @byte: The byte to write to the NVM.
3829  *
3830  *  Writes a single byte to the NVM using the flash access registers.
3831  *  Goes through a retry algorithm before giving up.
3832  **/
3833 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3834                                                 u32 offset, u8 byte)
3835 {
3836         s32 ret_val;
3837         u16 program_retries;
3838
3839         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3840
3841         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3842         if (!ret_val)
3843                 return ret_val;
3844
3845         for (program_retries = 0; program_retries < 100; program_retries++) {
3846                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3847                 usec_delay(100);
3848                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3849                 if (ret_val == E1000_SUCCESS)
3850                         break;
3851         }
3852         if (program_retries == 100)
3853                 return -E1000_ERR_NVM;
3854
3855         return E1000_SUCCESS;
3856 }
3857
3858 /**
3859  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3860  *  @hw: pointer to the HW structure
3861  *  @bank: 0 for first bank, 1 for second bank, etc.
3862  *
3863  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3864  *  bank N is 4096 * N + flash_reg_addr.
3865  **/
3866 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3867 {
3868         struct e1000_nvm_info *nvm = &hw->nvm;
3869         union ich8_hws_flash_status hsfsts;
3870         union ich8_hws_flash_ctrl hsflctl;
3871         u32 flash_linear_addr;
3872         /* bank size is in 16bit words - adjust to bytes */
3873         u32 flash_bank_size = nvm->flash_bank_size * 2;
3874         s32 ret_val;
3875         s32 count = 0;
3876         s32 j, iteration, sector_size;
3877
3878         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3879
3880         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3881
3882         /* Determine HW Sector size: Read BERASE bits of hw flash status
3883          * register
3884          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3885          *     consecutive sectors.  The start index for the nth Hw sector
3886          *     can be calculated as = bank * 4096 + n * 256
3887          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3888          *     The start index for the nth Hw sector can be calculated
3889          *     as = bank * 4096
3890          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3891          *     (ich9 only, otherwise error condition)
3892          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3893          */
3894         switch (hsfsts.hsf_status.berasesz) {
3895         case 0:
3896                 /* Hw sector size 256 */
3897                 sector_size = ICH_FLASH_SEG_SIZE_256;
3898                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3899                 break;
3900         case 1:
3901                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3902                 iteration = 1;
3903                 break;
3904         case 2:
3905                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3906                 iteration = 1;
3907                 break;
3908         case 3:
3909                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3910                 iteration = 1;
3911                 break;
3912         default:
3913                 return -E1000_ERR_NVM;
3914         }
3915
3916         /* Start with the base address, then add the sector offset. */
3917         flash_linear_addr = hw->nvm.flash_base_addr;
3918         flash_linear_addr += (bank) ? flash_bank_size : 0;
3919
3920         for (j = 0; j < iteration; j++) {
3921                 do {
3922                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3923
3924                         /* Steps */
3925                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3926                         if (ret_val)
3927                                 return ret_val;
3928
3929                         /* Write a value 11 (block Erase) in Flash
3930                          * Cycle field in hw flash control
3931                          */
3932                         hsflctl.regval =
3933                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3934
3935                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3936                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3937                                                 hsflctl.regval);
3938
3939                         /* Write the last 24 bits of an index within the
3940                          * block into Flash Linear address field in Flash
3941                          * Address.
3942                          */
3943                         flash_linear_addr += (j * sector_size);
3944                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3945                                               flash_linear_addr);
3946
3947                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3948                         if (ret_val == E1000_SUCCESS)
3949                                 break;
3950
3951                         /* Check if FCERR is set to 1.  If 1,
3952                          * clear it and try the whole sequence
3953                          * a few more times else Done
3954                          */
3955                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3956                                                       ICH_FLASH_HSFSTS);
3957                         if (hsfsts.hsf_status.flcerr)
3958                                 /* repeat for some time before giving up */
3959                                 continue;
3960                         else if (!hsfsts.hsf_status.flcdone)
3961                                 return ret_val;
3962                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3963         }
3964
3965         return E1000_SUCCESS;
3966 }
3967
3968 /**
3969  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3970  *  @hw: pointer to the HW structure
3971  *  @data: Pointer to the LED settings
3972  *
3973  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3974  *  settings is all 0's or F's, set the LED default to a valid LED default
3975  *  setting.
3976  **/
3977 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3978 {
3979         s32 ret_val;
3980
3981         DEBUGFUNC("e1000_valid_led_default_ich8lan");
3982
3983         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3984         if (ret_val) {
3985                 DEBUGOUT("NVM Read Error\n");
3986                 return ret_val;
3987         }
3988
3989         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3990                 *data = ID_LED_DEFAULT_ICH8LAN;
3991
3992         return E1000_SUCCESS;
3993 }
3994
3995 /**
3996  *  e1000_id_led_init_pchlan - store LED configurations
3997  *  @hw: pointer to the HW structure
3998  *
3999  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4000  *  the PHY LED configuration register.
4001  *
4002  *  PCH also does not have an "always on" or "always off" mode which
4003  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4004  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4005  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4006  *  link based on logic in e1000_led_[on|off]_pchlan().
4007  **/
4008 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4009 {
4010         struct e1000_mac_info *mac = &hw->mac;
4011         s32 ret_val;
4012         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4013         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4014         u16 data, i, temp, shift;
4015
4016         DEBUGFUNC("e1000_id_led_init_pchlan");
4017
4018         /* Get default ID LED modes */
4019         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4020         if (ret_val)
4021                 return ret_val;
4022
4023         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4024         mac->ledctl_mode1 = mac->ledctl_default;
4025         mac->ledctl_mode2 = mac->ledctl_default;
4026
4027         for (i = 0; i < 4; i++) {
4028                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4029                 shift = (i * 5);
4030                 switch (temp) {
4031                 case ID_LED_ON1_DEF2:
4032                 case ID_LED_ON1_ON2:
4033                 case ID_LED_ON1_OFF2:
4034                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4035                         mac->ledctl_mode1 |= (ledctl_on << shift);
4036                         break;
4037                 case ID_LED_OFF1_DEF2:
4038                 case ID_LED_OFF1_ON2:
4039                 case ID_LED_OFF1_OFF2:
4040                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4041                         mac->ledctl_mode1 |= (ledctl_off << shift);
4042                         break;
4043                 default:
4044                         /* Do nothing */
4045                         break;
4046                 }
4047                 switch (temp) {
4048                 case ID_LED_DEF1_ON2:
4049                 case ID_LED_ON1_ON2:
4050                 case ID_LED_OFF1_ON2:
4051                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4052                         mac->ledctl_mode2 |= (ledctl_on << shift);
4053                         break;
4054                 case ID_LED_DEF1_OFF2:
4055                 case ID_LED_ON1_OFF2:
4056                 case ID_LED_OFF1_OFF2:
4057                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4058                         mac->ledctl_mode2 |= (ledctl_off << shift);
4059                         break;
4060                 default:
4061                         /* Do nothing */
4062                         break;
4063                 }
4064         }
4065
4066         return E1000_SUCCESS;
4067 }
4068
4069 /**
4070  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4071  *  @hw: pointer to the HW structure
4072  *
 *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
 *  register, so the bus width is hard coded.
4075  **/
4076 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4077 {
4078         struct e1000_bus_info *bus = &hw->bus;
4079         s32 ret_val;
4080
4081         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4082
4083         ret_val = e1000_get_bus_info_pcie_generic(hw);
4084
4085         /* ICH devices are "PCI Express"-ish.  They have
4086          * a configuration space, but do not contain
4087          * PCI Express Capability registers, so bus width
4088          * must be hardcoded.
4089          */
4090         if (bus->width == e1000_bus_width_unknown)
4091                 bus->width = e1000_bus_width_pcie_x1;
4092
4093         return ret_val;
4094 }
4095
4096 /**
4097  *  e1000_reset_hw_ich8lan - Reset the hardware
4098  *  @hw: pointer to the HW structure
4099  *
4100  *  Does a full reset of the hardware which includes a reset of the PHY and
4101  *  MAC.
4102  **/
STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 * A failure here is logged but non-fatal; the reset proceeds.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting so it can be restored
		 * after the PHY reset.
		 */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	/* NOTE(review): the reset is issued even if acquiring the software
	 * flag fails; ret_val is only consulted below to decide whether the
	 * swflag mutex must be released.
	 */
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	DEBUGOUT("Issuing a global reset to ich8lan\n");
	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msec_delay(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
	}

	/* Release the software flag only if it was successfully acquired. */
	if (!ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	if (ctrl & E1000_CTRL_PHY_RST) {
		/* The PHY was reset along with the MAC: wait for its
		 * configuration to finish, then run the post-reset
		 * workarounds.
		 */
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);

	/* Mask and clear any interrupts raised during the reset. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	reg = E1000_READ_REG(hw, E1000_KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);

	return E1000_SUCCESS;
}
4211
4212 /**
4213  *  e1000_init_hw_ich8lan - Initialize the hardware
4214  *  @hw: pointer to the HW structure
4215  *
4216  *  Prepares the hardware for transmit and receive by doing the following:
4217  *   - initialize hardware bits
4218  *   - initialize LED identification
4219  *   - setup receive address registers
4220  *   - setup flow control
4221  *   - setup transmit descriptors
4222  *   - clear statistics
4223  **/
STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_init_hw_ich8lan");

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		DEBUGOUT("Error initializing identification LED\n");

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control.
	 * NOTE(review): this ret_val is the function's eventual return
	 * value; the register setup below runs even if setup_link failed.
	 */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);

	/* ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000_set_pcie_no_snoop_generic(hw, snoop);

	/* Disable relaxed ordering. */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}
4301
4302 /**
4303  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4304  *  @hw: pointer to the HW structure
4305  *
4306  *  Sets/Clears required hardware bits necessary for correctly setting up the
4307  *  hardware for transmit and receive.
4308  **/
STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");

	/* Extended Device Control */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* bit 22 - meaning not documented here; TODO confirm vs datasheet */
	reg |= (1 << 22);
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if (hw->mac.type >= e1000_pchlan)
		reg |= E1000_CTRL_EXT_PHYPDEN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Transmit Descriptor Control 0 */
	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
	reg |= (1 << 22);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
	reg |= (1 << 22);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = E1000_READ_REG(hw, E1000_TARC(0));
	if (hw->mac.type == e1000_ich8lan)
		reg |= (1 << 28) | (1 << 29);
	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
	E1000_WRITE_REG(hw, E1000_TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = E1000_READ_REG(hw, E1000_TARC(1));
	/* Bit 28 tracks (inverted) the multiple-request state in TCTL. */
	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	reg |= (1 << 24) | (1 << 26) | (1 << 30);
	E1000_WRITE_REG(hw, E1000_TARC(1), reg);

	/* Device Status */
	if (hw->mac.type == e1000_ich8lan) {
		/* Clear bit 31 on ICH8 only; purpose not documented here. */
		reg = E1000_READ_REG(hw, E1000_STATUS);
		reg &= ~(1 << 31);
		E1000_WRITE_REG(hw, E1000_STATUS, reg);
	}

	/* work-around descriptor data corruption issue during nfs v2 udp
	 * traffic, just disable the nfs filtering capability
	 */
	reg = E1000_READ_REG(hw, E1000_RFCTL);
	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);

	/* Disable IPv6 extension header parsing because some malformed
	 * IPv6 headers can hang the Rx.
	 */
	if (hw->mac.type == e1000_ich8lan)
		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
	E1000_WRITE_REG(hw, E1000_RFCTL, reg);

	/* Enable ECC on Lynxpoint */
	if (hw->mac.type == e1000_pch_lpt) {
		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
		reg |= E1000_PBECCSTS_ECC_ENABLE;
		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);

		/* Enable memory-error handling in the MAC as well. */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		reg |= E1000_CTRL_MEHE;
		E1000_WRITE_REG(hw, E1000_CTRL, reg);
	}

	return;
}
4382
4383 /**
4384  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4385  *  @hw: pointer to the HW structure
4386  *
4387  *  Determines which flow control settings to use, then configures flow
4388  *  control.  Calls the appropriate media-specific link configuration
4389  *  function.  Assuming the adapter has a valid link partner, a valid link
4390  *  should be established.  Assumes the hardware has previously been reset
4391  *  and the transmitter and receiver are not enabled.
4392  **/
4393 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4394 {
4395         s32 ret_val;
4396
4397         DEBUGFUNC("e1000_setup_link_ich8lan");
4398
4399         if (hw->phy.ops.check_reset_block(hw))
4400                 return E1000_SUCCESS;
4401
4402         /* ICH parts do not have a word in the NVM to determine
4403          * the default flow control setting, so we explicitly
4404          * set it to full.
4405          */
4406         if (hw->fc.requested_mode == e1000_fc_default)
4407                 hw->fc.requested_mode = e1000_fc_full;
4408
4409         /* Save off the requested flow control mode for use later.  Depending
4410          * on the link partner's capabilities, we may or may not use this mode.
4411          */
4412         hw->fc.current_mode = hw->fc.requested_mode;
4413
4414         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4415                 hw->fc.current_mode);
4416
4417         /* Continue to configure the copper link. */
4418         ret_val = hw->mac.ops.setup_physical_interface(hw);
4419         if (ret_val)
4420                 return ret_val;
4421
4422         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4423         if ((hw->phy.type == e1000_phy_82578) ||
4424             (hw->phy.type == e1000_phy_82579) ||
4425             (hw->phy.type == e1000_phy_i217) ||
4426             (hw->phy.type == e1000_phy_82577)) {
4427                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4428
4429                 ret_val = hw->phy.ops.write_reg(hw,
4430                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4431                                              hw->fc.pause_time);
4432                 if (ret_val)
4433                         return ret_val;
4434         }
4435
4436         return e1000_set_fc_watermarks_generic(hw);
4437 }
4438
4439 /**
4440  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4441  *  @hw: pointer to the HW structure
4442  *
4443  *  Configures the kumeran interface to the PHY to wait the appropriate time
4444  *  when polling the PHY, then call the generic setup_copper_link to finish
4445  *  configuring the copper link.
4446  **/
STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	/* Force link up at the MAC and let speed/duplex track the PHY. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	/* Read-modify-write of the Kumeran inband parameter register;
	 * the meaning of the 0x3F bits is not documented here.
	 */
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	/* Run the PHY-family-specific copper setup. */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		/* IFE PHY: program the MDI/MDI-X mode requested in
		 * hw->phy.mdix (1 = forced MDI, 2 = forced MDI-X,
		 * 0/other = auto crossover).
		 */
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		/* No PHY-specific setup required. */
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}
4529
4530 /**
4531  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4532  *  @hw: pointer to the HW structure
4533  *
4534  *  Calls the PHY specific link setup function and then calls the
4535  *  generic setup_copper_link to finish configuring the link for
4536  *  Lynxpoint PCH devices
4537  **/
4538 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4539 {
4540         u32 ctrl;
4541         s32 ret_val;
4542
4543         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4544
4545         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4546         ctrl |= E1000_CTRL_SLU;
4547         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4548         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4549
4550         ret_val = e1000_copper_link_setup_82577(hw);
4551         if (ret_val)
4552                 return ret_val;
4553
4554         return e1000_setup_copper_link_generic(hw);
4555 }
4556
4557 /**
4558  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4559  *  @hw: pointer to the HW structure
4560  *  @speed: pointer to store current link speed
4561  *  @duplex: pointer to store the current link duplex
4562  *
4563  *  Calls the generic get_speed_and_duplex to retrieve the current link
4564  *  information and then calls the Kumeran lock loss workaround for links at
4565  *  gigabit speeds.
4566  **/
4567 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4568                                           u16 *duplex)
4569 {
4570         s32 ret_val;
4571
4572         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4573
4574         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4575         if (ret_val)
4576                 return ret_val;
4577
4578         if ((hw->mac.type == e1000_ich8lan) &&
4579             (hw->phy.type == e1000_phy_igp_3) &&
4580             (*speed == SPEED_1000)) {
4581                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4582         }
4583
4584         return ret_val;
4585 }
4586
4587 /**
4588  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4589  *  @hw: pointer to the HW structure
4590  *
4591  *  Work-around for 82566 Kumeran PCS lock loss:
4592  *  On link status change (i.e. PCI reset, speed change) and link is up and
4593  *  speed is gigabit-
4594  *    0) if workaround is optionally disabled do nothing
4595  *    1) wait 1ms for Kumeran link to come up
4596  *    2) check Kumeran Diagnostic register PCS lock loss bit
4597  *    3) if not set the link is locked (all is good), otherwise...
4598  *    4) reset the PHY
4599  *    5) repeat up to 10 times
4600  *  Note: this is only called for IGP3 copper when speed is 1gb.
4601  **/
4602 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4603 {
4604         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4605         u32 phy_ctrl;
4606         s32 ret_val;
4607         u16 i, data;
4608         bool link;
4609
4610         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4611
4612         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4613                 return E1000_SUCCESS;
4614
4615         /* Make sure link is up before proceeding.  If not just return.
4616          * Attempting this while link is negotiating fouled up link
4617          * stability
4618          */
4619         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4620         if (!link)
4621                 return E1000_SUCCESS;
4622
4623         for (i = 0; i < 10; i++) {
4624                 /* read once to clear */
4625                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4626                 if (ret_val)
4627                         return ret_val;
4628                 /* and again to get new status */
4629                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4630                 if (ret_val)
4631                         return ret_val;
4632
4633                 /* check for PCS lock */
4634                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4635                         return E1000_SUCCESS;
4636
4637                 /* Issue PHY reset */
4638                 hw->phy.ops.reset(hw);
4639                 msec_delay_irq(5);
4640         }
4641         /* Disable GigE link negotiation */
4642         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4643         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4644                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4645         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4646
4647         /* Call gig speed drop workaround on Gig disable before accessing
4648          * any PHY registers
4649          */
4650         e1000_gig_downshift_workaround_ich8lan(hw);
4651
4652         /* unable to acquire PCS lock */
4653         return -E1000_ERR_PHY;
4654 }
4655
4656 /**
4657  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4658  *  @hw: pointer to the HW structure
4659  *  @state: boolean value used to set the current Kumeran workaround state
4660  *
4661  *  If ICH8, set the current Kumeran workaround state (enabled - true
4662  *  /disabled - false).
4663  **/
4664 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4665                                                  bool state)
4666 {
4667         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4668
4669         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4670
4671         if (hw->mac.type != e1000_ich8lan) {
4672                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4673                 return;
4674         }
4675
4676         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4677
4678         return;
4679 }
4680
/**
 *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 *
 *  No-op for any PHY other than IGP3.  The PHY register accesses are
 *  best-effort: their return codes are deliberately ignored because the
 *  read-back/verify step below catches a failed write.
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8  retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Success, or second attempt already made - stop either way */
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
4734
4735 /**
4736  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4737  *  @hw: pointer to the HW structure
4738  *
4739  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4740  *  LPLU, Gig disable, MDIC PHY reset):
4741  *    1) Set Kumeran Near-end loopback
4742  *    2) Clear Kumeran Near-end loopback
4743  *  Should only be called for ICH8[m] devices with any 1G Phy.
4744  **/
4745 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4746 {
4747         s32 ret_val;
4748         u16 reg_data;
4749
4750         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4751
4752         if ((hw->mac.type != e1000_ich8lan) ||
4753             (hw->phy.type == e1000_phy_ife))
4754                 return;
4755
4756         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4757                                               &reg_data);
4758         if (ret_val)
4759                 return;
4760         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4761         ret_val = e1000_write_kmrn_reg_generic(hw,
4762                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4763                                                reg_data);
4764         if (ret_val)
4765                 return;
4766         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4767         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4768                                      reg_data);
4769 }
4770
/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'Gig Disable' to force link speed negotiation to a lower speed based on
 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 *  needs to be written.
 *  Parts that support (and are linked to a partner which support) EEE in
 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 *  than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	/* Written back to E1000_PHY_CTRL at 'out' regardless of PHY type */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		/* On I218 (LPT-LP) parts, stop requesting the PLL clock
		 * before entering Sx
		 */
		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
4905
/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, setup Intel Rapid Start Technology.
 **/
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");

	/* Only PCH2 (82579) and newer need this resume flow */
	if (hw->mac.type < e1000_pch2lan)
		return;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		/* ret_val is zero here on the success path, so no log then */
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
	}
}
4978
4979 /**
4980  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4981  *  @hw: pointer to the HW structure
4982  *
4983  *  Return the LED back to the default configuration.
4984  **/
4985 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4986 {
4987         DEBUGFUNC("e1000_cleanup_led_ich8lan");
4988
4989         if (hw->phy.type == e1000_phy_ife)
4990                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4991                                              0);
4992
4993         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4994         return E1000_SUCCESS;
4995 }
4996
4997 /**
4998  *  e1000_led_on_ich8lan - Turn LEDs on
4999  *  @hw: pointer to the HW structure
5000  *
5001  *  Turn on the LEDs.
5002  **/
5003 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5004 {
5005         DEBUGFUNC("e1000_led_on_ich8lan");
5006
5007         if (hw->phy.type == e1000_phy_ife)
5008                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5009                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5010
5011         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5012         return E1000_SUCCESS;
5013 }
5014
5015 /**
5016  *  e1000_led_off_ich8lan - Turn LEDs off
5017  *  @hw: pointer to the HW structure
5018  *
5019  *  Turn off the LEDs.
5020  **/
5021 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5022 {
5023         DEBUGFUNC("e1000_led_off_ich8lan");
5024
5025         if (hw->phy.type == e1000_phy_ife)
5026                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5027                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5028
5029         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5030         return E1000_SUCCESS;
5031 }
5032
5033 /**
5034  *  e1000_setup_led_pchlan - Configures SW controllable LED
5035  *  @hw: pointer to the HW structure
5036  *
5037  *  This prepares the SW controllable LED for use.
5038  **/
5039 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5040 {
5041         DEBUGFUNC("e1000_setup_led_pchlan");
5042
5043         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5044                                      (u16)hw->mac.ledctl_mode1);
5045 }
5046
5047 /**
5048  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5049  *  @hw: pointer to the HW structure
5050  *
5051  *  Return the LED back to the default configuration.
5052  **/
5053 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5054 {
5055         DEBUGFUNC("e1000_cleanup_led_pchlan");
5056
5057         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5058                                      (u16)hw->mac.ledctl_default);
5059 }
5060
5061 /**
5062  *  e1000_led_on_pchlan - Turn LEDs on
5063  *  @hw: pointer to the HW structure
5064  *
5065  *  Turn on the LEDs.
5066  **/
5067 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5068 {
5069         u16 data = (u16)hw->mac.ledctl_mode2;
5070         u32 i, led;
5071
5072         DEBUGFUNC("e1000_led_on_pchlan");
5073
5074         /* If no link, then turn LED on by setting the invert bit
5075          * for each LED that's mode is "link_up" in ledctl_mode2.
5076          */
5077         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5078                 for (i = 0; i < 3; i++) {
5079                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5080                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5081                             E1000_LEDCTL_MODE_LINK_UP)
5082                                 continue;
5083                         if (led & E1000_PHY_LED0_IVRT)
5084                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5085                         else
5086                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5087                 }
5088         }
5089
5090         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5091 }
5092
5093 /**
5094  *  e1000_led_off_pchlan - Turn LEDs off
5095  *  @hw: pointer to the HW structure
5096  *
5097  *  Turn off the LEDs.
5098  **/
5099 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5100 {
5101         u16 data = (u16)hw->mac.ledctl_mode1;
5102         u32 i, led;
5103
5104         DEBUGFUNC("e1000_led_off_pchlan");
5105
5106         /* If no link, then turn LED off by clearing the invert bit
5107          * for each LED that's mode is "link_up" in ledctl_mode1.
5108          */
5109         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5110                 for (i = 0; i < 3; i++) {
5111                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5112                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5113                             E1000_LEDCTL_MODE_LINK_UP)
5114                                 continue;
5115                         if (led & E1000_PHY_LED0_IVRT)
5116                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5117                         else
5118                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5119                 }
5120         }
5121
5122         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5123 }
5124
/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and continues.  If we were
 *  to return with error, EEPROM-less silicon would not be able to be reset
 *  or change link.
 **/
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		/* ICH10 and newer: detect NVM presence via bank detection
		 * rather than the EECD present bit
		 */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}
5185
5186 /**
5187  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5188  * @hw: pointer to the HW structure
5189  *
5190  * In the case of a PHY power down to save power, or to turn off link during a
5191  * driver unload, or wake on lan is not enabled, remove the link.
5192  **/
5193 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5194 {
5195         /* If the management interface is not enabled, then power down */
5196         if (!(hw->mac.ops.check_mng_mode(hw) ||
5197               hw->phy.ops.check_reset_block(hw)))
5198                 e1000_power_down_phy_copper(hw);
5199
5200         return;
5201 }
5202
5203 /**
5204  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5205  *  @hw: pointer to the HW structure
5206  *
5207  *  Clears hardware counters specific to the silicon family and calls
5208  *  clear_hw_cntrs_generic to clear all general purpose counters.
5209  **/
5210 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5211 {
5212         u16 phy_data;
5213         s32 ret_val;
5214
5215         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5216
5217         e1000_clear_hw_cntrs_base_generic(hw);
5218
5219         E1000_READ_REG(hw, E1000_ALGNERRC);
5220         E1000_READ_REG(hw, E1000_RXERRC);
5221         E1000_READ_REG(hw, E1000_TNCRS);
5222         E1000_READ_REG(hw, E1000_CEXTERR);
5223         E1000_READ_REG(hw, E1000_TSCTC);
5224         E1000_READ_REG(hw, E1000_TSCTFC);
5225
5226         E1000_READ_REG(hw, E1000_MGTPRC);
5227         E1000_READ_REG(hw, E1000_MGTPDC);
5228         E1000_READ_REG(hw, E1000_MGTPTC);
5229
5230         E1000_READ_REG(hw, E1000_IAC);
5231         E1000_READ_REG(hw, E1000_ICRXOC);
5232
5233         /* Clear PHY statistics registers */
5234         if ((hw->phy.type == e1000_phy_82578) ||
5235             (hw->phy.type == e1000_phy_82579) ||
5236             (hw->phy.type == e1000_phy_i217) ||
5237             (hw->phy.type == e1000_phy_82577)) {
5238                 ret_val = hw->phy.ops.acquire(hw);
5239                 if (ret_val)
5240                         return;
5241                 ret_val = hw->phy.ops.set_page(hw,
5242                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5243                 if (ret_val)
5244                         goto release;
5245                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5246                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5247                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5248                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5249                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5250                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5251                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5252                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5253                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5254                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5255                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5256                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5257                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5258                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5259 release:
5260                 hw->phy.ops.release(hw);
5261         }
5262 }
5263