[dpdk.git] drivers/net/e1000/base/e1000_ich8lan.c (commit 0837a40c065fa425be3666d654fd7eea99877b61)
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 0:7 GbE region Read Access */
173                 u32 grwa:8; /* 8:15 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
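/* Illustrative usage sketch (editor's note, not part of the original driver):
 * the unions above let the flash access helpers read an entire 16-bit
 * register into .regval and then test or modify individual fields by name
 * before writing the value back, roughly as follows.  E1000_READ_FLASH_REG16,
 * E1000_WRITE_FLASH_REG16 and ICH_FLASH_HSFSTS are assumed from the
 * surrounding e1000 base code and are not defined in this excerpt.
 *
 *      union ich8_hws_flash_status hsfsts;
 *
 *      hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *      if (hsfsts.hsf_status.flcinprog)
 *              ;                              // a flash cycle is in progress
 *      hsfsts.hsf_status.flcdone = 1;         // write 1 to clear
 *      E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 */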
179
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214         if (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Only unforce SMBus if ME is not active */
238                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
239                     E1000_ICH_FWSM_FW_VALID)) {
240                         /* Unforce SMBus mode in PHY */
241                         hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
242                         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
243                         hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
244
245                         /* Unforce SMBus mode in MAC */
246                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
247                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
248                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
249                 }
250         }
251
252         return true;
253 }
254
255 /**
256  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
257  *  @hw: pointer to the HW structure
258  *
259  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
260  *  used to reset the PHY to a quiescent state when necessary.
261  **/
262 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
263 {
264         u32 mac_reg;
265
266         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
267
268         /* Set Phy Config Counter to 50msec */
269         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
270         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
271         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
272         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
273
274         /* Toggle LANPHYPC Value bit */
275         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
276         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
277         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280         msec_delay(1);
281         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
282         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
283         E1000_WRITE_FLUSH(hw);
284
285         if (hw->mac.type < e1000_pch_lpt) {
286                 msec_delay(50);
287         } else {
288                 u16 count = 20;
289
290                 do {
291                         msec_delay(5);
292                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
293                            E1000_CTRL_EXT_LPCD) && count--);
294
295                 msec_delay(30);
296         }
297 }
298
299 /**
300  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
301  *  @hw: pointer to the HW structure
302  *
303  *  Workarounds/flow necessary for PHY initialization during driver load
304  *  and resume paths.
305  **/
306 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
307 {
308         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
309         s32 ret_val;
310
311         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
312
313         /* Gate automatic PHY configuration by hardware on managed and
314          * non-managed 82579 and newer adapters.
315          */
316         e1000_gate_hw_phy_config_ich8lan(hw, true);
317
318 #ifdef ULP_SUPPORT
319         /* It is not possible to be certain of the current state of ULP
320          * so forcibly disable it.
321          */
322         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
323
324 #endif /* ULP_SUPPORT */
325         ret_val = hw->phy.ops.acquire(hw);
326         if (ret_val) {
327                 DEBUGOUT("Failed to initialize PHY flow\n");
328                 goto out;
329         }
330
331         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
332          * inaccessible and resetting the PHY is not blocked, toggle the
333          * LANPHYPC Value bit to force the interconnect to PCIe mode.
334          */
335         switch (hw->mac.type) {
336         case e1000_pch_lpt:
337                 if (e1000_phy_is_accessible_pchlan(hw))
338                         break;
339
340                 /* Before toggling LANPHYPC, see if PHY is accessible by
341                  * forcing MAC to SMBus mode first.
342                  */
343                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
344                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
345                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
346
347                 /* Wait 50 milliseconds for MAC to finish any retries
348                  * that it might be trying to perform from previous
349                  * attempts to acknowledge any phy read requests.
350                  */
351                 msec_delay(50);
352
353                 /* fall-through */
354         case e1000_pch2lan:
355                 if (e1000_phy_is_accessible_pchlan(hw))
356                         break;
357
358                 /* fall-through */
359         case e1000_pchlan:
360                 if ((hw->mac.type == e1000_pchlan) &&
361                     (fwsm & E1000_ICH_FWSM_FW_VALID))
362                         break;
363
364                 if (hw->phy.ops.check_reset_block(hw)) {
365                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
366                         ret_val = -E1000_ERR_PHY;
367                         break;
368                 }
369
370                 /* Toggle LANPHYPC Value bit */
371                 e1000_toggle_lanphypc_pch_lpt(hw);
372                 if (hw->mac.type >= e1000_pch_lpt) {
373                         if (e1000_phy_is_accessible_pchlan(hw))
374                                 break;
375
376                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
377                          * so ensure that the MAC is also out of SMBus mode
378                          */
379                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
380                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
381                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
382
383                         if (e1000_phy_is_accessible_pchlan(hw))
384                                 break;
385
386                         ret_val = -E1000_ERR_PHY;
387                 }
388                 break;
389         default:
390                 break;
391         }
392
393         hw->phy.ops.release(hw);
394         if (!ret_val) {
395
396                 /* Check to see if able to reset PHY.  Print error if not */
397                 if (hw->phy.ops.check_reset_block(hw)) {
398                         ERROR_REPORT("Reset blocked by ME\n");
399                         goto out;
400                 }
401
402                 /* Reset the PHY before any access to it.  Doing so, ensures
403                  * that the PHY is in a known good state before we read/write
404                  * PHY registers.  The generic reset is sufficient here,
405                  * because we haven't determined the PHY type yet.
406                  */
407                 ret_val = e1000_phy_hw_reset_generic(hw);
408                 if (ret_val)
409                         goto out;
410
411                 /* On a successful reset, possibly need to wait for the PHY
412                  * to quiesce to an accessible state before returning control
413                  * to the calling function.  If the PHY does not quiesce, then
414                  * return E1000_BLK_PHY_RESET, as this is the condition that
415                  * the PHY is in.
416                  */
417                 ret_val = hw->phy.ops.check_reset_block(hw);
418                 if (ret_val)
419                         ERROR_REPORT("ME blocked access to PHY after reset\n");
420         }
421
422 out:
423         /* Ungate automatic PHY configuration on non-managed 82579 */
424         if ((hw->mac.type == e1000_pch2lan) &&
425             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
426                 msec_delay(10);
427                 e1000_gate_hw_phy_config_ich8lan(hw, false);
428         }
429
430         return ret_val;
431 }
432
433 /**
434  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
435  *  @hw: pointer to the HW structure
436  *
437  *  Initialize family-specific PHY parameters and function pointers.
438  **/
439 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
440 {
441         struct e1000_phy_info *phy = &hw->phy;
442         s32 ret_val;
443
444         DEBUGFUNC("e1000_init_phy_params_pchlan");
445
446         phy->addr               = 1;
447         phy->reset_delay_us     = 100;
448
449         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
450         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
451         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
452         phy->ops.set_page       = e1000_set_page_igp;
453         phy->ops.read_reg       = e1000_read_phy_reg_hv;
454         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
455         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
456         phy->ops.release        = e1000_release_swflag_ich8lan;
457         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
458         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
459         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
460         phy->ops.write_reg      = e1000_write_phy_reg_hv;
461         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
462         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
463         phy->ops.power_up       = e1000_power_up_phy_copper;
464         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
465         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
466
467         phy->id = e1000_phy_unknown;
468
469         ret_val = e1000_init_phy_workarounds_pchlan(hw);
470         if (ret_val)
471                 return ret_val;
472
473         if (phy->id == e1000_phy_unknown)
474                 switch (hw->mac.type) {
475                 default:
476                         ret_val = e1000_get_phy_id(hw);
477                         if (ret_val)
478                                 return ret_val;
479                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
480                                 break;
481                         /* fall-through */
482                 case e1000_pch2lan:
483                 case e1000_pch_lpt:
484                         /* In case the PHY needs to be in mdio slow mode,
485                          * set slow mode and try to get the PHY id again.
486                          */
487                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
488                         if (ret_val)
489                                 return ret_val;
490                         ret_val = e1000_get_phy_id(hw);
491                         if (ret_val)
492                                 return ret_val;
493                         break;
494                 }
495         phy->type = e1000_get_phy_type_from_id(phy->id);
496
497         switch (phy->type) {
498         case e1000_phy_82577:
499         case e1000_phy_82579:
500         case e1000_phy_i217:
501                 phy->ops.check_polarity = e1000_check_polarity_82577;
502                 phy->ops.force_speed_duplex =
503                         e1000_phy_force_speed_duplex_82577;
504                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
505                 phy->ops.get_info = e1000_get_phy_info_82577;
506                 phy->ops.commit = e1000_phy_sw_reset_generic;
507                 break;
508         case e1000_phy_82578:
509                 phy->ops.check_polarity = e1000_check_polarity_m88;
510                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
511                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
512                 phy->ops.get_info = e1000_get_phy_info_m88;
513                 break;
514         default:
515                 ret_val = -E1000_ERR_PHY;
516                 break;
517         }
518
519         return ret_val;
520 }
521
522 /**
523  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
524  *  @hw: pointer to the HW structure
525  *
526  *  Initialize family-specific PHY parameters and function pointers.
527  **/
528 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
529 {
530         struct e1000_phy_info *phy = &hw->phy;
531         s32 ret_val;
532         u16 i = 0;
533
534         DEBUGFUNC("e1000_init_phy_params_ich8lan");
535
536         phy->addr               = 1;
537         phy->reset_delay_us     = 100;
538
539         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
540         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
541         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
542         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
543         phy->ops.read_reg       = e1000_read_phy_reg_igp;
544         phy->ops.release        = e1000_release_swflag_ich8lan;
545         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
546         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
547         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
548         phy->ops.write_reg      = e1000_write_phy_reg_igp;
549         phy->ops.power_up       = e1000_power_up_phy_copper;
550         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
551
552         /* We may need to do this twice - once for IGP and if that fails,
553          * we'll set BM func pointers and try again
554          */
555         ret_val = e1000_determine_phy_address(hw);
556         if (ret_val) {
557                 phy->ops.write_reg = e1000_write_phy_reg_bm;
558                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
559                 ret_val = e1000_determine_phy_address(hw);
560                 if (ret_val) {
561                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
562                         return ret_val;
563                 }
564         }
565
566         phy->id = 0;
567         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
568                (i++ < 100)) {
569                 msec_delay(1);
570                 ret_val = e1000_get_phy_id(hw);
571                 if (ret_val)
572                         return ret_val;
573         }
574
575         /* Verify phy id */
576         switch (phy->id) {
577         case IGP03E1000_E_PHY_ID:
578                 phy->type = e1000_phy_igp_3;
579                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
580                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
581                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
582                 phy->ops.get_info = e1000_get_phy_info_igp;
583                 phy->ops.check_polarity = e1000_check_polarity_igp;
584                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
585                 break;
586         case IFE_E_PHY_ID:
587         case IFE_PLUS_E_PHY_ID:
588         case IFE_C_E_PHY_ID:
589                 phy->type = e1000_phy_ife;
590                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
591                 phy->ops.get_info = e1000_get_phy_info_ife;
592                 phy->ops.check_polarity = e1000_check_polarity_ife;
593                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
594                 break;
595         case BME1000_E_PHY_ID:
596                 phy->type = e1000_phy_bm;
597                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
598                 phy->ops.read_reg = e1000_read_phy_reg_bm;
599                 phy->ops.write_reg = e1000_write_phy_reg_bm;
600                 phy->ops.commit = e1000_phy_sw_reset_generic;
601                 phy->ops.get_info = e1000_get_phy_info_m88;
602                 phy->ops.check_polarity = e1000_check_polarity_m88;
603                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
604                 break;
605         default:
606                 return -E1000_ERR_PHY;
607                 break;
608         }
609
610         return E1000_SUCCESS;
611 }
612
613 /**
614  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
615  *  @hw: pointer to the HW structure
616  *
617  *  Initialize family-specific NVM parameters and function
618  *  pointers.
619  **/
620 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
621 {
622         struct e1000_nvm_info *nvm = &hw->nvm;
623         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
624         u32 gfpreg, sector_base_addr, sector_end_addr;
625         u16 i;
626
627         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
628
629         /* Can't read flash registers if the register set isn't mapped. */
630         nvm->type = e1000_nvm_flash_sw;
631         if (!hw->flash_address) {
632                 DEBUGOUT("ERROR: Flash registers not mapped\n");
633                 return -E1000_ERR_CONFIG;
634         }
635
636         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
637
638         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
639          * Add 1 to sector_end_addr since this sector is included in
640          * the overall size.
641          */
642         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
643         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
644
645         /* flash_base_addr is byte-aligned */
646         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
647
648         /* find total size of the NVM, then cut in half since the total
649          * size represents two separate NVM banks.
650          */
651         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
652                                 << FLASH_SECTOR_ADDR_SHIFT);
653         nvm->flash_bank_size /= 2;
654         /* Adjust to word count */
655         nvm->flash_bank_size /= sizeof(u16);
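        /* Worked example (editor's note; the register values are assumed for
         * illustration): with GFPREG base = 0x0000 and limit = 0x001F,
         * sector_end_addr is 0x20, so the GbE region spans 0x20 sectors of
         * 4096 bytes = 128 KiB.  Half of that is one bank (64 KiB), which
         * after the division by sizeof(u16) leaves flash_bank_size = 32768
         * words.
         */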
656
657         nvm->word_size = E1000_SHADOW_RAM_WORDS;
658
659         /* Clear shadow ram */
660         for (i = 0; i < nvm->word_size; i++) {
661                 dev_spec->shadow_ram[i].modified = false;
662                 dev_spec->shadow_ram[i].value    = 0xFFFF;
663         }
664
665         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
666         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
667
668         /* Function Pointers */
669         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
670         nvm->ops.release        = e1000_release_nvm_ich8lan;
671         nvm->ops.read           = e1000_read_nvm_ich8lan;
672         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
673         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
674         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
675         nvm->ops.write          = e1000_write_nvm_ich8lan;
676
677         return E1000_SUCCESS;
678 }
679
680 /**
681  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
682  *  @hw: pointer to the HW structure
683  *
684  *  Initialize family-specific MAC parameters and function
685  *  pointers.
686  **/
687 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
688 {
689         struct e1000_mac_info *mac = &hw->mac;
690 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
691         u16 pci_cfg;
692 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
693
694         DEBUGFUNC("e1000_init_mac_params_ich8lan");
695
696         /* Set media type */
697         hw->phy.media_type = e1000_media_type_copper;
698
699         /* Set mta register count */
700         mac->mta_reg_count = 32;
701         /* Set rar entry count */
702         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
703         if (mac->type == e1000_ich8lan)
704                 mac->rar_entry_count--;
705         /* Set if part includes ASF firmware */
706         mac->asf_firmware_present = true;
707         /* FWSM register */
708         mac->has_fwsm = true;
709         /* ARC subsystem not supported */
710         mac->arc_subsystem_valid = false;
711         /* Adaptive IFS supported */
712         mac->adaptive_ifs = true;
713
714         /* Function pointers */
715
716         /* bus type/speed/width */
717         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
718         /* function id */
719         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
720         /* reset */
721         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
722         /* hw initialization */
723         mac->ops.init_hw = e1000_init_hw_ich8lan;
724         /* link setup */
725         mac->ops.setup_link = e1000_setup_link_ich8lan;
726         /* physical interface setup */
727         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
728         /* check for link */
729         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
730         /* link info */
731         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
732         /* multicast address update */
733         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
734         /* clear hardware counters */
735         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
736
737         /* LED and other operations */
738         switch (mac->type) {
739         case e1000_ich8lan:
740         case e1000_ich9lan:
741         case e1000_ich10lan:
742                 /* check management mode */
743                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
744                 /* ID LED init */
745                 mac->ops.id_led_init = e1000_id_led_init_generic;
746                 /* blink LED */
747                 mac->ops.blink_led = e1000_blink_led_generic;
748                 /* setup LED */
749                 mac->ops.setup_led = e1000_setup_led_generic;
750                 /* cleanup LED */
751                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
752                 /* turn on/off LED */
753                 mac->ops.led_on = e1000_led_on_ich8lan;
754                 mac->ops.led_off = e1000_led_off_ich8lan;
755                 break;
756         case e1000_pch2lan:
757                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
758                 mac->ops.rar_set = e1000_rar_set_pch2lan;
759                 /* fall-through */
760         case e1000_pch_lpt:
761 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
762                 /* multicast address update for pch2 */
763                 mac->ops.update_mc_addr_list =
764                         e1000_update_mc_addr_list_pch2lan;
765                 /* fall-through */
766 #endif
767         case e1000_pchlan:
768 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
769                 /* save PCH revision_id */
770                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
771                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
772 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
773                 /* check management mode */
774                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
775                 /* ID LED init */
776                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
777                 /* setup LED */
778                 mac->ops.setup_led = e1000_setup_led_pchlan;
779                 /* cleanup LED */
780                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
781                 /* turn on/off LED */
782                 mac->ops.led_on = e1000_led_on_pchlan;
783                 mac->ops.led_off = e1000_led_off_pchlan;
784                 break;
785         default:
786                 break;
787         }
788
789         if (mac->type == e1000_pch_lpt) {
790                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
791                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
792                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
793         }
794
795         /* Enable PCS Lock-loss workaround for ICH8 */
796         if (mac->type == e1000_ich8lan)
797                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
798
799         return E1000_SUCCESS;
800 }
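/* Editor's note (hedged): drivers do not call the init_*_params routines in
 * this file directly.  In the e1000 base code they are installed as the
 * family's hw->{mac,nvm,phy}.ops.init_params callbacks by an
 * e1000_init_function_pointers_* helper defined elsewhere in this file
 * (outside this excerpt), conceptually:
 *
 *      hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
 *      hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
 *      hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
 *                                // or e1000_init_phy_params_pchlan
 *
 * Verify the exact wiring against the full source; the snippet is a sketch.
 */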
801
802 /**
803  *  __e1000_access_emi_reg_locked - Read/write EMI register
804  *  @hw: pointer to the HW structure
805  *  @address: EMI address to program
806  *  @data: pointer to value to read/write from/to the EMI address
807  *  @read: boolean flag to indicate read or write
808  *
809  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
810  **/
811 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
812                                          u16 *data, bool read)
813 {
814         s32 ret_val;
815
816         DEBUGFUNC("__e1000_access_emi_reg_locked");
817
818         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
819         if (ret_val)
820                 return ret_val;
821
822         if (read)
823                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
824                                                       data);
825         else
826                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
827                                                        *data);
828
829         return ret_val;
830 }
831
832 /**
833  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
834  *  @hw: pointer to the HW structure
835  *  @addr: EMI address to program
836  *  @data: value to be read from the EMI address
837  *
838  *  Assumes the SW/FW/HW Semaphore is already acquired.
839  **/
840 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
841 {
842         DEBUGFUNC("e1000_read_emi_reg_locked");
843
844         return __e1000_access_emi_reg_locked(hw, addr, data, true);
845 }
846
847 /**
848  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
849  *  @hw: pointer to the HW structure
850  *  @addr: EMI address to program
851  *  @data: value to be written to the EMI address
852  *
853  *  Assumes the SW/FW/HW Semaphore is already acquired.
854  **/
855 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
856 {
857         DEBUGFUNC("e1000_write_emi_reg_locked");
858
859         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
860 }
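/* Illustrative call pattern (editor's note): both EMI accessors assume the
 * SW/FW/HW semaphore is already held, so a caller brackets them with the PHY
 * acquire/release ops, as e1000_set_eee_pchlan() below does:
 *
 *      ret_val = hw->phy.ops.acquire(hw);
 *      if (ret_val)
 *              return ret_val;
 *      ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *      hw->phy.ops.release(hw);
 */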
861
862 /**
863  *  e1000_set_eee_pchlan - Enable/disable EEE support
864  *  @hw: pointer to the HW structure
865  *
866  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
867  *  the link and the EEE capabilities of the link partner.  The LPI Control
868  *  register bits will remain set only if/when link is up.
869  *
870  *  EEE LPI must not be asserted earlier than one second after link is up.
871  *  On 82579, EEE LPI should not be enabled until such time otherwise there
872  *  can be link issues with some switches.  Other devices can have EEE LPI
873  *  enabled immediately upon link up since they have a timer in hardware which
874  *  prevents LPI from being asserted too early.
875  **/
876 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
877 {
878         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
879         s32 ret_val;
880         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
881
882         DEBUGFUNC("e1000_set_eee_pchlan");
883
884         switch (hw->phy.type) {
885         case e1000_phy_82579:
886                 lpa = I82579_EEE_LP_ABILITY;
887                 pcs_status = I82579_EEE_PCS_STATUS;
888                 adv_addr = I82579_EEE_ADVERTISEMENT;
889                 break;
890         case e1000_phy_i217:
891                 lpa = I217_EEE_LP_ABILITY;
892                 pcs_status = I217_EEE_PCS_STATUS;
893                 adv_addr = I217_EEE_ADVERTISEMENT;
894                 break;
895         default:
896                 return E1000_SUCCESS;
897         }
898
899         ret_val = hw->phy.ops.acquire(hw);
900         if (ret_val)
901                 return ret_val;
902
903         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
904         if (ret_val)
905                 goto release;
906
907         /* Clear bits that enable EEE in various speeds */
908         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
909
910         /* Enable EEE if not disabled by user */
911         if (!dev_spec->eee_disable) {
912                 /* Save off link partner's EEE ability */
913                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
914                                                     &dev_spec->eee_lp_ability);
915                 if (ret_val)
916                         goto release;
917
918                 /* Read EEE advertisement */
919                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
920                 if (ret_val)
921                         goto release;
922
923                 /* Enable EEE only for speeds in which the link partner is
924                  * EEE capable and for which we advertise EEE.
925                  */
926                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
927                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
928
929                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
930                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
931                         if (data & NWAY_LPAR_100TX_FD_CAPS)
932                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
933                         else
934                                 /* EEE is not supported in 100Half, so ignore
935                                  * partner's EEE in 100 ability if full-duplex
936                                  * is not advertised.
937                                  */
938                                 dev_spec->eee_lp_ability &=
939                                     ~I82579_EEE_100_SUPPORTED;
940                 }
941         }
942
943         if (hw->phy.type == e1000_phy_82579) {
944                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
945                                                     &data);
946                 if (ret_val)
947                         goto release;
948
949                 data &= ~I82579_LPI_100_PLL_SHUT;
950                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
951                                                      data);
952         }
953
954         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
955         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
956         if (ret_val)
957                 goto release;
958
959         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
960 release:
961         hw->phy.ops.release(hw);
962
963         return ret_val;
964 }
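/* Editor's note (hedged): per the comment block above, EEE LPI must not be
 * asserted until one second after link-up on 82579, so this routine is meant
 * to be invoked from the family's link-check path once link state and the
 * partner's abilities are known, conceptually:
 *
 *      if (link)
 *              ret_val = e1000_set_eee_pchlan(hw);
 *
 * A likely caller is e1000_check_for_copper_link_ich8lan (declared above but
 * defined outside this excerpt); treat the snippet as a sketch only.
 */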
965
966 /**
967  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
968  *  @hw:   pointer to the HW structure
969  *  @link: link up bool flag
970  *
971  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
972  *  preventing further DMA write requests.  Workaround the issue by disabling
973  *  the de-assertion of the clock request when in 1Gbps mode.
974  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
975  *  speeds in order to avoid Tx hangs.
976  **/
977 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
978 {
979         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
980         u32 status = E1000_READ_REG(hw, E1000_STATUS);
981         s32 ret_val = E1000_SUCCESS;
982         u16 reg;
983
984         if (link && (status & E1000_STATUS_SPEED_1000)) {
985                 ret_val = hw->phy.ops.acquire(hw);
986                 if (ret_val)
987                         return ret_val;
988
989                 ret_val =
990                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
991                                                &reg);
992                 if (ret_val)
993                         goto release;
994
995                 ret_val =
996                     e1000_write_kmrn_reg_locked(hw,
997                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
998                                                 reg &
999                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1000                 if (ret_val)
1001                         goto release;
1002
1003                 usec_delay(10);
1004
1005                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1006                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1007
1008                 ret_val =
1009                     e1000_write_kmrn_reg_locked(hw,
1010                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1011                                                 reg);
1012 release:
1013                 hw->phy.ops.release(hw);
1014         } else {
1015                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1016                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1017
1018                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1019                               (status & E1000_STATUS_FD)))
1020                         goto update_fextnvm6;
1021
1022                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1023                 if (ret_val)
1024                         return ret_val;
1025
1026                 /* Clear link status transmit timeout */
1027                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1028
1029                 if (status & E1000_STATUS_SPEED_100) {
1030                         /* Set inband Tx timeout to 5x10us for 100Half */
1031                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1032
1033                         /* Do not extend the K1 entry latency for 100Half */
1034                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1035                 } else {
1036                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1037                         reg |= 50 <<
1038                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1039
1040                         /* Extend the K1 entry latency for 10 Mbps */
1041                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1042                 }
1043
1044                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1045                 if (ret_val)
1046                         return ret_val;
1047
1048 update_fextnvm6:
1049                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1050         }
1051
1052         return ret_val;
1053 }
1054
1055 #ifdef ULP_SUPPORT
1056 /**
1057  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1058  *  @hw: pointer to the HW structure
1059  *  @to_sx: boolean indicating a system power state transition to Sx
1060  *
1061  *  When link is down, configure ULP mode to significantly reduce the power
1062  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1063  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1064  *  system, configure the ULP mode by software.
1065  */
1066 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1067 {
1068         u32 mac_reg;
1069         s32 ret_val = E1000_SUCCESS;
1070         u16 phy_reg;
1071         u16 oem_reg = 0;
1072
1073         if ((hw->mac.type < e1000_pch_lpt) ||
1074             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1075             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1076             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1077             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1078             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1079                 return 0;
1080
1081         if (!to_sx) {
1082                 int i = 0;
1083                 /* Poll up to 5 seconds for Cable Disconnected indication */
1084                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1085                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1086                         /* Bail if link is re-acquired */
1087                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1088                                 return -E1000_ERR_PHY;
1089                         if (i++ == 100)
1090                                 break;
1091
1092                         msec_delay(50);
1093                 }
1094                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1095                           (E1000_READ_REG(hw, E1000_FEXT) &
1096                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1097                           i * 50);
1098                 if (!(E1000_READ_REG(hw, E1000_FEXT) &
1099                     E1000_FEXT_PHY_CABLE_DISCONNECTED))
1100                         return 0;
1101         }
1102
1103         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1104                 /* Request ME configure ULP mode in the PHY */
1105                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1106                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1107                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1108
1109                 goto out;
1110         }
1111
1112         ret_val = hw->phy.ops.acquire(hw);
1113         if (ret_val)
1114                 goto out;
1115
1116         /* During S0 Idle keep the phy in PCI-E mode */
1117         if (hw->dev_spec.ich8lan.smbus_disable)
1118                 goto skip_smbus;
1119
1120         /* Force SMBus mode in PHY */
1121         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1122         if (ret_val)
1123                 goto release;
1124         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1125         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1126
1127         /* Force SMBus mode in MAC */
1128         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1129         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1130         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1131
1132         /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1133          * LPLU and disable Gig speed when entering ULP
1134          */
1135         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1136                 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1137                                                        &oem_reg);
1138                 if (ret_val)
1139                         goto release;
1140
1141                 phy_reg = oem_reg;
1142                 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1143
1144                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1145                                                         phy_reg);
1146
1147                 if (ret_val)
1148                         goto release;
1149         }
1150
1151 skip_smbus:
1152         if (!to_sx) {
1153                 /* Change the 'Link Status Change' interrupt to trigger
1154                  * on 'Cable Status Change'
1155                  */
1156                 ret_val = e1000_read_kmrn_reg_locked(hw,
1157                                                      E1000_KMRNCTRLSTA_OP_MODES,
1158                                                      &phy_reg);
1159                 if (ret_val)
1160                         goto release;
1161                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1162                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1163                                             phy_reg);
1164         }
1165
1166         /* Set Inband ULP Exit, Reset to SMBus mode and
1167          * Disable SMBus Release on PERST# in PHY
1168          */
1169         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1170         if (ret_val)
1171                 goto release;
1172         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1173                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1174         if (to_sx) {
1175                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1176                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1177                 else
1178                         phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1179
1180                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1181                 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1182         } else {
1183                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1184                 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1185                 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1186         }
1187         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1188
1189         /* Set Disable SMBus Release on PERST# in MAC */
1190         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1191         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1192         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1193
1194         /* Commit ULP changes in PHY by starting auto ULP configuration */
1195         phy_reg |= I218_ULP_CONFIG1_START;
1196         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1197
1198         if (!to_sx) {
1199                 /* Disable Tx so that the MAC doesn't send any (buffered)
1200                  * packets to the PHY.
1201                  */
1202                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1203                 mac_reg &= ~E1000_TCTL_EN;
1204                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1205         }
1206
1207         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1208             to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1209                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1210                                                         oem_reg);
1211                 if (ret_val)
1212                         goto release;
1213         }
1214
1215 release:
1216         hw->phy.ops.release(hw);
1217 out:
1218         if (ret_val)
1219                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1220         else
1221                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1222
1223         return ret_val;
1224 }
1225
1226 /**
1227  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1228  *  @hw: pointer to the HW structure
1229  *  @force: boolean indicating whether or not to force disabling ULP
1230  *
1231  *  Un-configure ULP mode when link is up, the system is transitioned from
1232  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1233  *  system, poll for an indication from ME that ULP has been un-configured.
1234  *  If not on an ME enabled system, un-configure the ULP mode by software.
1235  *
1236  *  During nominal operation, this function is called when link is acquired
1237  *  to disable ULP mode (force=false); otherwise, for example when unloading
1238  *  the driver or during Sx->S0 transitions, this is called with force=true
1239  *  to forcibly disable ULP.
1240  *
1241  *  When the cable is plugged in while the device is in D0, a Cable Status
1242  *  Change interrupt is generated which causes this function to be called
1243  *  to partially disable ULP mode and restart autonegotiation.  This function
1244  *  is then called again due to the resulting Link Status Change interrupt
1245  *  to finish cleaning up after the ULP flow.
1246  */
1247 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1248 {
1249         s32 ret_val = E1000_SUCCESS;
1250         u32 mac_reg;
1251         u16 phy_reg;
1252         int i = 0;
1253
1254         if ((hw->mac.type < e1000_pch_lpt) ||
1255             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1256             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1257             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1258             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1259             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1260                 return 0;
1261
1262         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1263                 if (force) {
1264                         /* Request ME un-configure ULP mode in the PHY */
1265                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1266                         mac_reg &= ~E1000_H2ME_ULP;
1267                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1268                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1269                 }
1270
1271                 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1272                 while (E1000_READ_REG(hw, E1000_FWSM) &
1273                        E1000_FWSM_ULP_CFG_DONE) {
1274                         if (i++ == 30) {
1275                                 ret_val = -E1000_ERR_PHY;
1276                                 goto out;
1277                         }
1278
1279                         msec_delay(10);
1280                 }
1281                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1282
1283                 if (force) {
1284                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1285                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1286                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1287                 } else {
1288                         /* Clear H2ME.ULP after ME ULP configuration */
1289                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1290                         mac_reg &= ~E1000_H2ME_ULP;
1291                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1292
1293                         /* Restore link speed advertisements and restart
1294                          * Auto-negotiation
1295                          */
1296                         if (hw->mac.autoneg) {
1297                                 ret_val = e1000_phy_setup_autoneg(hw);
1298                                 if (ret_val)
1299                                         goto out;
1300                         } else {
1301                                 ret_val = e1000_setup_copper_link_generic(hw);
1302                                 if (ret_val)
1303                                         goto out;
1304                         }
1305                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1306                 }
1307
1308                 goto out;
1309         }
1310
1311         ret_val = hw->phy.ops.acquire(hw);
1312         if (ret_val)
1313                 goto out;
1314
1315         /* Revert the change to the 'Link Status Change'
1316          * interrupt to trigger on 'Cable Status Change'
1317          */
1318         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1319                                              &phy_reg);
1320         if (ret_val)
1321                 goto release;
1322         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1323         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1324
1325         if (force)
1326                 /* Toggle LANPHYPC Value bit */
1327                 e1000_toggle_lanphypc_pch_lpt(hw);
1328
1329         /* Unforce SMBus mode in PHY */
1330         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1331         if (ret_val) {
1332                 /* The MAC might be in PCIe mode, so temporarily force to
1333                  * SMBus mode in order to access the PHY.
1334                  */
1335                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1336                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1337                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1338
1339                 msec_delay(50);
1340
1341                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1342                                                        &phy_reg);
1343                 if (ret_val)
1344                         goto release;
1345         }
1346         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1347         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1348
1349         /* Unforce SMBus mode in MAC */
1350         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1351         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1352         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1353
1354         /* When ULP mode was previously entered, K1 was disabled by the
1355          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1356          */
1357         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1358         if (ret_val)
1359                 goto release;
1360         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1361         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1362
1363         /* Clear ULP enabled configuration */
1364         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1365         if (ret_val)
1366                 goto release;
1367         /* CSC interrupt received due to ULP Indication */
1368         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1369                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1370                              I218_ULP_CONFIG1_STICKY_ULP |
1371                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1372                              I218_ULP_CONFIG1_WOL_HOST |
1373                              I218_ULP_CONFIG1_INBAND_EXIT |
1374                              I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1375                              I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1376                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1377                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1378
1379                 /* Commit ULP changes by starting auto ULP configuration */
1380                 phy_reg |= I218_ULP_CONFIG1_START;
1381                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1382
1383                 /* Clear Disable SMBus Release on PERST# in MAC */
1384                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1385                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1386                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1387
1388                 if (!force) {
1389                         hw->phy.ops.release(hw);
1390
1391                         if (hw->mac.autoneg)
1392                                 e1000_phy_setup_autoneg(hw);
1393                         else
1394                                 e1000_setup_copper_link_generic(hw);
1395
1396                         e1000_sw_lcd_config_ich8lan(hw);
1397
1398                         e1000_oem_bits_config_ich8lan(hw, true);
1399
1400                         /* Set ULP state to unknown and return non-zero to
1401                          * indicate no link (yet) and re-enter on the next LSC
1402                          * to finish disabling ULP flow.
1403                          */
1404                         hw->dev_spec.ich8lan.ulp_state =
1405                             e1000_ulp_state_unknown;
1406
1407                         return 1;
1408                 }
1409         }
1410
1411         /* Re-enable Tx */
1412         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1413         mac_reg |= E1000_TCTL_EN;
1414         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1415
1416 release:
1417         hw->phy.ops.release(hw);
1418         if (force) {
1419                 hw->phy.ops.reset(hw);
1420                 msec_delay(50);
1421         }
1422 out:
1423         if (ret_val)
1424                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1425         else
1426                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1427
1428         return ret_val;
1429 }
1430
1431 #endif /* ULP_SUPPORT */
1432
1433
1434 /**
1435  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1436  *  @hw: pointer to the HW structure
1437  *
1438  *  Checks to see if the link status of the hardware has changed.  If a
1439  *  change in link status has been detected, then we read the PHY registers
1440  *  to get the current speed/duplex if link exists.
1441  **/
1442 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1443 {
1444         struct e1000_mac_info *mac = &hw->mac;
1445         s32 ret_val, tipg_reg = 0;
1446         u16 emi_addr, emi_val = 0;
1447         bool link = false;
1448         u16 phy_reg;
1449
1450         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1451
1452         /* We only want to go out to the PHY registers to see if Auto-Neg
1453          * has completed and/or if our link status has changed.  The
1454          * get_link_status flag is set upon receiving a Link Status
1455          * Change or Rx Sequence Error interrupt.
1456          */
1457         if (!mac->get_link_status)
1458                 return E1000_SUCCESS;
1459
1460         if ((hw->mac.type < e1000_pch_lpt) ||
1461             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1462             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1463                 /* First we want to see if the MII Status Register reports
1464                  * link.  If so, then we want to get the current speed/duplex
1465                  * of the PHY.
1466                  */
1467                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1468                 if (ret_val)
1469                         return ret_val;
1470         } else {
1471                 /* Check the MAC's STATUS register to determine link state
1472                  * since the PHY could be inaccessible while in ULP mode.
1473                  */
1474                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1475                 if (link)
1476                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1477                 else
1478                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1479                 if (ret_val)
1480                         return ret_val;
1481         }
1482
1483         if (hw->mac.type == e1000_pchlan) {
1484                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1485                 if (ret_val)
1486                         return ret_val;
1487         }
1488
1489         /* When connected at 10Mbps half-duplex, some parts are excessively
1490          * aggressive, resulting in many collisions. To avoid this, increase
1491          * the IPG and reduce Rx latency in the PHY.
1492          */
1493         if (((hw->mac.type == e1000_pch2lan) ||
1494              (hw->mac.type == e1000_pch_lpt)) && link) {
1495                 u16 speed, duplex;
1496
1497                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1498                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1499                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1500
1501                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1502                         tipg_reg |= 0xFF;
1503                         /* Reduce Rx latency in analog PHY */
1504                         emi_val = 0;
1505                 } else {
1506                         /* Roll back the default values */
1507                         tipg_reg |= 0x08;
1508                         emi_val = 1;
1509                 }
1510
1511                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1512
1513                 ret_val = hw->phy.ops.acquire(hw);
1514                 if (ret_val)
1515                         return ret_val;
1516
1517                 if (hw->mac.type == e1000_pch2lan)
1518                         emi_addr = I82579_RX_CONFIG;
1519                 else
1520                         emi_addr = I217_RX_CONFIG;
1521                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1522
1523
1524                 if (hw->mac.type >= e1000_pch_lpt) {
1525                         u16 phy_reg;
1526
1527                         hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1528                                                     &phy_reg);
1529                         phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
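                             /* Select the PLL clock gate value based on the
                              * negotiated link speed.
                              */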
1530                         if (speed == SPEED_100 || speed == SPEED_10)
1531                                 phy_reg |= 0x3E8;
1532                         else
1533                                 phy_reg |= 0xFA;
1534                         hw->phy.ops.write_reg_locked(hw,
1535                                                      I217_PLL_CLOCK_GATE_REG,
1536                                                      phy_reg);
1537                 }
1538                 hw->phy.ops.release(hw);
1539
1540                 if (ret_val)
1541                         return ret_val;
1542         }
1543
1544         /* I217 Packet Loss issue:
1545          * ensure that FEXTNVM4 Beacon Duration is set correctly
1546          * on power up.
1547          * Set the Beacon Duration for I217 to 8 usec
1548          */
1549         if (hw->mac.type == e1000_pch_lpt) {
1550                 u32 mac_reg;
1551
1552                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1553                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1554                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1555                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1556         }
1557
1558         /* Work-around I218 hang issue */
1559         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1560             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1561             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1562             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1563                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1564                 if (ret_val)
1565                         return ret_val;
1566         }
1567         /* Clear link partner's EEE ability */
1568         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1569
1570         /* Configure K0s minimum time */
1571         if (hw->mac.type == e1000_pch_lpt) {
1572                 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1573         }
1574
1575         if (!link)
1576                 return E1000_SUCCESS; /* No link detected */
1577
1578         mac->get_link_status = false;
1579
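             /* Apply MAC-type specific link-up workarounds */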
1580         switch (hw->mac.type) {
1581         case e1000_pch2lan:
1582                 ret_val = e1000_k1_workaround_lv(hw);
1583                 if (ret_val)
1584                         return ret_val;
1585                 /* fall-thru */
1586         case e1000_pchlan:
1587                 if (hw->phy.type == e1000_phy_82578) {
1588                         ret_val = e1000_link_stall_workaround_hv(hw);
1589                         if (ret_val)
1590                                 return ret_val;
1591                 }
1592
1593                 /* Workaround for PCHx parts in half-duplex:
1594                  * Set the number of preambles removed from the packet
1595                  * when it is passed from the PHY to the MAC to prevent
1596                  * the MAC from misinterpreting the packet type.
1597                  */
1598                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1599                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1600
1601                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1602                     E1000_STATUS_FD)
1603                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1604
1605                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1606                 break;
1607         default:
1608                 break;
1609         }
1610
1611         /* Check if there was DownShift; this must be checked
1612          * immediately after link-up
1613          */
1614         e1000_check_downshift_generic(hw);
1615
1616         /* Enable/Disable EEE after link up */
1617         if (hw->phy.type > e1000_phy_82579) {
1618                 ret_val = e1000_set_eee_pchlan(hw);
1619                 if (ret_val)
1620                         return ret_val;
1621         }
1622
1623         /* If we are forcing speed/duplex, then we simply return since
1624          * we have already determined whether we have link or not.
1625          */
1626         if (!mac->autoneg)
1627                 return -E1000_ERR_CONFIG;
1628
1629         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1630          * of MAC speed/duplex configuration.  So we only need to
1631          * configure Collision Distance in the MAC.
1632          */
1633         mac->ops.config_collision_dist(hw);
1634
1635         /* Configure Flow Control now that Auto-Neg has completed.
1636          * First, we need to restore the desired flow control
1637          * settings because we may have had to re-autoneg with a
1638          * different link partner.
1639          */
1640         ret_val = e1000_config_fc_after_link_up_generic(hw);
1641         if (ret_val)
1642                 DEBUGOUT("Error configuring flow control\n");
1643
1644         return ret_val;
1645 }
1646
1647 /**
1648  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1649  *  @hw: pointer to the HW structure
1650  *
1651  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1652  **/
1653 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1654 {
1655         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1656
1657         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1658         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1659         switch (hw->mac.type) {
1660         case e1000_ich8lan:
1661         case e1000_ich9lan:
1662         case e1000_ich10lan:
1663                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1664                 break;
1665         case e1000_pchlan:
1666         case e1000_pch2lan:
1667         case e1000_pch_lpt:
1668                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1669                 break;
1670         default:
1671                 break;
1672         }
1673 }
1674
1675 /**
1676  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1677  *  @hw: pointer to the HW structure
1678  *
1679  *  Acquires the mutex for performing NVM operations.
1680  **/
1681 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1682 {
1683         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1684
1685         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1686
1687         return E1000_SUCCESS;
1688 }
1689
1690 /**
1691  *  e1000_release_nvm_ich8lan - Release NVM mutex
1692  *  @hw: pointer to the HW structure
1693  *
1694  *  Releases the mutex used while performing NVM operations.
1695  **/
1696 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1697 {
1698         DEBUGFUNC("e1000_release_nvm_ich8lan");
1699
1700         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1701
1702         return;
1703 }
1704
1705 /**
1706  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1707  *  @hw: pointer to the HW structure
1708  *
1709  *  Acquires the software control flag for performing PHY and select
1710  *  MAC CSR accesses.
1711  **/
1712 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1713 {
1714         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1715         s32 ret_val = E1000_SUCCESS;
1716
1717         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1718
1719         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1720
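             /* Wait for any current owner of the SW flag to release it */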
1721         while (timeout) {
1722                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1723                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1724                         break;
1725
1726                 msec_delay_irq(1);
1727                 timeout--;
1728         }
1729
1730         if (!timeout) {
1731                 DEBUGOUT("SW has already locked the resource.\n");
1732                 ret_val = -E1000_ERR_CONFIG;
1733                 goto out;
1734         }
1735
1736         timeout = SW_FLAG_TIMEOUT;
1737
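             /* Request the flag and wait for hardware to reflect the grant */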
1738         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1739         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1740
1741         while (timeout) {
1742                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1743                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1744                         break;
1745
1746                 msec_delay_irq(1);
1747                 timeout--;
1748         }
1749
1750         if (!timeout) {
1751                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1752                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1753                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1754                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1755                 ret_val = -E1000_ERR_CONFIG;
1756                 goto out;
1757         }
1758
1759 out:
1760         if (ret_val)
1761                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1762
1763         return ret_val;
1764 }
1765
1766 /**
1767  *  e1000_release_swflag_ich8lan - Release software control flag
1768  *  @hw: pointer to the HW structure
1769  *
1770  *  Releases the software control flag for performing PHY and select
1771  *  MAC CSR accesses.
1772  **/
1773 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1774 {
1775         u32 extcnf_ctrl;
1776
1777         DEBUGFUNC("e1000_release_swflag_ich8lan");
1778
1779         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1780
1781         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1782                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1783                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1784         } else {
1785                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1786         }
1787
1788         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1789
1790         return;
1791 }
1792
1793 /**
1794  *  e1000_check_mng_mode_ich8lan - Checks management mode
1795  *  @hw: pointer to the HW structure
1796  *
1797  *  This checks if the adapter has any manageability enabled.
1798  *  This is a function pointer entry point only called by read/write
1799  *  routines for the PHY and NVM parts.
1800  **/
1801 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1802 {
1803         u32 fwsm;
1804
1805         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1806
1807         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1808
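             /* Manageability is reported only when the firmware image is
              * valid and FWSM indicates iAMT mode.
              */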
1809         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1810                ((fwsm & E1000_FWSM_MODE_MASK) ==
1811                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1812 }
1813
1814 /**
1815  *  e1000_check_mng_mode_pchlan - Checks management mode
1816  *  @hw: pointer to the HW structure
1817  *
1818  *  This checks if the adapter has iAMT enabled.
1819  *  This is a function pointer entry point only called by read/write
1820  *  routines for the PHY and NVM parts.
1821  **/
1822 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1823 {
1824         u32 fwsm;
1825
1826         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1827
1828         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1829
1830         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1831                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1832 }
1833
1834 /**
1835  *  e1000_rar_set_pch2lan - Set receive address register
1836  *  @hw: pointer to the HW structure
1837  *  @addr: pointer to the receive address
1838  *  @index: receive address array register
1839  *
1840  *  Sets the receive address array register at index to the address passed
1841  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1842  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1843  *  Use SHRA[0-3] in place of those reserved for ME.
1844  **/
1845 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1846 {
1847         u32 rar_low, rar_high;
1848
1849         DEBUGFUNC("e1000_rar_set_pch2lan");
1850
1851         /* HW expects these in little endian so we reverse the byte order
1852          * from network order (big endian) to little endian
1853          */
1854         rar_low = ((u32) addr[0] |
1855                    ((u32) addr[1] << 8) |
1856                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1857
1858         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1859
1860         /* If MAC address zero, no need to set the AV bit */
1861         if (rar_low || rar_high)
1862                 rar_high |= E1000_RAH_AV;
1863
1864         if (index == 0) {
1865                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1866                 E1000_WRITE_FLUSH(hw);
1867                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1868                 E1000_WRITE_FLUSH(hw);
1869                 return E1000_SUCCESS;
1870         }
1871
1872         /* RAR[1-6] are owned by manageability.  Skip those and program the
1873          * next address into the SHRA register array.
1874          */
1875         if (index < (u32) (hw->mac.rar_entry_count)) {
1876                 s32 ret_val;
1877
1878                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1879                 if (ret_val)
1880                         goto out;
1881
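                     /* SHRA[0] stands in for RAR[1], hence the index - 1 */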
1882                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1883                 E1000_WRITE_FLUSH(hw);
1884                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1885                 E1000_WRITE_FLUSH(hw);
1886
1887                 e1000_release_swflag_ich8lan(hw);
1888
1889                 /* verify the register updates */
1890                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1891                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1892                         return E1000_SUCCESS;
1893
1894                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1895                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1896         }
1897
1898 out:
1899         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1900         return -E1000_ERR_CONFIG;
1901 }
1902
1903 /**
1904  *  e1000_rar_set_pch_lpt - Set receive address registers
1905  *  @hw: pointer to the HW structure
1906  *  @addr: pointer to the receive address
1907  *  @index: receive address array register
1908  *
1909  *  Sets the receive address register array at index to the address passed
1910  *  in by addr. For LPT, RAR[0] is the base address register that is to
1911  *  contain the MAC address. SHRA[0-10] are the receive address registers
1912  *  that are shared between the Host and manageability engine (ME).
1913  **/
1914 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1915 {
1916         u32 rar_low, rar_high;
1917         u32 wlock_mac;
1918
1919         DEBUGFUNC("e1000_rar_set_pch_lpt");
1920
1921         /* HW expects these in little endian so we reverse the byte order
1922          * from network order (big endian) to little endian
1923          */
1924         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1925                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1926
1927         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1928
1929         /* If MAC address zero, no need to set the AV bit */
1930         if (rar_low || rar_high)
1931                 rar_high |= E1000_RAH_AV;
1932
1933         if (index == 0) {
1934                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1935                 E1000_WRITE_FLUSH(hw);
1936                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1937                 E1000_WRITE_FLUSH(hw);
1938                 return E1000_SUCCESS;
1939         }
1940
1941         /* The manageability engine (ME) can lock certain SHRAR registers that
1942          * it is using - those registers are unavailable for use.
1943          */
1944         if (index < hw->mac.rar_entry_count) {
1945                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1946                             E1000_FWSM_WLOCK_MAC_MASK;
1947                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1948
1949                 /* Check if all SHRAR registers are locked */
1950                 if (wlock_mac == 1)
1951                         goto out;
1952
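                     /* A wlock_mac of 0 means no SHRAR registers are locked;
                      * otherwise only indices up to wlock_mac are writable.
                      */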
1953                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1954                         s32 ret_val;
1955
1956                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1957
1958                         if (ret_val)
1959                                 goto out;
1960
1961                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1962                                         rar_low);
1963                         E1000_WRITE_FLUSH(hw);
1964                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1965                                         rar_high);
1966                         E1000_WRITE_FLUSH(hw);
1967
1968                         e1000_release_swflag_ich8lan(hw);
1969
1970                         /* verify the register updates */
1971                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1972                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1973                                 return E1000_SUCCESS;
1974                 }
1975         }
1976
1977 out:
1978         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1979         return -E1000_ERR_CONFIG;
1980 }
1981
1982 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1983 /**
1984  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1985  *  @hw: pointer to the HW structure
1986  *  @mc_addr_list: array of multicast addresses to program
1987  *  @mc_addr_count: number of multicast addresses to program
1988  *
1989  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1990  *  The caller must have a packed mc_addr_list of multicast addresses.
1991  **/
1992 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1993                                               u8 *mc_addr_list,
1994                                               u32 mc_addr_count)
1995 {
1996         u16 phy_reg = 0;
1997         int i;
1998         s32 ret_val;
1999
2000         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2001
2002         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2003
2004         ret_val = hw->phy.ops.acquire(hw);
2005         if (ret_val)
2006                 return;
2007
2008         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2009         if (ret_val)
2010                 goto release;
2011
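             /* Mirror the MTA shadow into the PHY wakeup registers, one
              * 16-bit half per write.
              */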
2012         for (i = 0; i < hw->mac.mta_reg_count; i++) {
2013                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2014                                            (u16)(hw->mac.mta_shadow[i] &
2015                                                  0xFFFF));
2016                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2017                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
2018                                                  0xFFFF));
2019         }
2020
2021         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2022
2023 release:
2024         hw->phy.ops.release(hw);
2025 }
2026
2027 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2028 /**
2029  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2030  *  @hw: pointer to the HW structure
2031  *
2032  *  Checks if firmware is blocking the reset of the PHY.
2033  *  This is a function pointer entry point only called by
2034  *  reset routines.
2035  **/
2036 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2037 {
2038         u32 fwsm;
2039         bool blocked = false;
2040         int i = 0;
2041
2042         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2043
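             /* Poll for up to 300 ms for firmware to stop blocking the PHY
              * reset (RSPCIPHY set in FWSM).
              */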
2044         do {
2045                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2046                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2047                         blocked = true;
2048                         msec_delay(10);
2049                         continue;
2050                 }
2051                 blocked = false;
2052         } while (blocked && (i++ < 30));
2053         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2054 }
2055
2056 /**
2057  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2058  *  @hw: pointer to the HW structure
2059  *
2060  *  Assumes semaphore already acquired.
2061  *
2062  **/
2063 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2064 {
2065         u16 phy_data;
2066         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2067         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2068                 E1000_STRAP_SMT_FREQ_SHIFT;
2069         s32 ret_val;
2070
2071         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2072
2073         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2074         if (ret_val)
2075                 return ret_val;
2076
2077         phy_data &= ~HV_SMB_ADDR_MASK;
2078         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2079         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2080
2081         if (hw->phy.type == e1000_phy_i217) {
2082                 /* Restore SMBus frequency */
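                     /* A strap value of 0 is treated as an unsupported
                      * frequency; otherwise the value is biased by one and
                      * its two low bits are copied into HV_SMB_ADDR.
                      */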
2083                 if (freq--) {
2084                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2085                         phy_data |= (freq & (1 << 0)) <<
2086                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2087                         phy_data |= (freq & (1 << 1)) <<
2088                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2089                 } else {
2090                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2091                 }
2092         }
2093
2094         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2095 }
2096
2097 /**
2098  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2099  *  @hw:   pointer to the HW structure
2100  *
2101  *  SW should configure the LCD from the NVM extended configuration region
2102  *  as a workaround for certain parts.
2103  **/
2104 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2105 {
2106         struct e1000_phy_info *phy = &hw->phy;
2107         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2108         s32 ret_val = E1000_SUCCESS;
2109         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2110
2111         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2112
2113         /* Initialize the PHY from the NVM on ICH platforms.  This
2114          * is needed due to an issue where the NVM configuration is
2115          * not properly autoloaded after power transitions.
2116          * Therefore, after each PHY reset, we will load the
2117          * configuration data out of the NVM manually.
2118          */
2119         switch (hw->mac.type) {
2120         case e1000_ich8lan:
2121                 if (phy->type != e1000_phy_igp_3)
2122                         return ret_val;
2123
2124                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2125                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2126                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2127                         break;
2128                 }
2129                 /* Fall-thru */
2130         case e1000_pchlan:
2131         case e1000_pch2lan:
2132         case e1000_pch_lpt:
2133                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2134                 break;
2135         default:
2136                 return ret_val;
2137         }
2138
2139         ret_val = hw->phy.ops.acquire(hw);
2140         if (ret_val)
2141                 return ret_val;
2142
2143         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2144         if (!(data & sw_cfg_mask))
2145                 goto release;
2146
2147         /* Make sure HW does not configure LCD from PHY
2148          * extended configuration before SW configuration
2149          */
2150         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2151         if ((hw->mac.type < e1000_pch2lan) &&
2152             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2153                 goto release;
2154
2155         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2156         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2157         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2158         if (!cnf_size)
2159                 goto release;
2160
2161         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2162         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2163
2164         if (((hw->mac.type == e1000_pchlan) &&
2165              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2166             (hw->mac.type > e1000_pchlan)) {
2167                 /* HW configures the SMBus address and LEDs when the
2168                  * OEM and LCD Write Enable bits are set in the NVM.
2169                  * When both NVM bits are cleared, SW will configure
2170                  * them instead.
2171                  */
2172                 ret_val = e1000_write_smbus_addr(hw);
2173                 if (ret_val)
2174                         goto release;
2175
2176                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2177                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2178                                                         (u16)data);
2179                 if (ret_val)
2180                         goto release;
2181         }
2182
2183         /* Configure LCD from extended configuration region. */
2184
2185         /* cnf_base_addr is in DWORD */
2186         word_addr = (u16)(cnf_base_addr << 1);
2187
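             /* Each entry is a pair of NVM words: the register data followed
              * by the PHY register address.
              */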
2188         for (i = 0; i < cnf_size; i++) {
2189                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2190                                            &reg_data);
2191                 if (ret_val)
2192                         goto release;
2193
2194                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2195                                            1, &reg_addr);
2196                 if (ret_val)
2197                         goto release;
2198
2199                 /* Save off the PHY page for future writes. */
2200                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2201                         phy_page = reg_data;
2202                         continue;
2203                 }
2204
2205                 reg_addr &= PHY_REG_MASK;
2206                 reg_addr |= phy_page;
2207
2208                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2209                                                     reg_data);
2210                 if (ret_val)
2211                         goto release;
2212         }
2213
2214 release:
2215         hw->phy.ops.release(hw);
2216         return ret_val;
2217 }
2218
2219 /**
2220  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2221  *  @hw:   pointer to the HW structure
2222  *  @link: link up bool flag
2223  *
2224  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2225  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2226  *  If link is down, the function will restore the default K1 setting located
2227  *  in the NVM.
2228  **/
2229 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2230 {
2231         s32 ret_val = E1000_SUCCESS;
2232         u16 status_reg = 0;
2233         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2234
2235         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2236
2237         if (hw->mac.type != e1000_pchlan)
2238                 return E1000_SUCCESS;
2239
2240         /* Wrap the whole flow with the sw flag */
2241         ret_val = hw->phy.ops.acquire(hw);
2242         if (ret_val)
2243                 return ret_val;
2244
2245         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2246         if (link) {
2247                 if (hw->phy.type == e1000_phy_82578) {
2248                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2249                                                               &status_reg);
2250                         if (ret_val)
2251                                 goto release;
2252
2253                         status_reg &= (BM_CS_STATUS_LINK_UP |
2254                                        BM_CS_STATUS_RESOLVED |
2255                                        BM_CS_STATUS_SPEED_MASK);
2256
2257                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2258                                            BM_CS_STATUS_RESOLVED |
2259                                            BM_CS_STATUS_SPEED_1000))
2260                                 k1_enable = false;
2261                 }
2262
2263                 if (hw->phy.type == e1000_phy_82577) {
2264                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2265                                                               &status_reg);
2266                         if (ret_val)
2267                                 goto release;
2268
2269                         status_reg &= (HV_M_STATUS_LINK_UP |
2270                                        HV_M_STATUS_AUTONEG_COMPLETE |
2271                                        HV_M_STATUS_SPEED_MASK);
2272
2273                         if (status_reg == (HV_M_STATUS_LINK_UP |
2274                                            HV_M_STATUS_AUTONEG_COMPLETE |
2275                                            HV_M_STATUS_SPEED_1000))
2276                                 k1_enable = false;
2277                 }
2278
2279                 /* Link stall fix for link up */
2280                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2281                                                        0x0100);
2282                 if (ret_val)
2283                         goto release;
2284
2285         } else {
2286                 /* Link stall fix for link down */
2287                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2288                                                        0x4100);
2289                 if (ret_val)
2290                         goto release;
2291         }
2292
2293         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2294
2295 release:
2296         hw->phy.ops.release(hw);
2297
2298         return ret_val;
2299 }
2300
2301 /**
2302  *  e1000_configure_k1_ich8lan - Configure K1 power state
2303  *  @hw: pointer to the HW structure
2304  *  @k1_enable: K1 state to configure
2305  *
2306  *  Configure the K1 power state based on the provided parameter.
2307  *  Assumes semaphore already acquired.
2308  *
2309  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2310  **/
2311 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2312 {
2313         s32 ret_val;
2314         u32 ctrl_reg = 0;
2315         u32 ctrl_ext = 0;
2316         u32 reg = 0;
2317         u16 kmrn_reg = 0;
2318
2319         DEBUGFUNC("e1000_configure_k1_ich8lan");
2320
2321         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2322                                              &kmrn_reg);
2323         if (ret_val)
2324                 return ret_val;
2325
2326         if (k1_enable)
2327                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2328         else
2329                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2330
2331         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2332                                               kmrn_reg);
2333         if (ret_val)
2334                 return ret_val;
2335
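             /* Save CTRL/CTRL_EXT, momentarily force the MAC speed with
              * speed-bypass set, then restore the original values.
              */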
2336         usec_delay(20);
2337         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2338         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2339
2340         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2341         reg |= E1000_CTRL_FRCSPD;
2342         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2343
2344         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2345         E1000_WRITE_FLUSH(hw);
2346         usec_delay(20);
2347         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2348         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2349         E1000_WRITE_FLUSH(hw);
2350         usec_delay(20);
2351
2352         return E1000_SUCCESS;
2353 }
2354
2355 /**
2356  *  e1000_oem_bits_config_ich8lan - Configure OEM bits in the PHY
2357  *  @hw:       pointer to the HW structure
2358  *  @d0_state: boolean indicating whether the device is entering D0 or D3
2359  *
2360  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2361  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2362  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2363  **/
2364 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2365 {
2366         s32 ret_val = 0;
2367         u32 mac_reg;
2368         u16 oem_reg;
2369
2370         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2371
2372         if (hw->mac.type < e1000_pchlan)
2373                 return ret_val;
2374
2375         ret_val = hw->phy.ops.acquire(hw);
2376         if (ret_val)
2377                 return ret_val;
2378
2379         if (hw->mac.type == e1000_pchlan) {
2380                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2381                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2382                         goto release;
2383         }
2384
2385         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2386         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2387                 goto release;
2388
2389         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2390
2391         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2392         if (ret_val)
2393                 goto release;
2394
2395         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2396
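             /* Mirror the Gbe Disable and LPLU settings from PHY_CTRL into
              * the PHY OEM bits for the requested power state.
              */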
2397         if (d0_state) {
2398                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2399                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2400
2401                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2402                         oem_reg |= HV_OEM_BITS_LPLU;
2403         } else {
2404                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2405                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2406                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2407
2408                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2409                     E1000_PHY_CTRL_NOND0A_LPLU))
2410                         oem_reg |= HV_OEM_BITS_LPLU;
2411         }
2412
2413         /* Set Restart auto-neg to activate the bits */
2414         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2415             !hw->phy.ops.check_reset_block(hw))
2416                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2417
2418         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2419
2420 release:
2421         hw->phy.ops.release(hw);
2422
2423         return ret_val;
2424 }
2425
2426
2427 /**
2428  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2429  *  @hw:   pointer to the HW structure
2430  **/
2431 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2432 {
2433         s32 ret_val;
2434         u16 data;
2435
2436         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2437
2438         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2439         if (ret_val)
2440                 return ret_val;
2441
2442         data |= HV_KMRN_MDIO_SLOW;
2443
2444         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2445
2446         return ret_val;
2447 }
2448
2449 /**
2450  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2451  *  done after every PHY reset.
2452  **/
2453 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2454 {
2455         s32 ret_val = E1000_SUCCESS;
2456         u16 phy_data;
2457
2458         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2459
2460         if (hw->mac.type != e1000_pchlan)
2461                 return E1000_SUCCESS;
2462
2463         /* Set MDIO slow mode before any other MDIO access */
2464         if (hw->phy.type == e1000_phy_82577) {
2465                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2466                 if (ret_val)
2467                         return ret_val;
2468         }
2469
2470         if (((hw->phy.type == e1000_phy_82577) &&
2471              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2472             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2473                 /* Disable generation of early preamble */
2474                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2475                 if (ret_val)
2476                         return ret_val;
2477
2478                 /* Preamble tuning for SSC */
2479                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2480                                                 0xA204);
2481                 if (ret_val)
2482                         return ret_val;
2483         }
2484
2485         if (hw->phy.type == e1000_phy_82578) {
2486                 /* Return registers to default by doing a soft reset then
2487                  * writing 0x3140 to the control register.
2488                  */
2489                 if (hw->phy.revision < 2) {
2490                         e1000_phy_sw_reset_generic(hw);
2491                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2492                                                         0x3140);
2493                 }
2494         }
2495
2496         /* Select page 0 */
2497         ret_val = hw->phy.ops.acquire(hw);
2498         if (ret_val)
2499                 return ret_val;
2500
2501         hw->phy.addr = 1;
2502         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2503         hw->phy.ops.release(hw);
2504         if (ret_val)
2505                 return ret_val;
2506
2507         /* Configure the K1 Si workaround during phy reset assuming there is
2508          * link so that it disables K1 if link is at 1Gbps.
2509          */
2510         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2511         if (ret_val)
2512                 return ret_val;
2513
2514         /* Workaround for link disconnects on a busy hub in half duplex */
2515         ret_val = hw->phy.ops.acquire(hw);
2516         if (ret_val)
2517                 return ret_val;
2518         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2519         if (ret_val)
2520                 goto release;
2521         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2522                                                phy_data & 0x00FF);
2523         if (ret_val)
2524                 goto release;
2525
2526         /* set MSE higher to enable link to stay up when noise is high */
2527         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2528 release:
2529         hw->phy.ops.release(hw);
2530
2531         return ret_val;
2532 }
2533
2534 /**
2535  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2536  *  @hw:   pointer to the HW structure
2537  **/
2538 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2539 {
2540         u32 mac_reg;
2541         u16 i, phy_reg = 0;
2542         s32 ret_val;
2543
2544         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2545
2546         ret_val = hw->phy.ops.acquire(hw);
2547         if (ret_val)
2548                 return;
2549         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2550         if (ret_val)
2551                 goto release;
2552
2553         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2554         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2555                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2556                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2557                                            (u16)(mac_reg & 0xFFFF));
2558                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2559                                            (u16)((mac_reg >> 16) & 0xFFFF));
2560
2561                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2562                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2563                                            (u16)(mac_reg & 0xFFFF));
2564                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2565                                            (u16)((mac_reg & E1000_RAH_AV)
2566                                                  >> 16));
2567         }
2568
2569         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2570
2571 release:
2572         hw->phy.ops.release(hw);
2573 }
2574
2575 #ifndef CRC32_OS_SUPPORT
2576 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2577 {
2578         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2579         u32 i, j, mask, crc;
2580
2581         DEBUGFUNC("e1000_calc_rx_da_crc");
2582
2583         crc = 0xffffffff;
2584         for (i = 0; i < 6; i++) {
2585                 crc = crc ^ mac[i];
2586                 for (j = 8; j > 0; j--) {
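                             /* mask is all ones when the CRC LSB is set, so
                              * the reflected polynomial is XORed in without
                              * a branch.
                              */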
2587                         mask = (crc & 1) * (-1);
2588                         crc = (crc >> 1) ^ (poly & mask);
2589                 }
2590         }
2591         return ~crc;
2592 }
2593
2594 #endif /* CRC32_OS_SUPPORT */
2595 /**
2596  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2597  *  with 82579 PHY
2598  *  @hw: pointer to the HW structure
2599  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2600  **/
2601 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2602 {
2603         s32 ret_val = E1000_SUCCESS;
2604         u16 phy_reg, data;
2605         u32 mac_reg;
2606         u16 i;
2607
2608         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2609
2610         if (hw->mac.type < e1000_pch2lan)
2611                 return E1000_SUCCESS;
2612
2613         /* disable Rx path while enabling/disabling workaround */
2614         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2615         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2616                                         phy_reg | (1 << 14));
2617         if (ret_val)
2618                 return ret_val;
2619
2620         if (enable) {
2621                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2622                  * SHRAL/H) and initial CRC values to the MAC
2623                  */
2624                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2625                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2626                         u32 addr_high, addr_low;
2627
2628                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2629                         if (!(addr_high & E1000_RAH_AV))
2630                                 continue;
2631                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2632                         mac_addr[0] = (addr_low & 0xFF);
2633                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2634                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2635                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2636                         mac_addr[4] = (addr_high & 0xFF);
2637                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2638
2639 #ifndef CRC32_OS_SUPPORT
2640                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2641                                         e1000_calc_rx_da_crc(mac_addr));
2642 #else /* CRC32_OS_SUPPORT */
2643                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2644                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2645 #endif /* CRC32_OS_SUPPORT */
2646                 }
2647
2648                 /* Write Rx addresses to the PHY */
2649                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2650
2651                 /* Enable jumbo frame workaround in the MAC */
2652                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2653                 mac_reg &= ~(1 << 14);
2654                 mac_reg |= (7 << 15);
2655                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2656
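                     /* Have the MAC strip the Ethernet CRC while the
                      * workaround is enabled.
                      */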
2657                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2658                 mac_reg |= E1000_RCTL_SECRC;
2659                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2660
2661                 ret_val = e1000_read_kmrn_reg_generic(hw,
2662                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2663                                                 &data);
2664                 if (ret_val)
2665                         return ret_val;
2666                 ret_val = e1000_write_kmrn_reg_generic(hw,
2667                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2668                                                 data | (1 << 0));
2669                 if (ret_val)
2670                         return ret_val;
2671                 ret_val = e1000_read_kmrn_reg_generic(hw,
2672                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2673                                                 &data);
2674                 if (ret_val)
2675                         return ret_val;
2676                 data &= ~(0xF << 8);
2677                 data |= (0xB << 8);
2678                 ret_val = e1000_write_kmrn_reg_generic(hw,
2679                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2680                                                 data);
2681                 if (ret_val)
2682                         return ret_val;
2683
2684                 /* Enable jumbo frame workaround in the PHY */
2685                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2686                 data &= ~(0x7F << 5);
2687                 data |= (0x37 << 5);
2688                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2689                 if (ret_val)
2690                         return ret_val;
2691                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2692                 data &= ~(1 << 13);
2693                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2694                 if (ret_val)
2695                         return ret_val;
2696                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2697                 data &= ~(0x3FF << 2);
2698                 data |= (E1000_TX_PTR_GAP << 2);
2699                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2700                 if (ret_val)
2701                         return ret_val;
2702                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2703                 if (ret_val)
2704                         return ret_val;
2705                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2706                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2707                                                 (1 << 10));
2708                 if (ret_val)
2709                         return ret_val;
2710         } else {
2711                 /* Write MAC register values back to h/w defaults */
2712                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2713                 mac_reg &= ~(0xF << 14);
2714                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2715
2716                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2717                 mac_reg &= ~E1000_RCTL_SECRC;
2718                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2719
2720                 ret_val = e1000_read_kmrn_reg_generic(hw,
2721                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2722                                                 &data);
2723                 if (ret_val)
2724                         return ret_val;
2725                 ret_val = e1000_write_kmrn_reg_generic(hw,
2726                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2727                                                 data & ~(1 << 0));
2728                 if (ret_val)
2729                         return ret_val;
2730                 ret_val = e1000_read_kmrn_reg_generic(hw,
2731                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2732                                                 &data);
2733                 if (ret_val)
2734                         return ret_val;
2735                 data &= ~(0xF << 8);
2736                 data |= (0xB << 8);
2737                 ret_val = e1000_write_kmrn_reg_generic(hw,
2738                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2739                                                 data);
2740                 if (ret_val)
2741                         return ret_val;
2742
2743                 /* Write PHY register values back to h/w defaults */
2744                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2745                 data &= ~(0x7F << 5);
2746                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2747                 if (ret_val)
2748                         return ret_val;
2749                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2750                 data |= (1 << 13);
2751                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2752                 if (ret_val)
2753                         return ret_val;
2754                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2755                 data &= ~(0x3FF << 2);
2756                 data |= (0x8 << 2);
2757                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2758                 if (ret_val)
2759                         return ret_val;
2760                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2761                 if (ret_val)
2762                         return ret_val;
2763                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2764                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2765                                                 ~(1 << 10));
2766                 if (ret_val)
2767                         return ret_val;
2768         }
2769
2770         /* re-enable Rx path after enabling/disabling workaround */
2771         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2772                                      ~(1 << 14));
2773 }
2774
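/* Minimal usage sketch (illustrative, not part of the original driver code):
 * the workaround above is typically toggled when the configured frame size
 * changes, e.g. with a hypothetical max_frame value tracked by the caller:
 *
 *	ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, max_frame > 1518);
 */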
2775 /**
2776  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds after every PHY reset
2777  *  @hw: pointer to the HW structure
2778  **/
2779 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2780 {
2781         s32 ret_val = E1000_SUCCESS;
2782
2783         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2784
2785         if (hw->mac.type != e1000_pch2lan)
2786                 return E1000_SUCCESS;
2787
2788         /* Set MDIO slow mode before any other MDIO access */
2789         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2790         if (ret_val)
2791                 return ret_val;
2792
2793         ret_val = hw->phy.ops.acquire(hw);
2794         if (ret_val)
2795                 return ret_val;
2796         /* set MSE higher so the link stays up when noise is high */
2797         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2798         if (ret_val)
2799                 goto release;
2800         /* drop the link after the MSE threshold has been reached 5 times */
2801         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2802 release:
2803         hw->phy.ops.release(hw);
2804
2805         return ret_val;
2806 }
2807
2808 /**
2809  *  e1000_k1_workaround_lv - K1 Si workaround
2810  *  @hw:   pointer to the HW structure
2811  *
2812  *  Workaround to set the K1 beacon duration for 82579 parts linked at 10Mbps
2813  *  and to disable K1 for the 1000Mbps and 100Mbps speeds.
2814  **/
2815 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2816 {
2817         s32 ret_val = E1000_SUCCESS;
2818         u16 status_reg = 0;
2819
2820         DEBUGFUNC("e1000_k1_workaround_lv");
2821
2822         if (hw->mac.type != e1000_pch2lan)
2823                 return E1000_SUCCESS;
2824
2825         /* Set K1 beacon duration based on 10Mbps speed */
2826         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2827         if (ret_val)
2828                 return ret_val;
2829
2830         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2831             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2832                 if (status_reg &
2833                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2834                         u16 pm_phy_reg;
2835
2836                         /* LV 1G/100 packet drop issue workaround */
2837                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2838                                                        &pm_phy_reg);
2839                         if (ret_val)
2840                                 return ret_val;
2841                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2842                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2843                                                         pm_phy_reg);
2844                         if (ret_val)
2845                                 return ret_val;
2846                 } else {
2847                         u32 mac_reg;
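                        /* 10Mbps link: lengthen the K1 beacon duration to
                         * 16 usec instead of disabling K1.
                         */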
2848                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2849                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2850                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2851                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2852                 }
2853         }
2854
2855         return ret_val;
2856 }
2857
2858 /**
2859  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2860  *  @hw:   pointer to the HW structure
2861  *  @gate: boolean set to true to gate, false to ungate
2862  *
2863  *  Gate/ungate the automatic PHY configuration via hardware; perform
2864  *  the configuration via software instead.
2865  **/
2866 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2867 {
2868         u32 extcnf_ctrl;
2869
2870         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2871
2872         if (hw->mac.type < e1000_pch2lan)
2873                 return;
2874
2875         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2876
2877         if (gate)
2878                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2879         else
2880                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2881
2882         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2883 }
2884
2885 /**
2886  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2887  *  @hw: pointer to the HW structure
2888  *
2889  *  Check the appropriate indication the MAC has finished configuring the
2890  *  PHY after a software reset.
2891  **/
2892 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2893 {
2894         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2895
2896         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2897
2898         /* Wait for basic configuration to complete before proceeding */
2899         do {
2900                 data = E1000_READ_REG(hw, E1000_STATUS);
2901                 data &= E1000_STATUS_LAN_INIT_DONE;
2902                 usec_delay(100);
2903         } while ((!data) && --loop);
2904
2905         /* If basic configuration is incomplete before the above loop
2906          * count reaches 0, loading the configuration from NVM will
2907          * leave the PHY in a bad state possibly resulting in no link.
2908          */
2909         if (loop == 0)
2910                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2911
2912         /* Clear the Init Done bit for the next init event */
2913         data = E1000_READ_REG(hw, E1000_STATUS);
2914         data &= ~E1000_STATUS_LAN_INIT_DONE;
2915         E1000_WRITE_REG(hw, E1000_STATUS, data);
2916 }
2917
2918 /**
2919  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2920  *  @hw: pointer to the HW structure
2921  **/
2922 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2923 {
2924         s32 ret_val = E1000_SUCCESS;
2925         u16 reg;
2926
2927         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2928
2929         if (hw->phy.ops.check_reset_block(hw))
2930                 return E1000_SUCCESS;
2931
2932         /* Allow time for h/w to get to quiescent state after reset */
2933         msec_delay(10);
2934
2935         /* Perform any necessary post-reset workarounds */
2936         switch (hw->mac.type) {
2937         case e1000_pchlan:
2938                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2939                 if (ret_val)
2940                         return ret_val;
2941                 break;
2942         case e1000_pch2lan:
2943                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2944                 if (ret_val)
2945                         return ret_val;
2946                 break;
2947         default:
2948                 break;
2949         }
2950
2951         /* Clear the host wakeup bit after lcd reset */
2952         if (hw->mac.type >= e1000_pchlan) {
2953                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2954                 reg &= ~BM_WUC_HOST_WU_BIT;
2955                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2956         }
2957
2958         /* Configure the LCD with the extended configuration region in NVM */
2959         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2960         if (ret_val)
2961                 return ret_val;
2962
2963         /* Configure the LCD with the OEM bits in NVM */
2964         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2965
2966         if (hw->mac.type == e1000_pch2lan) {
2967                 /* Ungate automatic PHY configuration on non-managed 82579 */
2968                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2969                     E1000_ICH_FWSM_FW_VALID)) {
2970                         msec_delay(10);
2971                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2972                 }
2973
2974                 /* Set EEE LPI Update Timer to 200usec */
2975                 ret_val = hw->phy.ops.acquire(hw);
2976                 if (ret_val)
2977                         return ret_val;
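                /* 0x1387 = 4999 timer ticks; assuming a 40 ns tick
                 * granularity this matches the ~200 usec noted above.
                 */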
2978                 ret_val = e1000_write_emi_reg_locked(hw,
2979                                                      I82579_LPI_UPDATE_TIMER,
2980                                                      0x1387);
2981                 hw->phy.ops.release(hw);
2982         }
2983
2984         return ret_val;
2985 }
2986
2987 /**
2988  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2989  *  @hw: pointer to the HW structure
2990  *
2991  *  Resets the PHY
2992  *  This is a function pointer entry point called by drivers
2993  *  or other shared routines.
2994  **/
2995 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2996 {
2997         s32 ret_val = E1000_SUCCESS;
2998
2999         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3000
3001         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3002         if ((hw->mac.type == e1000_pch2lan) &&
3003             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3004                 e1000_gate_hw_phy_config_ich8lan(hw, true);
3005
3006         ret_val = e1000_phy_hw_reset_generic(hw);
3007         if (ret_val)
3008                 return ret_val;
3009
3010         return e1000_post_phy_reset_ich8lan(hw);
3011 }
3012
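/* Note: this reset handler is normally reached through the function pointer
 * table (hw->phy.ops.reset), so the gate/ungate handling above wraps PHY
 * resets issued through that entry point.
 */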
3013 /**
3014  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3015  *  @hw: pointer to the HW structure
3016  *  @active: true to enable LPLU, false to disable
3017  *
3018  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3019  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
3020  *  the PHY speed. This function will manually set the LPLU bit and restart
3021  *  auto-neg as the hardware would do. D3 and D0 LPLU will call the same function
3022  *  since it configures the same bit.
3023  **/
3024 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3025 {
3026         s32 ret_val;
3027         u16 oem_reg;
3028
3029         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3030         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3031         if (ret_val)
3032                 return ret_val;
3033
3034         if (active)
3035                 oem_reg |= HV_OEM_BITS_LPLU;
3036         else
3037                 oem_reg &= ~HV_OEM_BITS_LPLU;
3038
3039         if (!hw->phy.ops.check_reset_block(hw))
3040                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3041
3042         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3043 }
3044
3045 /**
3046  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3047  *  @hw: pointer to the HW structure
3048  *  @active: true to enable LPLU, false to disable
3049  *
3050  *  Sets the LPLU D0 state according to the active flag.  When
3051  *  activating LPLU this function also disables smart speed
3052  *  and vice versa.  LPLU will not be activated unless the
3053  *  device's autonegotiation advertisement is limited to 10,
3054  *  10/100, or 10/100/1000 Mb/s at all duplexes.
3055  *  This is a function pointer entry point only called by
3056  *  PHY setup routines.
3057  **/
3058 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3059 {
3060         struct e1000_phy_info *phy = &hw->phy;
3061         u32 phy_ctrl;
3062         s32 ret_val = E1000_SUCCESS;
3063         u16 data;
3064
3065         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3066
3067         if (phy->type == e1000_phy_ife)
3068                 return E1000_SUCCESS;
3069
3070         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3071
3072         if (active) {
3073                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3074                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3075
3076                 if (phy->type != e1000_phy_igp_3)
3077                         return E1000_SUCCESS;
3078
3079                 /* Call gig speed drop workaround on LPLU before accessing
3080                  * any PHY registers
3081                  */
3082                 if (hw->mac.type == e1000_ich8lan)
3083                         e1000_gig_downshift_workaround_ich8lan(hw);
3084
3085                 /* When LPLU is enabled, we should disable SmartSpeed */
3086                 ret_val = phy->ops.read_reg(hw,
3087                                             IGP01E1000_PHY_PORT_CONFIG,
3088                                             &data);
3089                 if (ret_val)
3090                         return ret_val;
3091                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3092                 ret_val = phy->ops.write_reg(hw,
3093                                              IGP01E1000_PHY_PORT_CONFIG,
3094                                              data);
3095                 if (ret_val)
3096                         return ret_val;
3097         } else {
3098                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3099                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3100
3101                 if (phy->type != e1000_phy_igp_3)
3102                         return E1000_SUCCESS;
3103
3104                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3105                  * during Dx states where the power conservation is most
3106                  * important.  During driver activity we should enable
3107                  * SmartSpeed, so performance is maintained.
3108                  */
3109                 if (phy->smart_speed == e1000_smart_speed_on) {
3110                         ret_val = phy->ops.read_reg(hw,
3111                                                     IGP01E1000_PHY_PORT_CONFIG,
3112                                                     &data);
3113                         if (ret_val)
3114                                 return ret_val;
3115
3116                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3117                         ret_val = phy->ops.write_reg(hw,
3118                                                      IGP01E1000_PHY_PORT_CONFIG,
3119                                                      data);
3120                         if (ret_val)
3121                                 return ret_val;
3122                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3123                         ret_val = phy->ops.read_reg(hw,
3124                                                     IGP01E1000_PHY_PORT_CONFIG,
3125                                                     &data);
3126                         if (ret_val)
3127                                 return ret_val;
3128
3129                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3130                         ret_val = phy->ops.write_reg(hw,
3131                                                      IGP01E1000_PHY_PORT_CONFIG,
3132                                                      data);
3133                         if (ret_val)
3134                                 return ret_val;
3135                 }
3136         }
3137
3138         return E1000_SUCCESS;
3139 }
3140
3141 /**
3142  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3143  *  @hw: pointer to the HW structure
3144  *  @active: true to enable LPLU, false to disable
3145  *
3146  *  Sets the LPLU D3 state according to the active flag.  When
3147  *  activating LPLU this function also disables smart speed
3148  *  and vice versa.  LPLU will not be activated unless the
3149  *  device's autonegotiation advertisement is limited to 10,
3150  *  10/100, or 10/100/1000 Mb/s at all duplexes.
3151  *  This is a function pointer entry point only called by
3152  *  PHY setup routines.
3153  **/
3154 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3155 {
3156         struct e1000_phy_info *phy = &hw->phy;
3157         u32 phy_ctrl;
3158         s32 ret_val = E1000_SUCCESS;
3159         u16 data;
3160
3161         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3162
3163         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3164
3165         if (!active) {
3166                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3167                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3168
3169                 if (phy->type != e1000_phy_igp_3)
3170                         return E1000_SUCCESS;
3171
3172                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3173                  * during Dx states where the power conservation is most
3174                  * important.  During driver activity we should enable
3175                  * SmartSpeed, so performance is maintained.
3176                  */
3177                 if (phy->smart_speed == e1000_smart_speed_on) {
3178                         ret_val = phy->ops.read_reg(hw,
3179                                                     IGP01E1000_PHY_PORT_CONFIG,
3180                                                     &data);
3181                         if (ret_val)
3182                                 return ret_val;
3183
3184                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3185                         ret_val = phy->ops.write_reg(hw,
3186                                                      IGP01E1000_PHY_PORT_CONFIG,
3187                                                      data);
3188                         if (ret_val)
3189                                 return ret_val;
3190                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3191                         ret_val = phy->ops.read_reg(hw,
3192                                                     IGP01E1000_PHY_PORT_CONFIG,
3193                                                     &data);
3194                         if (ret_val)
3195                                 return ret_val;
3196
3197                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3198                         ret_val = phy->ops.write_reg(hw,
3199                                                      IGP01E1000_PHY_PORT_CONFIG,
3200                                                      data);
3201                         if (ret_val)
3202                                 return ret_val;
3203                 }
3204         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3205                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3206                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3207                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3208                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3209
3210                 if (phy->type != e1000_phy_igp_3)
3211                         return E1000_SUCCESS;
3212
3213                 /* Call gig speed drop workaround on LPLU before accessing
3214                  * any PHY registers
3215                  */
3216                 if (hw->mac.type == e1000_ich8lan)
3217                         e1000_gig_downshift_workaround_ich8lan(hw);
3218
3219                 /* When LPLU is enabled, we should disable SmartSpeed */
3220                 ret_val = phy->ops.read_reg(hw,
3221                                             IGP01E1000_PHY_PORT_CONFIG,
3222                                             &data);
3223                 if (ret_val)
3224                         return ret_val;
3225
3226                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3227                 ret_val = phy->ops.write_reg(hw,
3228                                              IGP01E1000_PHY_PORT_CONFIG,
3229                                              data);
3230         }
3231
3232         return ret_val;
3233 }
3234
3235 /**
3236  *  e1000_valid_nvm_bank_detect_ich8lan - finds the valid NVM bank (0 or 1)
3237  *  @hw: pointer to the HW structure
3238  *  @bank:  pointer to the variable that returns the active bank
3239  *
3240  *  Reads signature byte from the NVM using the flash access registers.
3241  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3242  **/
3243 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3244 {
3245         u32 eecd;
3246         struct e1000_nvm_info *nvm = &hw->nvm;
3247         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3248         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
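        /* act_offset is the byte address of the signature word's high byte
         * (word 0x13 * 2 + 1), which carries the valid-signature bits 15:14.
         */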
3249         u32 nvm_dword = 0;
3250         u8 sig_byte = 0;
3251         s32 ret_val;
3252
3253         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3254
3255         switch (hw->mac.type) {
3256         case e1000_ich8lan:
3257         case e1000_ich9lan:
3258                 eecd = E1000_READ_REG(hw, E1000_EECD);
3259                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3260                     E1000_EECD_SEC1VAL_VALID_MASK) {
3261                         if (eecd & E1000_EECD_SEC1VAL)
3262                                 *bank = 1;
3263                         else
3264                                 *bank = 0;
3265
3266                         return E1000_SUCCESS;
3267                 }
3268                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3269                 /* fall-thru */
3270         default:
3271                 /* set bank to 0 in case flash read fails */
3272                 *bank = 0;
3273
3274                 /* Check bank 0 */
3275                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3276                                                         &sig_byte);
3277                 if (ret_val)
3278                         return ret_val;
3279                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3280                     E1000_ICH_NVM_SIG_VALUE) {
3281                         *bank = 0;
3282                         return E1000_SUCCESS;
3283                 }
3284
3285                 /* Check bank 1 */
3286                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3287                                                         bank1_offset,
3288                                                         &sig_byte);
3289                 if (ret_val)
3290                         return ret_val;
3291                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3292                     E1000_ICH_NVM_SIG_VALUE) {
3293                         *bank = 1;
3294                         return E1000_SUCCESS;
3295                 }
3296
3297                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3298                 return -E1000_ERR_NVM;
3299         }
3300 }
3301
3302 /**
3303  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3304  *  @hw: pointer to the HW structure
3305  *  @offset: The offset (in words) of the word(s) to read.
3306  *  @words: Size of data to read in words
3307  *  @data: Pointer to the word(s) to read at offset.
3308  *
3309  *  Reads a word(s) from the NVM using the flash access registers.
3310  **/
3311 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3312                                   u16 *data)
3313 {
3314         struct e1000_nvm_info *nvm = &hw->nvm;
3315         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3316         u32 act_offset;
3317         s32 ret_val = E1000_SUCCESS;
3318         u32 bank = 0;
3319         u16 i, word;
3320
3321         DEBUGFUNC("e1000_read_nvm_ich8lan");
3322
3323         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3324             (words == 0)) {
3325                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3326                 ret_val = -E1000_ERR_NVM;
3327                 goto out;
3328         }
3329
3330         nvm->ops.acquire(hw);
3331
3332         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3333         if (ret_val != E1000_SUCCESS) {
3334                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3335                 bank = 0;
3336         }
3337
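        /* flash_bank_size is in 16-bit words, so act_offset below is the
         * word offset of the request within the active bank.
         */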
3338         act_offset = (bank) ? nvm->flash_bank_size : 0;
3339         act_offset += offset;
3340
3341         ret_val = E1000_SUCCESS;
3342         for (i = 0; i < words; i++) {
3343                 if (dev_spec->shadow_ram[offset+i].modified) {
3344                         data[i] = dev_spec->shadow_ram[offset+i].value;
3345                 } else {
3346                         ret_val = e1000_read_flash_word_ich8lan(hw,
3347                                                                 act_offset + i,
3348                                                                 &word);
3349                         if (ret_val)
3350                                 break;
3351                         data[i] = word;
3352                 }
3353         }
3354
3355         nvm->ops.release(hw);
3356
3357 out:
3358         if (ret_val)
3359                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3360
3361         return ret_val;
3362 }
3363
3364 /**
3365  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3366  *  @hw: pointer to the HW structure
3367  *
3368  *  This function does initial flash setup so that a new read/write/erase cycle
3369  *  can be started.
3370  **/
3371 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3372 {
3373         union ich8_hws_flash_status hsfsts;
3374         s32 ret_val = -E1000_ERR_NVM;
3375
3376         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3377
3378         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3379
3380         /* Check if the flash descriptor is valid */
3381         if (!hsfsts.hsf_status.fldesvalid) {
3382                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3383                 return -E1000_ERR_NVM;
3384         }
3385
3386         /* Clear FCERR and DAEL in hw status by writing 1 */
3387         hsfsts.hsf_status.flcerr = 1;
3388         hsfsts.hsf_status.dael = 1;
3389         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3390
3391         /* Either we should have a hardware SPI cycle-in-progress
3392          * bit to check against in order to start a new cycle, or
3393          * the FDONE bit should be set by hardware after reset so
3394          * that it can be used as an indication of whether a cycle
3395          * is currently in progress or has already been
3396          * completed.
3397          */
3398
3399         if (!hsfsts.hsf_status.flcinprog) {
3400                 /* There is no cycle running at present,
3401                  * so we can start a cycle.
3402                  * Begin by setting Flash Cycle Done.
3403                  */
3404                 hsfsts.hsf_status.flcdone = 1;
3405                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3406                 ret_val = E1000_SUCCESS;
3407         } else {
3408                 s32 i;
3409
3410                 /* Otherwise, poll for some time so the current
3411                  * cycle has a chance to end before giving up.
3412                  */
3413                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3414                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3415                                                               ICH_FLASH_HSFSTS);
3416                         if (!hsfsts.hsf_status.flcinprog) {
3417                                 ret_val = E1000_SUCCESS;
3418                                 break;
3419                         }
3420                         usec_delay(1);
3421                 }
3422                 if (ret_val == E1000_SUCCESS) {
3423                         /* The previous cycle ended within the timeout;
3424                          * now set the Flash Cycle Done.
3425                          */
3426                         hsfsts.hsf_status.flcdone = 1;
3427                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3428                                                 hsfsts.regval);
3429                 } else {
3430                         DEBUGOUT("Flash controller busy, cannot get access\n");
3431                 }
3432         }
3433
3434         return ret_val;
3435 }
3436
3437 /**
3438  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3439  *  @hw: pointer to the HW structure
3440  *  @timeout: maximum time to wait for completion
3441  *
3442  *  This function starts a flash cycle and waits for its completion.
3443  **/
3444 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3445 {
3446         union ich8_hws_flash_ctrl hsflctl;
3447         union ich8_hws_flash_status hsfsts;
3448         u32 i = 0;
3449
3450         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3451
3452         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3453         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3454         hsflctl.hsf_ctrl.flcgo = 1;
3455
3456         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3457
3458         /* wait until the FDONE bit is set to 1 */
3459         do {
3460                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3461                 if (hsfsts.hsf_status.flcdone)
3462                         break;
3463                 usec_delay(1);
3464         } while (i++ < timeout);
3465
3466         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3467                 return E1000_SUCCESS;
3468
3469         return -E1000_ERR_NVM;
3470 }
3471
3472 /**
3473  *  e1000_read_flash_word_ich8lan - Read word from flash
3474  *  @hw: pointer to the HW structure
3475  *  @offset: offset to data location
3476  *  @data: pointer to the location for storing the data
3477  *
3478  *  Reads the flash word at offset into data.  Offset is converted
3479  *  to bytes before read.
3480  **/
3481 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3482                                          u16 *data)
3483 {
3484         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3485
3486         if (!data)
3487                 return -E1000_ERR_NVM;
3488
3489         /* Must convert offset into bytes. */
3490         offset <<= 1;
3491
3492         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3493 }
3494
3495 /**
3496  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3497  *  @hw: pointer to the HW structure
3498  *  @offset: The offset of the byte to read.
3499  *  @data: Pointer to a byte to store the value read.
3500  *
3501  *  Reads a single byte from the NVM using the flash access registers.
3502  **/
3503 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3504                                          u8 *data)
3505 {
3506         s32 ret_val;
3507         u16 word = 0;
3508
3509         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3510
3511         if (ret_val)
3512                 return ret_val;
3513
3514         *data = (u8)word;
3515
3516         return E1000_SUCCESS;
3517 }
3518
3519 /**
3520  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3521  *  @hw: pointer to the HW structure
3522  *  @offset: The offset (in bytes) of the byte or word to read.
3523  *  @size: Size of data to read, 1=byte 2=word
3524  *  @data: Pointer to the word to store the value read.
3525  *
3526  *  Reads a byte or word from the NVM using the flash access registers.
3527  **/
3528 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3529                                          u8 size, u16 *data)
3530 {
3531         union ich8_hws_flash_status hsfsts;
3532         union ich8_hws_flash_ctrl hsflctl;
3533         u32 flash_linear_addr;
3534         u32 flash_data = 0;
3535         s32 ret_val = -E1000_ERR_NVM;
3536         u8 count = 0;
3537
3538         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3539
3540         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3541                 return -E1000_ERR_NVM;
3542         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3543                              hw->nvm.flash_base_addr);
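        /* flash_linear_addr is a byte address: the caller's byte offset
         * added to the base of the flash region reserved for the NVM.
         */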
3544
3545         do {
3546                 usec_delay(1);
3547                 /* Steps */
3548                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3549                 if (ret_val != E1000_SUCCESS)
3550                         break;
3551                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3552
3553                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3554                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3555                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3556                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3557                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3558
3559                 ret_val = e1000_flash_cycle_ich8lan(hw,
3560                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3561
3562                 /* If FCERR is set, clear it and try the whole
3563                  * sequence a few more times; otherwise read in
3564                  * (shift in) the Flash Data0, least significant
3565                  * byte first.
3566                  */
3567                 if (ret_val == E1000_SUCCESS) {
3568                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3569                         if (size == 1)
3570                                 *data = (u8)(flash_data & 0x000000FF);
3571                         else if (size == 2)
3572                                 *data = (u16)(flash_data & 0x0000FFFF);
3573                         break;
3574                 } else {
3575                         /* If we've gotten here, then things are probably
3576                          * completely hosed, but if the error condition is
3577                          * detected, it won't hurt to give it another try...
3578                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3579                          */
3580                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3581                                                               ICH_FLASH_HSFSTS);
3582                         if (hsfsts.hsf_status.flcerr) {
3583                                 /* Repeat for some time before giving up. */
3584                                 continue;
3585                         } else if (!hsfsts.hsf_status.flcdone) {
3586                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3587                                 break;
3588                         }
3589                 }
3590         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3591
3592         return ret_val;
3593 }
3594
3595
3596 /**
3597  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3598  *  @hw: pointer to the HW structure
3599  *  @offset: The offset (in words) of the word(s) to write.
3600  *  @words: Size of data to write in words
3601  *  @data: Pointer to the word(s) to write at offset.
3602  *
3603  *  Writes the word(s) to the shadow RAM, to be committed on checksum update.
3604  **/
3605 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3606                                    u16 *data)
3607 {
3608         struct e1000_nvm_info *nvm = &hw->nvm;
3609         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3610         u16 i;
3611
3612         DEBUGFUNC("e1000_write_nvm_ich8lan");
3613
3614         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3615             (words == 0)) {
3616                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3617                 return -E1000_ERR_NVM;
3618         }
3619
3620         nvm->ops.acquire(hw);
3621
3622         for (i = 0; i < words; i++) {
3623                 dev_spec->shadow_ram[offset+i].modified = true;
3624                 dev_spec->shadow_ram[offset+i].value = data[i];
3625         }
3626
3627         nvm->ops.release(hw);
3628
3629         return E1000_SUCCESS;
3630 }
3631
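/* Illustrative sketch (not part of the original file): generic NVM writes are
 * routed through the staged path above, so a typical caller sequence is:
 *
 *	u16 val = 0x1234;                              hypothetical value
 *	hw->nvm.ops.write(hw, word_offset, 1, &val);   stage in shadow RAM
 *	hw->nvm.ops.update(hw);                        commit and fix checksum
 */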
3632 /**
3633  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3634  *  @hw: pointer to the HW structure
3635  *
3636  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3637  *  which writes the checksum to the shadow ram.  The changes in the shadow
3638  *  ram are then committed to the EEPROM by processing each bank at a time
3639  *  checking for the modified bit and writing only the pending changes.
3640  *  After a successful commit, the shadow ram is cleared and is ready for
3641  *  future writes.
3642  **/
3643 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3644 {
3645         struct e1000_nvm_info *nvm = &hw->nvm;
3646         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3647         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3648         s32 ret_val;
3649         u16 data = 0;
3650
3651         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3652
3653         ret_val = e1000_update_nvm_checksum_generic(hw);
3654         if (ret_val)
3655                 goto out;
3656
3657         if (nvm->type != e1000_nvm_flash_sw)
3658                 goto out;
3659
3660         nvm->ops.acquire(hw);
3661
3662         /* We're writing to the opposite bank so if we're on bank 1,
3663          * write to bank 0 etc.  We also need to erase the segment that
3664          * is going to be written
3665          */
3666         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3667         if (ret_val != E1000_SUCCESS) {
3668                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3669                 bank = 0;
3670         }
3671
3672         if (bank == 0) {
3673                 new_bank_offset = nvm->flash_bank_size;
3674                 old_bank_offset = 0;
3675                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3676                 if (ret_val)
3677                         goto release;
3678         } else {
3679                 old_bank_offset = nvm->flash_bank_size;
3680                 new_bank_offset = 0;
3681                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3682                 if (ret_val)
3683                         goto release;
3684         }
3685         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3686                 if (dev_spec->shadow_ram[i].modified) {
3687                         data = dev_spec->shadow_ram[i].value;
3688                 } else {
3689                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3690                                                                 old_bank_offset,
3691                                                                 &data);
3692                         if (ret_val)
3693                                 break;
3694                 }
3695                 /* If the word is 0x13, then make sure the signature bits
3696                  * (15:14) are 11b until the commit has completed.
3697                  * This will allow us to write 10b which indicates the
3698                  * signature is valid.  We want to do this after the write
3699                  * has completed so that we don't mark the segment valid
3700                  * while the write is still in progress
3701                  */
3702                 if (i == E1000_ICH_NVM_SIG_WORD)
3703                         data |= E1000_ICH_NVM_SIG_MASK;
3704
3705                 /* Convert offset to bytes. */
3706                 act_offset = (i + new_bank_offset) << 1;
3707
3708                 usec_delay(100);
3709
3710                 /* Write the bytes to the new bank. */
3711                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3712                                                                act_offset,
3713                                                                (u8)data);
3714                 if (ret_val)
3715                         break;
3716
3717                 usec_delay(100);
3718                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3719                                                           act_offset + 1,
3720                                                           (u8)(data >> 8));
3721                 if (ret_val)
3722                         break;
3723         }
3724
3725         /* Don't bother writing the segment valid bits if sector
3726          * programming failed.
3727          */
3728         if (ret_val) {
3729                 DEBUGOUT("Flash commit failed.\n");
3730                 goto release;
3731         }
3732
3733         /* Finally, validate the new segment by setting bits 15:14
3734          * to 10b in word 0x13.  This can be done without an
3735          * erase because these bits start out as 11b and we only
3736          * need to change bit 14 to 0b.
3737          */
3738         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3739         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3740         if (ret_val)
3741                 goto release;
3742
3743         data &= 0xBFFF;
3744         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
3745                                                        (u8)(data >> 8));
3746         if (ret_val)
3747                 goto release;
3748
3749         /* And invalidate the previously valid segment by setting
3750          * the high byte of its signature word (0x13) to 0. This can be
3751          * done without an erase because a flash erase sets all bits
3752          * to 1's, and 1's can be written to 0's without an erase.
3753          */
3754         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3755
3756         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3757
3758         if (ret_val)
3759                 goto release;
3760
3761         /* Great!  Everything worked, we can now clear the cached entries. */
3762         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3763                 dev_spec->shadow_ram[i].modified = false;
3764                 dev_spec->shadow_ram[i].value = 0xFFFF;
3765         }
3766
3767 release:
3768         nvm->ops.release(hw);
3769
3770         /* Reload the EEPROM, or else modifications will not appear
3771          * until after the next adapter reset.
3772          */
3773         if (!ret_val) {
3774                 nvm->ops.reload(hw);
3775                 msec_delay(10);
3776         }
3777
3778 out:
3779         if (ret_val)
3780                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3781
3782         return ret_val;
3783 }
3784
3785 /**
3786  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3787  *  @hw: pointer to the HW structure
3788  *
3789  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
3790  *  If the bit is 0, the EEPROM has been modified but the checksum was not
3791  *  calculated; in that case we need to calculate the checksum and set bit 6.
3792  **/
3793 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3794 {
3795         s32 ret_val;
3796         u16 data;
3797         u16 word;
3798         u16 valid_csum_mask;
3799
3800         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3801
3802         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3803          * the checksum needs to be fixed.  This bit is an indication that
3804          * the NVM was prepared by OEM software and did not calculate
3805          * the checksum...a likely scenario.
3806          */
3807         switch (hw->mac.type) {
3808         case e1000_pch_lpt:
3809                 word = NVM_COMPAT;
3810                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3811                 break;
3812         default:
3813                 word = NVM_FUTURE_INIT_WORD1;
3814                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3815                 break;
3816         }
3817
3818         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3819         if (ret_val)
3820                 return ret_val;
3821
3822         if (!(data & valid_csum_mask)) {
3823                 data |= valid_csum_mask;
3824                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3825                 if (ret_val)
3826                         return ret_val;
3827                 ret_val = hw->nvm.ops.update(hw);
3828                 if (ret_val)
3829                         return ret_val;
3830         }
3831
3832         return e1000_validate_nvm_checksum_generic(hw);
3833 }
3834
3835 /**
3836  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3837  *  @hw: pointer to the HW structure
3838  *  @offset: The offset (in bytes) of the byte/word to write.
3839  *  @size: Size of data to write, 1=byte 2=word
3840  *  @data: The byte(s) to write to the NVM.
3841  *
3842  *  Writes one/two bytes to the NVM using the flash access registers.
3843  **/
3844 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3845                                           u8 size, u16 data)
3846 {
3847         union ich8_hws_flash_status hsfsts;
3848         union ich8_hws_flash_ctrl hsflctl;
3849         u32 flash_linear_addr;
3850         u32 flash_data = 0;
3851         s32 ret_val;
3852         u8 count = 0;
3853
3854         DEBUGFUNC("e1000_write_flash_data_ich8lan");
3855
3856         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3857                 return -E1000_ERR_NVM;
3858
3859         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3860                              hw->nvm.flash_base_addr);
3861
3862         do {
3863                 usec_delay(1);
3864                 /* Steps */
3865                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3866                 if (ret_val != E1000_SUCCESS)
3867                         break;
3868                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3869
3870                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3871                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3872                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3873                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3874
3875                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3876
3877                 if (size == 1)
3878                         flash_data = (u32)data & 0x00FF;
3879                 else
3880                         flash_data = (u32)data;
3881
3882                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3883
3884                 /* If FCERR is set, clear it and try the whole
3885                  * sequence a few more times; otherwise we are done.
3886                  */
3887                 ret_val =
3888                     e1000_flash_cycle_ich8lan(hw,
3889                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3890                 if (ret_val == E1000_SUCCESS)
3891                         break;
3892
3893                 /* If we're here, then things are most likely
3894                  * completely hosed, but if the error condition
3895                  * is detected, it won't hurt to give it another
3896                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3897                  */
3898                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3899                 if (hsfsts.hsf_status.flcerr)
3900                         /* Repeat for some time before giving up. */
3901                         continue;
3902                 if (!hsfsts.hsf_status.flcdone) {
3903                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3904                         break;
3905                 }
3906         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3907
3908         return ret_val;
3909 }
3910
3911
3912 /**
3913  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3914  *  @hw: pointer to the HW structure
3915  *  @offset: The index of the byte to write.
3916  *  @data: The byte to write to the NVM.
3917  *
3918  *  Writes a single byte to the NVM using the flash access registers.
3919  **/
3920 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3921                                           u8 data)
3922 {
3923         u16 word = (u16)data;
3924
3925         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3926
3927         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3928 }
3929
3930
3931
3932 /**
3933  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3934  *  @hw: pointer to the HW structure
3935  *  @offset: The offset of the byte to write.
3936  *  @byte: The byte to write to the NVM.
3937  *
3938  *  Writes a single byte to the NVM using the flash access registers.
3939  *  Goes through a retry algorithm before giving up.
3940  **/
3941 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3942                                                 u32 offset, u8 byte)
3943 {
3944         s32 ret_val;
3945         u16 program_retries;
3946
3947         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3948
3949         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3950         if (!ret_val)
3951                 return ret_val;
3952
3953         for (program_retries = 0; program_retries < 100; program_retries++) {
3954                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3955                 usec_delay(100);
3956                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3957                 if (ret_val == E1000_SUCCESS)
3958                         break;
3959         }
3960         if (program_retries == 100)
3961                 return -E1000_ERR_NVM;
3962
3963         return E1000_SUCCESS;
3964 }
3965
3966 /**
3967  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3968  *  @hw: pointer to the HW structure
3969  *  @bank: 0 for first bank, 1 for second bank, etc.
3970  *
3971  *  Erases the specified bank.  Each bank is a 4k block; banks are 0 based.
3972  *  Bank N starts at byte offset 4096 * N from the flash base address.
3973  **/
3974 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3975 {
3976         struct e1000_nvm_info *nvm = &hw->nvm;
3977         union ich8_hws_flash_status hsfsts;
3978         union ich8_hws_flash_ctrl hsflctl;
3979         u32 flash_linear_addr;
3980         /* bank size is in 16bit words - adjust to bytes */
3981         u32 flash_bank_size = nvm->flash_bank_size * 2;
3982         s32 ret_val;
3983         s32 count = 0;
3984         s32 j, iteration, sector_size;
3985
3986         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3987
3988         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3989
3990         /* Determine HW Sector size: Read BERASE bits of hw flash status
3991          * register
3992          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3993          *     consecutive sectors.  The start index for the nth Hw sector
3994          *     can be calculated as = bank * 4096 + n * 256
3995          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3996          *     The start index for the nth Hw sector can be calculated
3997          *     as = bank * 4096
3998          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3999          *     (ich9 only, otherwise error condition)
4000          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4001          */
4002         switch (hsfsts.hsf_status.berasesz) {
4003         case 0:
4004                 /* Hw sector size 256 */
4005                 sector_size = ICH_FLASH_SEG_SIZE_256;
4006                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4007                 break;
4008         case 1:
4009                 sector_size = ICH_FLASH_SEG_SIZE_4K;
4010                 iteration = 1;
4011                 break;
4012         case 2:
4013                 sector_size = ICH_FLASH_SEG_SIZE_8K;
4014                 iteration = 1;
4015                 break;
4016         case 3:
4017                 sector_size = ICH_FLASH_SEG_SIZE_64K;
4018                 iteration = 1;
4019                 break;
4020         default:
4021                 return -E1000_ERR_NVM;
4022         }
4023
4024         /* Start with the base address, then add the sector offset. */
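        /* Bank 1 starts flash_bank_size bytes above the flash base address. */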
4025         flash_linear_addr = hw->nvm.flash_base_addr;
4026         flash_linear_addr += (bank) ? flash_bank_size : 0;
4027
4028         for (j = 0; j < iteration; j++) {
4029                 do {
4030                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4031
4032                         /* Steps */
4033                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4034                         if (ret_val)
4035                                 return ret_val;
4036
4037                         /* Write a value 11 (block Erase) in Flash
4038                          * Cycle field in hw flash control
4039                          */
4040                         hsflctl.regval =
4041                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4042
4043                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4044                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4045                                                 hsflctl.regval);
4046
4047                         /* Write the last 24 bits of an index within the
4048                          * block into Flash Linear address field in Flash
4049                          * Address.
4050                          */
4051                         flash_linear_addr += (j * sector_size);
4052                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4053                                               flash_linear_addr);
4054
4055                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4056                         if (ret_val == E1000_SUCCESS)
4057                                 break;
4058
4059                         /* Check if FCERR is set to 1.  If 1,
4060                          * clear it and try the whole sequence
4061                          * a few more times else Done
4062                          */
4063                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4064                                                       ICH_FLASH_HSFSTS);
4065                         if (hsfsts.hsf_status.flcerr)
4066                                 /* repeat for some time before giving up */
4067                                 continue;
4068                         else if (!hsfsts.hsf_status.flcdone)
4069                                 return ret_val;
4070                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4071         }
4072
4073         return E1000_SUCCESS;
4074 }
4075
4076 /**
4077  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4078  *  @hw: pointer to the HW structure
4079  *  @data: Pointer to the LED settings
4080  *
4081  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4082  *  settings are all 0's or F's, set the LED default to a valid LED default
4083  *  setting.
4084  **/
4085 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4086 {
4087         s32 ret_val;
4088
4089         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4090
4091         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4092         if (ret_val) {
4093                 DEBUGOUT("NVM Read Error\n");
4094                 return ret_val;
4095         }
4096
4097         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4098                 *data = ID_LED_DEFAULT_ICH8LAN;
4099
4100         return E1000_SUCCESS;
4101 }
4102
4103 /**
4104  *  e1000_id_led_init_pchlan - store LED configurations
4105  *  @hw: pointer to the HW structure
4106  *
4107  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4108  *  the PHY LED configuration register.
4109  *
4110  *  PCH also does not have an "always on" or "always off" mode which
4111  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4112  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4113  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4114  *  link based on logic in e1000_led_[on|off]_pchlan().
4115  **/
4116 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4117 {
4118         struct e1000_mac_info *mac = &hw->mac;
4119         s32 ret_val;
4120         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4121         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4122         u16 data, i, temp, shift;
4123
4124         DEBUGFUNC("e1000_id_led_init_pchlan");
4125
4126         /* Get default ID LED modes */
4127         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4128         if (ret_val)
4129                 return ret_val;
4130
4131         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4132         mac->ledctl_mode1 = mac->ledctl_default;
4133         mac->ledctl_mode2 = mac->ledctl_default;
4134
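        /* Each 4-bit field of the ID LED word selects a mode; the matching
         * PHY LED config field is 5 bits wide, hence the shift of i * 5.
         */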
4135         for (i = 0; i < 4; i++) {
4136                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4137                 shift = (i * 5);
4138                 switch (temp) {
4139                 case ID_LED_ON1_DEF2:
4140                 case ID_LED_ON1_ON2:
4141                 case ID_LED_ON1_OFF2:
4142                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4143                         mac->ledctl_mode1 |= (ledctl_on << shift);
4144                         break;
4145                 case ID_LED_OFF1_DEF2:
4146                 case ID_LED_OFF1_ON2:
4147                 case ID_LED_OFF1_OFF2:
4148                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4149                         mac->ledctl_mode1 |= (ledctl_off << shift);
4150                         break;
4151                 default:
4152                         /* Do nothing */
4153                         break;
4154                 }
4155                 switch (temp) {
4156                 case ID_LED_DEF1_ON2:
4157                 case ID_LED_ON1_ON2:
4158                 case ID_LED_OFF1_ON2:
4159                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4160                         mac->ledctl_mode2 |= (ledctl_on << shift);
4161                         break;
4162                 case ID_LED_DEF1_OFF2:
4163                 case ID_LED_ON1_OFF2:
4164                 case ID_LED_OFF1_OFF2:
4165                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4166                         mac->ledctl_mode2 |= (ledctl_off << shift);
4167                         break;
4168                 default:
4169                         /* Do nothing */
4170                         break;
4171                 }
4172         }
4173
4174         return E1000_SUCCESS;
4175 }
4176
4177 /**
4178  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4179  *  @hw: pointer to the HW structure
4180  *
4181  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4182  *  register, so the bus width is hard coded.
4183  **/
4184 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4185 {
4186         struct e1000_bus_info *bus = &hw->bus;
4187         s32 ret_val;
4188
4189         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4190
4191         ret_val = e1000_get_bus_info_pcie_generic(hw);
4192
4193         /* ICH devices are "PCI Express"-ish.  They have
4194          * a configuration space, but do not contain
4195          * PCI Express Capability registers, so bus width
4196          * must be hardcoded.
4197          */
4198         if (bus->width == e1000_bus_width_unknown)
4199                 bus->width = e1000_bus_width_pcie_x1;
4200
4201         return ret_val;
4202 }
4203
4204 /**
4205  *  e1000_reset_hw_ich8lan - Reset the hardware
4206  *  @hw: pointer to the HW structure
4207  *
4208  *  Does a full reset of the hardware which includes a reset of the PHY and
4209  *  MAC.
4210  **/
4211 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4212 {
4213         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4214         u16 kum_cfg;
4215         u32 ctrl, reg;
4216         s32 ret_val;
4217
4218         DEBUGFUNC("e1000_reset_hw_ich8lan");
4219
4220         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4221          * on the last TLP read/write transaction when MAC is reset.
4222          */
4223         ret_val = e1000_disable_pcie_master_generic(hw);
4224         if (ret_val)
4225                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4226
4227         DEBUGOUT("Masking off all interrupts\n");
4228         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4229
4230         /* Disable the Transmit and Receive units.  Then delay to allow
4231          * any pending transactions to complete before we hit the MAC
4232          * with the global reset.
4233          */
4234         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4235         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4236         E1000_WRITE_FLUSH(hw);
4237
4238         msec_delay(10);
4239
4240         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4241         if (hw->mac.type == e1000_ich8lan) {
4242                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4243                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4244                 /* Set Packet Buffer Size to 16k. */
4245                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4246         }
4247
4248         if (hw->mac.type == e1000_pchlan) {
4249                 /* Save the NVM K1 bit setting*/
4250                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4251                 if (ret_val)
4252                         return ret_val;
4253
4254                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4255                         dev_spec->nvm_k1_enabled = true;
4256                 else
4257                         dev_spec->nvm_k1_enabled = false;
4258         }
4259
4260         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4261
4262         if (!hw->phy.ops.check_reset_block(hw)) {
4263                 /* Full-chip reset requires MAC and PHY reset at the same
4264                  * time to make sure the interface between MAC and the
4265                  * external PHY is reset.
4266                  */
4267                 ctrl |= E1000_CTRL_PHY_RST;
4268
4269                 /* Gate automatic PHY configuration by hardware on
4270                  * non-managed 82579
4271                  */
4272                 if ((hw->mac.type == e1000_pch2lan) &&
4273                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4274                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4275         }
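        /* Acquire the software flag (SW/FW arbitration) before issuing the
         * global reset.
         */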
4276         ret_val = e1000_acquire_swflag_ich8lan(hw);
4277         DEBUGOUT("Issuing a global reset to ich8lan\n");
4278         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4279         /* cannot issue a flush here because it hangs the hardware */
4280         msec_delay(20);
4281
4282         /* Set Phy Config Counter to 50msec */
4283         if (hw->mac.type == e1000_pch2lan) {
4284                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4285                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4286                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4287                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4288         }
4289
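        /* Only drop the swflag mutex if the acquire above succeeded. */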
4290         if (!ret_val)
4291                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4292
4293         if (ctrl & E1000_CTRL_PHY_RST) {
4294                 ret_val = hw->phy.ops.get_cfg_done(hw);
4295                 if (ret_val)
4296                         return ret_val;
4297
4298                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4299                 if (ret_val)
4300                         return ret_val;
4301         }
4302
4303         /* For PCH, this write will make sure that any noise
4304          * will be detected as a CRC error and be dropped rather than show up
4305          * as a bad packet to the DMA engine.
4306          */
4307         if (hw->mac.type == e1000_pchlan)
4308                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4309
4310         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4311         E1000_READ_REG(hw, E1000_ICR);
4312
4313         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4314         reg |= E1000_KABGTXD_BGSQLBIAS;
4315         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4316
4317         return E1000_SUCCESS;
4318 }
4319
4320 /**
4321  *  e1000_init_hw_ich8lan - Initialize the hardware
4322  *  @hw: pointer to the HW structure
4323  *
4324  *  Prepares the hardware for transmit and receive by doing the following:
4325  *   - initialize hardware bits
4326  *   - initialize LED identification
4327  *   - setup receive address registers
4328  *   - setup flow control
4329  *   - setup transmit descriptors
4330  *   - clear statistics
4331  **/
4332 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4333 {
4334         struct e1000_mac_info *mac = &hw->mac;
4335         u32 ctrl_ext, txdctl, snoop;
4336         s32 ret_val;
4337         u16 i;
4338
4339         DEBUGFUNC("e1000_init_hw_ich8lan");
4340
4341         e1000_initialize_hw_bits_ich8lan(hw);
4342
4343         /* Initialize identification LED */
4344         ret_val = mac->ops.id_led_init(hw);
4345         /* An error is not fatal and we should not stop init due to this */
4346         if (ret_val)
4347                 DEBUGOUT("Error initializing identification LED\n");
4348
4349         /* Setup the receive address. */
4350         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4351
4352         /* Zero out the Multicast HASH table */
4353         DEBUGOUT("Zeroing the MTA\n");
4354         for (i = 0; i < mac->mta_reg_count; i++)
4355                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4356
4357         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4358          * the ME.  Disable wakeup by clearing the host wakeup bit.
4359          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4360          */
4361         if (hw->phy.type == e1000_phy_82578) {
4362                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4363                 i &= ~BM_WUC_HOST_WU_BIT;
4364                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4365                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4366                 if (ret_val)
4367                         return ret_val;
4368         }
4369
4370         /* Setup link and flow control */
4371         ret_val = mac->ops.setup_link(hw);
4372
4373         /* Set the transmit descriptor write-back policy for both queues */
4374         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4375         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4376                   E1000_TXDCTL_FULL_TX_DESC_WB);
4377         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4378                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4379         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4380         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4381         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4382                   E1000_TXDCTL_FULL_TX_DESC_WB);
4383         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4384                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4385         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4386
4387         /* ICH8 has opposite polarity of no_snoop bits.
4388          * By default, we should use snoop behavior.
4389          */
4390         if (mac->type == e1000_ich8lan)
4391                 snoop = PCIE_ICH8_SNOOP_ALL;
4392         else
4393                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4394         e1000_set_pcie_no_snoop_generic(hw, snoop);
4395
4396         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4397         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4398         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4399
4400         /* Clear all of the statistics registers (clear on read).  It is
4401          * important that we do this after we have tried to establish link
4402          * because the symbol error count will increment wildly if there
4403          * is no link.
4404          */
4405         e1000_clear_hw_cntrs_ich8lan(hw);
4406
4407         return ret_val;
4408 }
4409
4410 /**
4411  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4412  *  @hw: pointer to the HW structure
4413  *
4414  *  Sets/Clears required hardware bits necessary for correctly setting up the
4415  *  hardware for transmit and receive.
4416  **/
4417 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4418 {
4419         u32 reg;
4420
4421         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4422
4423         /* Extended Device Control */
4424         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4425         reg |= (1 << 22);
4426         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4427         if (hw->mac.type >= e1000_pchlan)
4428                 reg |= E1000_CTRL_EXT_PHYPDEN;
4429         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4430
4431         /* Transmit Descriptor Control 0 */
4432         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4433         reg |= (1 << 22);
4434         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4435
4436         /* Transmit Descriptor Control 1 */
4437         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4438         reg |= (1 << 22);
4439         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4440
4441         /* Transmit Arbitration Control 0 */
4442         reg = E1000_READ_REG(hw, E1000_TARC(0));
4443         if (hw->mac.type == e1000_ich8lan)
4444                 reg |= (1 << 28) | (1 << 29);
4445         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4446         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4447
4448         /* Transmit Arbitration Control 1 */
4449         reg = E1000_READ_REG(hw, E1000_TARC(1));
4450         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4451                 reg &= ~(1 << 28);
4452         else
4453                 reg |= (1 << 28);
4454         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4455         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4456
4457         /* Device Status */
4458         if (hw->mac.type == e1000_ich8lan) {
4459                 reg = E1000_READ_REG(hw, E1000_STATUS);
4460                 reg &= ~(1 << 31);
4461                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4462         }
4463
4464         /* Work around a descriptor data corruption issue during NFS v2 UDP
4465          * traffic by simply disabling the NFS filtering capability
4466          */
4467         reg = E1000_READ_REG(hw, E1000_RFCTL);
4468         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4469
4470         /* Disable IPv6 extension header parsing because some malformed
4471          * IPv6 headers can hang the Rx.
4472          */
4473         if (hw->mac.type == e1000_ich8lan)
4474                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4475         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4476
4477         /* Enable ECC on Lynxpoint */
4478         if (hw->mac.type == e1000_pch_lpt) {
4479                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4480                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4481                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4482
4483                 reg = E1000_READ_REG(hw, E1000_CTRL);
4484                 reg |= E1000_CTRL_MEHE;
4485                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4486         }
4487
4488         return;
4489 }
4490
4491 /**
4492  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4493  *  @hw: pointer to the HW structure
4494  *
4495  *  Determines which flow control settings to use, then configures flow
4496  *  control.  Calls the appropriate media-specific link configuration
4497  *  function.  Assuming the adapter has a valid link partner, a valid link
4498  *  should be established.  Assumes the hardware has previously been reset
4499  *  and the transmitter and receiver are not enabled.
4500  **/
4501 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4502 {
4503         s32 ret_val;
4504
4505         DEBUGFUNC("e1000_setup_link_ich8lan");
4506
4507         if (hw->phy.ops.check_reset_block(hw))
4508                 return E1000_SUCCESS;
4509
4510         /* ICH parts do not have a word in the NVM to determine
4511          * the default flow control setting, so we explicitly
4512          * set it to full.
4513          */
4514         if (hw->fc.requested_mode == e1000_fc_default)
4515                 hw->fc.requested_mode = e1000_fc_full;
4516
4517         /* Save off the requested flow control mode for use later.  Depending
4518          * on the link partner's capabilities, we may or may not use this mode.
4519          */
4520         hw->fc.current_mode = hw->fc.requested_mode;
4521
4522         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4523                 hw->fc.current_mode);
4524
4525         /* Continue to configure the copper link. */
4526         ret_val = hw->mac.ops.setup_physical_interface(hw);
4527         if (ret_val)
4528                 return ret_val;
4529
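        /* Program the flow control pause timer; the PCH-class PHYs below also
         * mirror the refresh time and pause time into PHY-side registers.
         */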
4530         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4531         if ((hw->phy.type == e1000_phy_82578) ||
4532             (hw->phy.type == e1000_phy_82579) ||
4533             (hw->phy.type == e1000_phy_i217) ||
4534             (hw->phy.type == e1000_phy_82577)) {
4535                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4536
4537                 ret_val = hw->phy.ops.write_reg(hw,
4538                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4539                                              hw->fc.pause_time);
4540                 if (ret_val)
4541                         return ret_val;
4542         }
4543
4544         return e1000_set_fc_watermarks_generic(hw);
4545 }
4546
4547 /**
4548  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4549  *  @hw: pointer to the HW structure
4550  *
4551  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4552  *  when polling the PHY, then calls the generic setup_copper_link to finish
4553  *  configuring the copper link.
4554  **/
4555 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4556 {
4557         u32 ctrl;
4558         s32 ret_val;
4559         u16 reg_data;
4560
4561         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4562
4563         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4564         ctrl |= E1000_CTRL_SLU;
4565         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4566         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4567
4568         /* Set the mac to wait the maximum time between each iteration
4569          * and increase the max iterations when polling the phy;
4570          * this fixes erroneous timeouts at 10Mbps.
4571          */
4572         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4573                                                0xFFFF);
4574         if (ret_val)
4575                 return ret_val;
4576         ret_val = e1000_read_kmrn_reg_generic(hw,
4577                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4578                                               &reg_data);
4579         if (ret_val)
4580                 return ret_val;
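        /* Per the note above, raising the low 6 bits of the in-band
         * parameter increases the PHY polling iterations.
         */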
4581         reg_data |= 0x3F;
4582         ret_val = e1000_write_kmrn_reg_generic(hw,
4583                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4584                                                reg_data);
4585         if (ret_val)
4586                 return ret_val;
4587
4588         switch (hw->phy.type) {
4589         case e1000_phy_igp_3:
4590                 ret_val = e1000_copper_link_setup_igp(hw);
4591                 if (ret_val)
4592                         return ret_val;
4593                 break;
4594         case e1000_phy_bm:
4595         case e1000_phy_82578:
4596                 ret_val = e1000_copper_link_setup_m88(hw);
4597                 if (ret_val)
4598                         return ret_val;
4599                 break;
4600         case e1000_phy_82577:
4601         case e1000_phy_82579:
4602                 ret_val = e1000_copper_link_setup_82577(hw);
4603                 if (ret_val)
4604                         return ret_val;
4605                 break;
4606         case e1000_phy_ife:
4607                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4608                                                &reg_data);
4609                 if (ret_val)
4610                         return ret_val;
4611
4612                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4613
4614                 switch (hw->phy.mdix) {
4615                 case 1:
4616                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4617                         break;
4618                 case 2:
4619                         reg_data |= IFE_PMC_FORCE_MDIX;
4620                         break;
4621                 case 0:
4622                 default:
4623                         reg_data |= IFE_PMC_AUTO_MDIX;
4624                         break;
4625                 }
4626                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4627                                                 reg_data);
4628                 if (ret_val)
4629                         return ret_val;
4630                 break;
4631         default:
4632                 break;
4633         }
4634
4635         return e1000_setup_copper_link_generic(hw);
4636 }
4637
4638 /**
4639  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4640  *  @hw: pointer to the HW structure
4641  *
4642  *  Calls the PHY specific link setup function and then calls the
4643  *  generic setup_copper_link to finish configuring the link for
4644  *  Lynxpoint PCH devices
4645  **/
4646 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4647 {
4648         u32 ctrl;
4649         s32 ret_val;
4650
4651         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4652
4653         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4654         ctrl |= E1000_CTRL_SLU;
4655         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4656         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4657
4658         ret_val = e1000_copper_link_setup_82577(hw);
4659         if (ret_val)
4660                 return ret_val;
4661
4662         return e1000_setup_copper_link_generic(hw);
4663 }
4664
4665 /**
4666  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4667  *  @hw: pointer to the HW structure
4668  *  @speed: pointer to store current link speed
4669  *  @duplex: pointer to store the current link duplex
4670  *
4671  *  Calls the generic get_speed_and_duplex to retrieve the current link
4672  *  information and then calls the Kumeran lock loss workaround for links at
4673  *  gigabit speeds.
4674  **/
4675 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4676                                           u16 *duplex)
4677 {
4678         s32 ret_val;
4679
4680         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4681
4682         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4683         if (ret_val)
4684                 return ret_val;
4685
4686         if ((hw->mac.type == e1000_ich8lan) &&
4687             (hw->phy.type == e1000_phy_igp_3) &&
4688             (*speed == SPEED_1000)) {
4689                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4690         }
4691
4692         return ret_val;
4693 }
4694
4695 /**
4696  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4697  *  @hw: pointer to the HW structure
4698  *
4699  *  Work-around for 82566 Kumeran PCS lock loss:
4700  *  On link status change (i.e. PCI reset, speed change) and link is up and
4701  *  speed is gigabit:
4702  *    0) if workaround is optionally disabled do nothing
4703  *    1) wait 1ms for Kumeran link to come up
4704  *    2) check Kumeran Diagnostic register PCS lock loss bit
4705  *    3) if not set the link is locked (all is good), otherwise...
4706  *    4) reset the PHY
4707  *    5) repeat up to 10 times
4708  *  Note: this is only called for IGP3 copper when speed is 1gb.
4709  **/
4710 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4711 {
4712         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4713         u32 phy_ctrl;
4714         s32 ret_val;
4715         u16 i, data;
4716         bool link;
4717
4718         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4719
4720         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4721                 return E1000_SUCCESS;
4722
4723         /* Make sure link is up before proceeding.  If not just return.
4724          * Attempting this while the link is negotiating has been known to
4725          * foul up link stability.
4726          */
4727         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4728         if (!link)
4729                 return E1000_SUCCESS;
4730
4731         for (i = 0; i < 10; i++) {
4732                 /* read once to clear */
4733                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4734                 if (ret_val)
4735                         return ret_val;
4736                 /* and again to get new status */
4737                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4738                 if (ret_val)
4739                         return ret_val;
4740
4741                 /* check for PCS lock */
4742                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4743                         return E1000_SUCCESS;
4744
4745                 /* Issue PHY reset */
4746                 hw->phy.ops.reset(hw);
4747                 msec_delay_irq(5);
4748         }
4749         /* Disable GigE link negotiation */
4750         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4751         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4752                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4753         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4754
4755         /* Call gig speed drop workaround on Gig disable before accessing
4756          * any PHY registers
4757          */
4758         e1000_gig_downshift_workaround_ich8lan(hw);
4759
4760         /* unable to acquire PCS lock */
4761         return -E1000_ERR_PHY;
4762 }
4763
4764 /**
4765  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4766  *  @hw: pointer to the HW structure
4767  *  @state: boolean value used to set the current Kumeran workaround state
4768  *
4769  *  If ICH8, set the current Kumeran workaround state
4770  *  (enabled = true, disabled = false).
4771  **/
4772 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4773                                                  bool state)
4774 {
4775         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4776
4777         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4778
4779         if (hw->mac.type != e1000_ich8lan) {
4780                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4781                 return;
4782         }
4783
4784         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4785
4786         return;
4787 }
4788
4789 /**
4790  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4791  *  @hw: pointer to the HW structure
4792  *
4793  *  Workaround for 82566 power-down on D3 entry:
4794  *    1) disable gigabit link
4795  *    2) write VR power-down enable
4796  *    3) read it back
4797  *  Continue if successful, else issue LCD reset and repeat
4798  **/
4799 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4800 {
4801         u32 reg;
4802         u16 data;
4803         u8  retry = 0;
4804
4805         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4806
4807         if (hw->phy.type != e1000_phy_igp_3)
4808                 return;
4809
4810         /* Try the workaround twice (if needed) */
4811         do {
4812                 /* Disable link */
4813                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4814                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4815                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4816                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4817
4818                 /* Call gig speed drop workaround on Gig disable before
4819                  * accessing any PHY registers
4820                  */
4821                 if (hw->mac.type == e1000_ich8lan)
4822                         e1000_gig_downshift_workaround_ich8lan(hw);
4823
4824                 /* Write VR power-down enable */
4825                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4826                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4827                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4828                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4829
4830                 /* Read it back and test */
4831                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4832                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4833                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4834                         break;
4835
4836                 /* Issue PHY reset and repeat at most one more time */
4837                 reg = E1000_READ_REG(hw, E1000_CTRL);
4838                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4839                 retry++;
4840         } while (retry);
4841 }
4842
4843 /**
4844  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4845  *  @hw: pointer to the HW structure
4846  *
4847  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4848  *  LPLU, Gig disable, MDIC PHY reset):
4849  *    1) Set Kumeran Near-end loopback
4850  *    2) Clear Kumeran Near-end loopback
4851  *  Should only be called for ICH8[m] devices with any 1G Phy.
4852  **/
4853 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4854 {
4855         s32 ret_val;
4856         u16 reg_data;
4857
4858         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4859
4860         if ((hw->mac.type != e1000_ich8lan) ||
4861             (hw->phy.type == e1000_phy_ife))
4862                 return;
4863
4864         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4865                                               &reg_data);
4866         if (ret_val)
4867                 return;
4868         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4869         ret_val = e1000_write_kmrn_reg_generic(hw,
4870                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4871                                                reg_data);
4872         if (ret_val)
4873                 return;
4874         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4875         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4876                                      reg_data);
4877 }
4878
4879 /**
4880  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4881  *  @hw: pointer to the HW structure
4882  *
4883  *  During S0 to Sx transition, it is possible the link remains at gig
4884  *  instead of negotiating to a lower speed.  Before going to Sx, set
4885  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4886  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4887  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4888  *  needs to be written.
4889  *  Parts that support (and are linked to a partner which supports) EEE in
4890  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4891  *  than 10Mbps w/o EEE.
4892  **/
4893 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4894 {
4895         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4896         u32 phy_ctrl;
4897         s32 ret_val;
4898
4899         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4900
4901         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4902         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
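        /* The PHY_CTRL write is deferred to the 'out' label below so the
         * i217-specific fix-ups can adjust phy_ctrl first.
         */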
4903
4904         if (hw->phy.type == e1000_phy_i217) {
4905                 u16 phy_reg, device_id = hw->device_id;
4906
4907                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4908                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4909                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4910                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4911                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4912
4913                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4914                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4915                 }
4916
4917                 ret_val = hw->phy.ops.acquire(hw);
4918                 if (ret_val)
4919                         goto out;
4920
4921                 if (!dev_spec->eee_disable) {
4922                         u16 eee_advert;
4923
4924                         ret_val =
4925                             e1000_read_emi_reg_locked(hw,
4926                                                       I217_EEE_ADVERTISEMENT,
4927                                                       &eee_advert);
4928                         if (ret_val)
4929                                 goto release;
4930
4931                         /* Disable LPLU if both link partners support 100BaseT
4932                          * EEE and 100Full is advertised on both ends of the
4933                          * link, and enable Auto Enable LPI since there will
4934                          * be no driver to enable LPI while in Sx.
4935                          */
4936                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4937                             (dev_spec->eee_lp_ability &
4938                              I82579_EEE_100_SUPPORTED) &&
4939                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4940                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4941                                               E1000_PHY_CTRL_NOND0A_LPLU);
4942
4943                                 /* Set Auto Enable LPI after link up */
4944                                 hw->phy.ops.read_reg_locked(hw,
4945                                                             I217_LPI_GPIO_CTRL,
4946                                                             &phy_reg);
4947                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4948                                 hw->phy.ops.write_reg_locked(hw,
4949                                                              I217_LPI_GPIO_CTRL,
4950                                                              phy_reg);
4951                         }
4952                 }
4953
4954                 /* For i217 Intel Rapid Start Technology support,
4955                  * when the system is going into Sx and no manageability engine
4956                  * is present, the driver must configure proxy to reset only on
4957                  * power good.  LPI (Low Power Idle) state must also reset only
4958                  * on power good, as well as the MTA (Multicast table array).
4959                  * The SMBus release must also be disabled on LCD reset.
4960                  */
4961                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4962                       E1000_ICH_FWSM_FW_VALID)) {
4963                         /* Enable proxy to reset only on power good. */
4964                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4965                                                     &phy_reg);
4966                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4967                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4968                                                      phy_reg);
4969
4970                         /* Set the enable LPI (EEE) bit to reset only on
4971                          * power good.
4972                          */
4973                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4974                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4975                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4976
4977                         /* Disable the SMB release on LCD reset. */
4978                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4979                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4980                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4981                 }
4982
4983                 /* Enable MTA to reset for Intel Rapid Start Technology
4984                  * Support
4985                  */
4986                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4987                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4988                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4989
4990 release:
4991                 hw->phy.ops.release(hw);
4992         }
4993 out:
4994         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4995
4996         if (hw->mac.type == e1000_ich8lan)
4997                 e1000_gig_downshift_workaround_ich8lan(hw);
4998
4999         if (hw->mac.type >= e1000_pchlan) {
5000                 e1000_oem_bits_config_ich8lan(hw, false);
5001
5002                 /* Reset PHY to activate OEM bits on 82577/8 */
5003                 if (hw->mac.type == e1000_pchlan)
5004                         e1000_phy_hw_reset_generic(hw);
5005
5006                 ret_val = hw->phy.ops.acquire(hw);
5007                 if (ret_val)
5008                         return;
5009                 e1000_write_smbus_addr(hw);
5010                 hw->phy.ops.release(hw);
5011         }
5012
5013         return;
5014 }
5015
5016 /**
5017  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5018  *  @hw: pointer to the HW structure
5019  *
5020  *  During Sx to S0 transitions on non-managed devices or managed devices
5021  *  on which PHY resets are not blocked, if the PHY registers cannot be
5022  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5023  *  the PHY.
5024  *  On i217, setup Intel Rapid Start Technology.
5025  **/
5026 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5027 {
5028         s32 ret_val;
5029
5030         DEBUGFUNC("e1000_resume_workarounds_pchlan");
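        /* Nothing to do for parts older than 82579 (pch2lan). */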
5031         if (hw->mac.type < e1000_pch2lan)
5032                 return E1000_SUCCESS;
5033
5034         ret_val = e1000_init_phy_workarounds_pchlan(hw);
5035         if (ret_val) {
5036                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5037                 return ret_val;
5038         }
5039
5040         /* For i217 Intel Rapid Start Technology support when the system
5041          * is transitioning from Sx and no manageability engine is present
5042          * configure SMBus to restore on reset, disable proxy, and enable
5043          * the reset on MTA (Multicast table array).
5044          */
5045         if (hw->phy.type == e1000_phy_i217) {
5046                 u16 phy_reg;
5047
5048                 ret_val = hw->phy.ops.acquire(hw);
5049                 if (ret_val) {
5050                         DEBUGOUT("Failed to setup iRST\n");
5051                         return ret_val;
5052                 }
5053
5054                 /* Clear Auto Enable LPI after link up */
5055                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5056                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5057                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5058
5059                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5060                     E1000_ICH_FWSM_FW_VALID)) {
5061                         /* Restore clear on SMB if no manageability engine
5062                          * is present
5063                          */
5064                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5065                                                               &phy_reg);
5066                         if (ret_val)
5067                                 goto release;
5068                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5069                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5070
5071                         /* Disable Proxy */
5072                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5073                 }
5074                 /* Enable reset on MTA */
5075                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5076                                                       &phy_reg);
5077                 if (ret_val)
5078                         goto release;
5079                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5080                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5081 release:
5082                 if (ret_val)
5083                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5084                 hw->phy.ops.release(hw);
5085                 return ret_val;
5086         }
5087         return E1000_SUCCESS;
5088 }
5089
5090 /**
5091  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5092  *  @hw: pointer to the HW structure
5093  *
5094  *  Return the LED back to the default configuration.
5095  **/
5096 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5097 {
5098         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5099
5100         if (hw->phy.type == e1000_phy_ife)
5101                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5102                                              0);
5103
5104         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5105         return E1000_SUCCESS;
5106 }
5107
5108 /**
5109  *  e1000_led_on_ich8lan - Turn LEDs on
5110  *  @hw: pointer to the HW structure
5111  *
5112  *  Turn on the LEDs.
5113  **/
5114 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5115 {
5116         DEBUGFUNC("e1000_led_on_ich8lan");
5117
5118         if (hw->phy.type == e1000_phy_ife)
5119                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5120                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5121
5122         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5123         return E1000_SUCCESS;
5124 }
5125
5126 /**
5127  *  e1000_led_off_ich8lan - Turn LEDs off
5128  *  @hw: pointer to the HW structure
5129  *
5130  *  Turn off the LEDs.
5131  **/
5132 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5133 {
5134         DEBUGFUNC("e1000_led_off_ich8lan");
5135
5136         if (hw->phy.type == e1000_phy_ife)
5137                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5138                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5139
5140         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5141         return E1000_SUCCESS;
5142 }
5143
5144 /**
5145  *  e1000_setup_led_pchlan - Configures SW controllable LED
5146  *  @hw: pointer to the HW structure
5147  *
5148  *  This prepares the SW controllable LED for use.
5149  **/
5150 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5151 {
5152         DEBUGFUNC("e1000_setup_led_pchlan");
5153
5154         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5155                                      (u16)hw->mac.ledctl_mode1);
5156 }
5157
5158 /**
5159  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5160  *  @hw: pointer to the HW structure
5161  *
5162  *  Return the LED back to the default configuration.
5163  **/
5164 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5165 {
5166         DEBUGFUNC("e1000_cleanup_led_pchlan");
5167
5168         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5169                                      (u16)hw->mac.ledctl_default);
5170 }
5171
5172 /**
5173  *  e1000_led_on_pchlan - Turn LEDs on
5174  *  @hw: pointer to the HW structure
5175  *
5176  *  Turn on the LEDs.
5177  **/
5178 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5179 {
5180         u16 data = (u16)hw->mac.ledctl_mode2;
5181         u32 i, led;
5182
5183         DEBUGFUNC("e1000_led_on_pchlan");
5184
5185         /* If no link, then turn LED on by setting the invert bit
5186          * for each LED whose mode is "link_up" in ledctl_mode2.
5187          */
5188         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5189                 for (i = 0; i < 3; i++) {
5190                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5191                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5192                             E1000_LEDCTL_MODE_LINK_UP)
5193                                 continue;
5194                         if (led & E1000_PHY_LED0_IVRT)
5195                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5196                         else
5197                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5198                 }
5199         }
5200
5201         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5202 }
5203
5204 /**
5205  *  e1000_led_off_pchlan - Turn LEDs off
5206  *  @hw: pointer to the HW structure
5207  *
5208  *  Turn off the LEDs.
5209  **/
5210 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5211 {
5212         u16 data = (u16)hw->mac.ledctl_mode1;
5213         u32 i, led;
5214
5215         DEBUGFUNC("e1000_led_off_pchlan");
5216
5217         /* If no link, then turn LED off by clearing the invert bit
5218          * for each LED whose mode is "link_up" in ledctl_mode1.
5219          */
5220         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5221                 for (i = 0; i < 3; i++) {
5222                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5223                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5224                             E1000_LEDCTL_MODE_LINK_UP)
5225                                 continue;
5226                         if (led & E1000_PHY_LED0_IVRT)
5227                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5228                         else
5229                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5230                 }
5231         }
5232
5233         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5234 }
5235
5236 /**
5237  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5238  *  @hw: pointer to the HW structure
5239  *
5240  *  Read appropriate register for the config done bit for completion status
5241  *  and configure the PHY through s/w for EEPROM-less parts.
5242  *
5243  *  NOTE: some EEPROM-less silicon will fail when trying to read the config
5244  *  done bit, so only an error is logged and execution continues.  If we were
5245  *  to return with error, EEPROM-less silicon would not be able to be reset
5246  *  or change link.
5247  **/
5248 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5249 {
5250         s32 ret_val = E1000_SUCCESS;
5251         u32 bank = 0;
5252         u32 status;
5253
5254         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5255
5256         e1000_get_cfg_done_generic(hw);
5257
5258         /* Wait for indication from h/w that it has completed basic config */
5259         if (hw->mac.type >= e1000_ich10lan) {
5260                 e1000_lan_init_done_ich8lan(hw);
5261         } else {
5262                 ret_val = e1000_get_auto_rd_done_generic(hw);
5263                 if (ret_val) {
5264                         /* When auto config read does not complete, do not
5265                          * return with an error. This can happen in situations
5266                          * where there is no eeprom and prevents getting link.
5267                          */
5268                         DEBUGOUT("Auto Read Done did not complete\n");
5269                         ret_val = E1000_SUCCESS;
5270                 }
5271         }
5272
5273         /* Clear PHY Reset Asserted bit */
5274         status = E1000_READ_REG(hw, E1000_STATUS);
5275         if (status & E1000_STATUS_PHYRA)
5276                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5277         else
5278                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5279
5280         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5281         if (hw->mac.type <= e1000_ich9lan) {
5282                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5283                     (hw->phy.type == e1000_phy_igp_3)) {
5284                         e1000_phy_init_script_igp3(hw);
5285                 }
5286         } else {
5287                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5288                         /* Maybe we should do a basic PHY config */
5289                         DEBUGOUT("EEPROM not present\n");
5290                         ret_val = -E1000_ERR_CONFIG;
5291                 }
5292         }
5293
5294         return ret_val;
5295 }
5296
5297 /**
5298  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5299  * @hw: pointer to the HW structure
5300  *
5301  * In the case of a PHY power down to save power, to turn off the link during
5302  * a driver unload, or when wake on LAN is not enabled, remove the link.
5303  **/
5304 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5305 {
5306         /* If the management interface is not enabled, then power down */
5307         if (!(hw->mac.ops.check_mng_mode(hw) ||
5308               hw->phy.ops.check_reset_block(hw)))
5309                 e1000_power_down_phy_copper(hw);
5310
5311         return;
5312 }
5313
5314 /**
5315  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5316  *  @hw: pointer to the HW structure
5317  *
5318  *  Clears hardware counters specific to the silicon family and calls
5319  *  clear_hw_cntrs_generic to clear all general purpose counters.
5320  **/
5321 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5322 {
5323         u16 phy_data;
5324         s32 ret_val;
5325
5326         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5327
5328         e1000_clear_hw_cntrs_base_generic(hw);
5329
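        /* These statistics registers are clear-on-read; the reads below just
         * discard the stale values.
         */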
5330         E1000_READ_REG(hw, E1000_ALGNERRC);
5331         E1000_READ_REG(hw, E1000_RXERRC);
5332         E1000_READ_REG(hw, E1000_TNCRS);
5333         E1000_READ_REG(hw, E1000_CEXTERR);
5334         E1000_READ_REG(hw, E1000_TSCTC);
5335         E1000_READ_REG(hw, E1000_TSCTFC);
5336
5337         E1000_READ_REG(hw, E1000_MGTPRC);
5338         E1000_READ_REG(hw, E1000_MGTPDC);
5339         E1000_READ_REG(hw, E1000_MGTPTC);
5340
5341         E1000_READ_REG(hw, E1000_IAC);
5342         E1000_READ_REG(hw, E1000_ICRXOC);
5343
5344         /* Clear PHY statistics registers */
5345         if ((hw->phy.type == e1000_phy_82578) ||
5346             (hw->phy.type == e1000_phy_82579) ||
5347             (hw->phy.type == e1000_phy_i217) ||
5348             (hw->phy.type == e1000_phy_82577)) {
5349                 ret_val = hw->phy.ops.acquire(hw);
5350                 if (ret_val)
5351                         return;
5352                 ret_val = hw->phy.ops.set_page(hw,
5353                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5354                 if (ret_val)
5355                         goto release;
5356                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5357                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5358                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5359                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5360                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5361                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5362                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5363                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5364                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5365                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5366                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5367                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5368                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5369                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5370 release:
5371                 hw->phy.ops.release(hw);
5372         }
5373 }
5374
5375 /**
5376  *  e1000_configure_k0s_lpt - Configure K0s power state
5377  *  @hw: pointer to the HW structure
5378  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
5379  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5380  *  @min_time: Minimum Tx idle period allowed  - valid values are 0 to 4.
5381  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5382  *
5383  *  Configure the K0s power state based on the provided parameters.
5384  *  Assumes semaphore already acquired.
5385  *
5386  *  Success returns 0, Failure returns:
5387  *      -E1000_ERR_PHY (-2) in case of access error
5388  *      -E1000_ERR_PARAM (-4) in case of parameters error
5389  **/
5390 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
5391 {
5392         s32 ret_val;
5393         u16 kmrn_reg = 0;
5394
5395         DEBUGFUNC("e1000_configure_k0s_lpt");
5396
5397         if (entry_latency > 3 || min_time > 4)
5398                 return -E1000_ERR_PARAM;
5399
5400         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5401                                              &kmrn_reg);
5402         if (ret_val)
5403                 return ret_val;
5404
5405         /* for now don't touch the latency */
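        /* Only the minimum Tx idle time field is updated here; entry_latency
         * is validated above but the latency field is left as-is.
         */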
5406         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
5407         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
5408
5409         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5410                                               kmrn_reg);
5411         if (ret_val)
5412                 return ret_val;
5413
5414         return E1000_SUCCESS;
5415 }