/* Source: dpdk.git — drivers/net/e1000/base/e1000_ich8lan.c
 * (commit ba22f52032d6053d6c00e794b9e3cae4a031aa35)
 */
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
union ich8_hws_flash_status {
        struct ich8_hsfsts {
                u16 flcdone:1; /* bit 0 Flash Cycle Done */
                u16 flcerr:1; /* bit 1 Flash Cycle Error */
                u16 dael:1; /* bit 2 Direct Access error Log */
                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
                u16 flcinprog:1; /* bit 5 flash cycle in Progress */
                u16 reserved1:2; /* bit 7:6 Reserved */
                u16 reserved2:6; /* bit 13:8 Reserved */
                u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
                u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
        } hsf_status;
        u16 regval; /* raw 16-bit view of the HSFSTS register */
};
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
        struct ich8_hsflctl {
                u16 flcgo:1;   /* 0 Flash Cycle Go */
                u16 flcycle:2;   /* 2:1 Flash Cycle */
                u16 reserved:5;   /* 7:3 Reserved  */
                u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
                /* NOTE(review): field is named flockdn but the original
                 * comment marks bits 15:10 as Reserved — confirm against
                 * the chipset datasheet before relying on the name.
                 */
                u16 flockdn:6;   /* 15:10 Reserved */
        } hsf_ctrl;
        u16 regval; /* raw 16-bit view of the HSFCTL register */
};
168
169 /* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
        struct ich8_flracc {
                u32 grra:8; /* 0:7 GbE region Read Access */
                u32 grwa:8; /* 8:15 GbE region Write Access */
                u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
                u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
        } hsf_flregacc;
        /* NOTE(review): regval is only 16 bits while the bitfield view
         * above spans 32 bits, so regval aliases just the low half on
         * little-endian hosts — looks intentional upstream, but confirm
         * no caller expects the full 32-bit register through regval.
         */
        u16 regval;
};
179
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
        u16 phy_reg = 0;
        u32 phy_id = 0;
        s32 ret_val = 0;
        u16 retry_count;
        u32 mac_reg = 0;

        /* Try up to twice to assemble a PHY ID from the two ID registers;
         * 0xFFFF indicates a failed/floating read, so retry from scratch.
         */
        for (retry_count = 0; retry_count < 2; retry_count++) {
                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF))
                        continue;
                phy_id = (u32)(phy_reg << 16);

                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF)) {
                        phy_id = 0;
                        continue;
                }
                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
                break;
        }

        /* If an ID was cached earlier (e.g. resume path) require a match;
         * otherwise accept the freshly read ID when one was obtained.
         */
        if (hw->phy.id) {
                if  (hw->phy.id == phy_id)
                        goto out;
        } else if (phy_id) {
                hw->phy.id = phy_id;
                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                goto out;
        }

        /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         * The semaphore is dropped around the retry — presumably the
         * helpers acquire it themselves (TODO confirm against their
         * implementations).
         */
        if (hw->mac.type < e1000_pch_lpt) {
                hw->phy.ops.release(hw);
                ret_val = e1000_set_mdio_slow_mode_hv(hw);
                if (!ret_val)
                        ret_val = e1000_get_phy_id(hw);
                hw->phy.ops.acquire(hw);
        }

        /* NOTE(review): this early return skips the SMBus un-force below
         * on pch_lpt parts — matches the original flow; confirm intended.
         */
        if (ret_val)
                return false;
out:
        if (hw->mac.type == e1000_pch_lpt) {
                /* Unforce SMBus mode in PHY */
                hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
                phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
                hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

                /* Unforce SMBus mode in MAC */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
        }

        return true;
}
250
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
        u32 mac_reg;

        DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

        /* Set Phy Config Counter to 50msec */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
        E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

        /* Toggle LANPHYPC Value bit: assert the override with the value
         * bit cleared, hold it for 10us, then release the override.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL);
        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);
        usec_delay(10);
        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);

        if (hw->mac.type < e1000_pch_lpt) {
                /* Older parts: fixed 50ms settle time. */
                msec_delay(50);
        } else {
                /* pch_lpt and newer: poll CTRL_EXT for the LPCD bit in
                 * 5ms steps (bounded by count), then allow 30ms extra
                 * settle time.
                 */
                u16 count = 20;

                do {
                        msec_delay(5);
                } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                           E1000_CTRL_EXT_LPCD) && count--);

                msec_delay(30);
        }
}
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
        u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

        /* Gate automatic PHY configuration by hardware on managed and
         * non-managed 82579 and newer adapters.
         */
        e1000_gate_hw_phy_config_ich8lan(hw, true);

#ifdef ULP_SUPPORT
        /* It is not possible to be certain of the current state of ULP
         * so forcibly disable it.
         */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;

#endif /* ULP_SUPPORT */
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                DEBUGOUT("Failed to initialize PHY flow\n");
                goto out;
        }

        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
         * inaccessible and resetting the PHY is not blocked, toggle the
         * LANPHYPC Value bit to force the interconnect to PCIe mode.
         *
         * The switch cases intentionally fall through: each newer mac
         * type adds a pre-step before reaching the shared LANPHYPC
         * toggle in the e1000_pchlan case.
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* Before toggling LANPHYPC, see if PHY is accessible by
                 * forcing MAC to SMBus mode first.
                 */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                /* Wait 50 milliseconds for MAC to finish any retries
                 * that it might be trying to perform from previous
                 * attempts to acknowledge any phy read requests.
                 */
                msec_delay(50);

                /* fall-through */
        case e1000_pch2lan:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* fall-through */
        case e1000_pchlan:
                /* On pchlan with valid ME firmware the PHY is reachable
                 * without a toggle; nothing more to do.
                 */
                if ((hw->mac.type == e1000_pchlan) &&
                    (fwsm & E1000_ICH_FWSM_FW_VALID))
                        break;

                if (hw->phy.ops.check_reset_block(hw)) {
                        DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
                        ret_val = -E1000_ERR_PHY;
                        break;
                }

                /* Toggle LANPHYPC Value bit */
                e1000_toggle_lanphypc_pch_lpt(hw);
                if (hw->mac.type >= e1000_pch_lpt) {
                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
                         * so ensure that the MAC is also out of SMBus mode
                         */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        ret_val = -E1000_ERR_PHY;
                }
                break;
        default:
                break;
        }

        hw->phy.ops.release(hw);
        if (!ret_val) {

                /* Check to see if able to reset PHY.  Print error if not */
                if (hw->phy.ops.check_reset_block(hw)) {
                        ERROR_REPORT("Reset blocked by ME\n");
                        goto out;
                }

                /* Reset the PHY before any access to it.  Doing so, ensures
                 * that the PHY is in a known good state before we read/write
                 * PHY registers.  The generic reset is sufficient here,
                 * because we haven't determined the PHY type yet.
                 */
                ret_val = e1000_phy_hw_reset_generic(hw);
                if (ret_val)
                        goto out;

                /* On a successful reset, possibly need to wait for the PHY
                 * to quiesce to an accessible state before returning control
                 * to the calling function.  If the PHY does not quiesce, then
                 * return E1000E_BLK_PHY_RESET, as this is the condition that
                 *  the PHY is in.
                 */
                ret_val = hw->phy.ops.check_reset_block(hw);
                if (ret_val)
                        ERROR_REPORT("ME blocked access to PHY after reset\n");
        }

out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
        if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                msec_delay(10);
                e1000_gate_hw_phy_config_ich8lan(hw, false);
        }

        return ret_val;
}
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
675
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
 *  @address: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: value to be read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_read_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
857
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until such time otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         if (hw->phy.type == e1000_phy_82579) {
940                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
941                                                     &data);
942                 if (ret_val)
943                         goto release;
944
945                 data &= ~I82579_LPI_100_PLL_SHUT;
946                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
947                                                      data);
948         }
949
950         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
951         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
952         if (ret_val)
953                 goto release;
954
955         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
956 release:
957         hw->phy.ops.release(hw);
958
959         return ret_val;
960 }
961
962 /**
963  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
964  *  @hw:   pointer to the HW structure
965  *  @link: link up bool flag
966  *
967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
968  *  preventing further DMA write requests.  Workaround the issue by disabling
969  *  the de-assertion of the clock request when in 1Gpbs mode.
970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
971  *  speeds in order to avoid Tx hangs.
972  **/
STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		/* 1Gbps path: temporarily disable K1, set the PLL clock
		 * request bit in FEXTNVM6, then restore the saved K1
		 * configuration.  The ordering (K1 off -> delay -> FEXTNVM6
		 * -> K1 restore) is deliberate; do not reorder.
		 */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* Save current K1 configuration so it can be restored below */
		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		/* Disable K1 while FEXTNVM6 is updated */
		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		/* Brief settle time before touching FEXTNVM6 */
		usec_delay(10);

		/* Prevent de-assertion of the clock request in 1Gbps mode */
		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		/* Restore the original K1 configuration */
		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		/* No inband-timeout tuning needed when link is down or at
		 * 100Full; just write back FEXTNVM6.
		 */
		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}
1050
1051 #ifdef ULP_SUPPORT
1052 /**
1053  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1054  *  @hw: pointer to the HW structure
1055  *  @to_sx: boolean indicating a system power state transition to Sx
1056  *
1057  *  When link is down, configure ULP mode to significantly reduce the power
1058  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1059  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1060  *  system, configure the ULP mode by software.
1061  */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;

	/* ULP applies only to pch_lpt (and later) parts that are not the
	 * listed I217/I218 device IDs; also skip if ULP is already on.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (!to_sx) {
		int i = 0;
		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;
			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
	}

	/* If ME firmware is present and valid, let it perform the ULP
	 * configuration instead of doing it by software below.
	 */
	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* During S0 Idle keep the phy in PCI-E mode */
	if (hw->dev_spec.ich8lan.smbus_disable)
		goto skip_smbus;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

skip_smbus:
	if (!to_sx) {
		/* Change the 'Link Status Change' interrupt to trigger
		 * on 'Cable Status Change'
		 */
		ret_val = e1000_read_kmrn_reg_locked(hw,
						     E1000_KMRNCTRLSTA_OP_MODES,
						     &phy_reg);
		if (ret_val)
			goto release;
		phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
		e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
					    phy_reg);
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		/* Going to Sx: optionally wake host on link change, and
		 * make ULP sticky across the power transition.
		 */
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if (!to_sx) {
		/* Disable Tx so that the MAC doesn't send any (buffered)
		 * packets to the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_TCTL);
		mac_reg &= ~E1000_TCTL_EN;
		E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
	}
release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}
1184
1185 /**
1186  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1187  *  @hw: pointer to the HW structure
1188  *  @force: boolean indicating whether or not to force disabling ULP
1189  *
1190  *  Un-configure ULP mode when link is up, the system is transitioned from
1191  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1192  *  system, poll for an indication from ME that ULP has been un-configured.
1193  *  If not on an ME enabled system, un-configure the ULP mode by software.
1194  *
1195  *  During nominal operation, this function is called when link is acquired
1196  *  to disable ULP mode (force=false); otherwise, for example when unloading
1197  *  the driver or during Sx->S0 transitions, this is called with force=true
1198  *  to forcibly disable ULP.
 *
1200  *  When the cable is plugged in while the device is in D0, a Cable Status
1201  *  Change interrupt is generated which causes this function to be called
1202  *  to partially disable ULP mode and restart autonegotiation.  This function
1203  *  is then called again due to the resulting Link Status Change interrupt
1204  *  to finish cleaning up after the ULP flow.
1205  */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	/* ULP applies only to pch_lpt (and later) parts that are not the
	 * listed I217/I218 device IDs; also skip if ULP is already off.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	/* If ME firmware is present and valid, it owns the ULP flow:
	 * request un-configuration (when forced) and wait for completion.
	 */
	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 10) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

			/* Restore link speed advertisements and restart
			 * Auto-negotiation
			 */
			ret_val = e1000_phy_setup_autoneg(hw);
			if (ret_val)
				goto out;

			ret_val = e1000_oem_bits_config_ich8lan(hw, true);
		}

		goto out;
	}

	/* Software ULP un-configuration path (no valid ME firmware) */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Revert the change to the 'Link Status Change'
	 * interrupt to trigger on 'Cable Status Change'
	 */
	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
					     &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
	e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		/* Retry the PHY read now that SMBus mode is forced */
		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	/* CSC interrupt received due to ULP Indication */
	if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
		phy_reg &= ~(I218_ULP_CONFIG1_IND |
			     I218_ULP_CONFIG1_STICKY_ULP |
			     I218_ULP_CONFIG1_RESET_TO_SMBUS |
			     I218_ULP_CONFIG1_WOL_HOST |
			     I218_ULP_CONFIG1_INBAND_EXIT |
			     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
		e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

		/* Commit ULP changes by starting auto ULP configuration */
		phy_reg |= I218_ULP_CONFIG1_START;
		e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

		/* Clear Disable SMBus Release on PERST# in MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
		mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
		E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

		if (!force) {
			/* NOTE: the semaphore is released here, before the
			 * early return below — the normal 'release' label
			 * must not run on this path.
			 */
			hw->phy.ops.release(hw);

			if (hw->mac.autoneg)
				e1000_phy_setup_autoneg(hw);

			e1000_sw_lcd_config_ich8lan(hw);

			e1000_oem_bits_config_ich8lan(hw, true);

			/* Set ULP state to unknown and return non-zero to
			 * indicate no link (yet) and re-enter on the next LSC
			 * to finish disabling ULP flow.
			 */
			hw->dev_spec.ich8lan.ulp_state =
			    e1000_ulp_state_unknown;

			return 1;
		}
	}

	/* Re-enable Tx */
	mac_reg = E1000_READ_REG(hw, E1000_TCTL);
	mac_reg |= E1000_TCTL_EN;
	E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		/* Full PHY reset when forcibly disabling ULP */
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}
1380
1381 #endif /* ULP_SUPPORT */
1382 /**
1383  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1384  *  @hw: pointer to the HW structure
1385  *
1386  *  Checks to see of the link status of the hardware has changed.  If a
1387  *  change in link status has been detected, then we read the PHY registers
1388  *  to get the current speed/duplex if link exists.
1389  **/
1390 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1391 {
1392         struct e1000_mac_info *mac = &hw->mac;
1393         s32 ret_val, tipg_reg = 0;
1394         u16 emi_addr, emi_val = 0;
1395         bool link = false;
1396         u16 phy_reg;
1397
1398         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1399
1400         /* We only want to go out to the PHY registers to see if Auto-Neg
1401          * has completed and/or if our link status has changed.  The
1402          * get_link_status flag is set upon receiving a Link Status
1403          * Change or Rx Sequence Error interrupt.
1404          */
1405         if (!mac->get_link_status)
1406                 return E1000_SUCCESS;
1407
1408         if ((hw->mac.type < e1000_pch_lpt) ||
1409             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1410             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1411                 /* First we want to see if the MII Status Register reports
1412                  * link.  If so, then we want to get the current speed/duplex
1413                  * of the PHY.
1414                  */
1415                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1416                 if (ret_val)
1417                         return ret_val;
1418         } else {
1419                 /* Check the MAC's STATUS register to determine link state
1420                  * since the PHY could be inaccessible while in ULP mode.
1421                  */
1422                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1423                 if (link)
1424                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1425                 else
1426                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1427
1428                 if (ret_val)
1429                         return ret_val;
1430         }
1431
1432         if (hw->mac.type == e1000_pchlan) {
1433                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1434                 if (ret_val)
1435                         return ret_val;
1436         }
1437
1438         /* When connected at 10Mbps half-duplex, some parts are excessively
1439          * aggressive resulting in many collisions. To avoid this, increase
1440          * the IPG and reduce Rx latency in the PHY.
1441          */
1442         if (((hw->mac.type == e1000_pch2lan) ||
1443              (hw->mac.type == e1000_pch_lpt)) && link) {
1444                 u16 speed, duplex;
1445
1446                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1447                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1448                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1449
1450                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1451                         tipg_reg |= 0xFF;
1452                         /* Reduce Rx latency in analog PHY */
1453                         emi_val = 0;
1454                 } else {
1455                         /* Roll back the default values */
1456                         tipg_reg |= 0x08;
1457                         emi_val = 1;
1458                 }
1459
1460                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1461
1462                 ret_val = hw->phy.ops.acquire(hw);
1463                 if (ret_val)
1464                         return ret_val;
1465
1466                 if (hw->mac.type == e1000_pch2lan)
1467                         emi_addr = I82579_RX_CONFIG;
1468                 else
1469                         emi_addr = I217_RX_CONFIG;
1470                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1471
1472                 hw->phy.ops.release(hw);
1473
1474                 if (ret_val)
1475                         return ret_val;
1476         }
1477
1478         /* I217 Packet Loss issue:
1479          * ensure that FEXTNVM4 Beacon Duration is set correctly
1480          * on power up.
1481          * Set the Beacon Duration for I217 to 8 usec
1482          */
1483         if (hw->mac.type == e1000_pch_lpt) {
1484                 u32 mac_reg;
1485
1486                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1487                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1488                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1489                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1490         }
1491
1492         /* Work-around I218 hang issue */
1493         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1494             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1495             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1496             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1497                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1498                 if (ret_val)
1499                         return ret_val;
1500         }
1501
1502         /* Clear link partner's EEE ability */
1503         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1504
1505         if (!link)
1506                 return E1000_SUCCESS; /* No link detected */
1507
1508         mac->get_link_status = false;
1509
1510         switch (hw->mac.type) {
1511         case e1000_pch2lan:
1512                 ret_val = e1000_k1_workaround_lv(hw);
1513                 if (ret_val)
1514                         return ret_val;
1515                 /* fall-thru */
1516         case e1000_pchlan:
1517                 if (hw->phy.type == e1000_phy_82578) {
1518                         ret_val = e1000_link_stall_workaround_hv(hw);
1519                         if (ret_val)
1520                                 return ret_val;
1521                 }
1522
1523                 /* Workaround for PCHx parts in half-duplex:
1524                  * Set the number of preambles removed from the packet
1525                  * when it is passed from the PHY to the MAC to prevent
1526                  * the MAC from misinterpreting the packet type.
1527                  */
1528                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1529                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1530
1531                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1532                     E1000_STATUS_FD)
1533                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1534
1535                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1536                 break;
1537         default:
1538                 break;
1539         }
1540
1541         /* Check if there was DownShift, must be checked
1542          * immediately after link-up
1543          */
1544         e1000_check_downshift_generic(hw);
1545
1546         /* Enable/Disable EEE after link up */
1547         if (hw->phy.type > e1000_phy_82579) {
1548                 ret_val = e1000_set_eee_pchlan(hw);
1549                 if (ret_val)
1550                         return ret_val;
1551         }
1552
1553         /* If we are forcing speed/duplex, then we simply return since
1554          * we have already determined whether we have link or not.
1555          */
1556         if (!mac->autoneg)
1557                 return -E1000_ERR_CONFIG;
1558
1559         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1560          * of MAC speed/duplex configuration.  So we only need to
1561          * configure Collision Distance in the MAC.
1562          */
1563         mac->ops.config_collision_dist(hw);
1564
1565         /* Configure Flow Control now that Auto-Neg has completed.
1566          * First, we need to restore the desired flow control
1567          * settings because we may have had to re-autoneg with a
1568          * different link partner.
1569          */
1570         ret_val = e1000_config_fc_after_link_up_generic(hw);
1571         if (ret_val)
1572                 DEBUGOUT("Error configuring flow control\n");
1573
1574         return ret_val;
1575 }
1576
1577 /**
1578  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1579  *  @hw: pointer to the HW structure
1580  *
1581  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1582  **/
1583 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1584 {
1585         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1586
1587         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1588         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1589         switch (hw->mac.type) {
1590         case e1000_ich8lan:
1591         case e1000_ich9lan:
1592         case e1000_ich10lan:
1593                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1594                 break;
1595         case e1000_pchlan:
1596         case e1000_pch2lan:
1597         case e1000_pch_lpt:
1598                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1599                 break;
1600         default:
1601                 break;
1602         }
1603 }
1604
1605 /**
1606  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1607  *  @hw: pointer to the HW structure
1608  *
1609  *  Acquires the mutex for performing NVM operations.
1610  **/
STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_acquire_nvm_ich8lan");

	/* Take the device-specific NVM mutex; the matching unlock is done
	 * in e1000_release_nvm_ich8lan().
	 */
	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return E1000_SUCCESS;
}
1619
1620 /**
1621  *  e1000_release_nvm_ich8lan - Release NVM mutex
1622  *  @hw: pointer to the HW structure
1623  *
1624  *  Releases the mutex used while performing NVM operations.
1625  **/
STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_ich8lan");

	/* Drop the NVM mutex taken in e1000_acquire_nvm_ich8lan(). */
	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return;
}
1634
1635 /**
1636  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1637  *  @hw: pointer to the HW structure
1638  *
1639  *  Acquires the software control flag for performing PHY and select
1640  *  MAC CSR accesses.
1641  **/
1642 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1643 {
1644         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1645         s32 ret_val = E1000_SUCCESS;
1646
1647         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1648
1649         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1650
1651         while (timeout) {
1652                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1653                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1654                         break;
1655
1656                 msec_delay_irq(1);
1657                 timeout--;
1658         }
1659
1660         if (!timeout) {
1661                 DEBUGOUT("SW has already locked the resource.\n");
1662                 ret_val = -E1000_ERR_CONFIG;
1663                 goto out;
1664         }
1665
1666         timeout = SW_FLAG_TIMEOUT;
1667
1668         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1669         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1670
1671         while (timeout) {
1672                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1673                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1674                         break;
1675
1676                 msec_delay_irq(1);
1677                 timeout--;
1678         }
1679
1680         if (!timeout) {
1681                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1682                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1683                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1684                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1685                 ret_val = -E1000_ERR_CONFIG;
1686                 goto out;
1687         }
1688
1689 out:
1690         if (ret_val)
1691                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1692
1693         return ret_val;
1694 }
1695
1696 /**
1697  *  e1000_release_swflag_ich8lan - Release software control flag
1698  *  @hw: pointer to the HW structure
1699  *
1700  *  Releases the software control flag for performing PHY and select
1701  *  MAC CSR accesses.
1702  **/
1703 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1704 {
1705         u32 extcnf_ctrl;
1706
1707         DEBUGFUNC("e1000_release_swflag_ich8lan");
1708
1709         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1710
1711         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1712                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1713                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1714         } else {
1715                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1716         }
1717
1718         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1719
1720         return;
1721 }
1722
1723 /**
1724  *  e1000_check_mng_mode_ich8lan - Checks management mode
1725  *  @hw: pointer to the HW structure
1726  *
1727  *  This checks if the adapter has any manageability enabled.
1728  *  This is a function pointer entry point only called by read/write
1729  *  routines for the PHY and NVM parts.
1730  **/
1731 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1732 {
1733         u32 fwsm;
1734
1735         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1736
1737         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1738
1739         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1740                ((fwsm & E1000_FWSM_MODE_MASK) ==
1741                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1742 }
1743
1744 /**
1745  *  e1000_check_mng_mode_pchlan - Checks management mode
1746  *  @hw: pointer to the HW structure
1747  *
1748  *  This checks if the adapter has iAMT enabled.
1749  *  This is a function pointer entry point only called by read/write
1750  *  routines for the PHY and NVM parts.
1751  **/
1752 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1753 {
1754         u32 fwsm;
1755
1756         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1757
1758         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1759
1760         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1761                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1762 }
1763
1764 /**
1765  *  e1000_rar_set_pch2lan - Set receive address register
1766  *  @hw: pointer to the HW structure
1767  *  @addr: pointer to the receive address
1768  *  @index: receive address array register
1769  *
1770  *  Sets the receive address array register at index to the address passed
1771  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1772  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1773  *  Use SHRA[0-3] in place of those reserved for ME.
1774  **/
STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] is always available to the host; program it directly. */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count)) {
		s32 ret_val;

		/* SHRA writes require the SW control flag. */
		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return E1000_SUCCESS;

		/* A read-back mismatch suggests ME holds the register;
		 * fall through to the failure path.
		 */
		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
1832
1833 /**
1834  *  e1000_rar_set_pch_lpt - Set receive address registers
1835  *  @hw: pointer to the HW structure
1836  *  @addr: pointer to the receive address
1837  *  @index: receive address array register
1838  *
1839  *  Sets the receive address register array at index to the address passed
1840  *  in by addr. For LPT, RAR[0] is the base address register that is to
1841  *  contain the MAC address. SHRA[0-10] are the shared receive address
1842  *  registers that are shared between the Host and manageability engine (ME).
1843  **/
STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] is always available to the host; program it directly. */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		/* FWSM's WLOCK_MAC field describes how many SHRAR entries
		 * ME has locked: per the checks below, 1 means all are
		 * locked and 0 means none are (confirm against the FWSM
		 * register spec).
		 */
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			/* SHRA writes require the SW control flag. */
			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
					rar_low);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
					rar_high);
			E1000_WRITE_FLUSH(hw);

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
				return E1000_SUCCESS;
		}
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
1911
1912 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1913 /**
1914  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1915  *  @hw: pointer to the HW structure
1916  *  @mc_addr_list: array of multicast addresses to program
1917  *  @mc_addr_count: number of multicast addresses to program
1918  *
1919  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1920  *  The caller must have a packed mc_addr_list of multicast addresses.
1921  **/
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count)
{
	u16 phy_reg = 0;
	int i;
	s32 ret_val;

	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

	/* Update the MAC's MTA (and its mta_shadow copy) first. */
	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	/* Gain access to the PHY wakeup registers before mirroring. */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Mirror each 32-bit mta_shadow entry into a pair of 16-bit
	 * BM_MTA PHY registers: low word first, then high word.
	 */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(hw->mac.mta_shadow[i] &
						 0xFFFF));
		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
					   (u16)((hw->mac.mta_shadow[i] >> 16) &
						 0xFFFF));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
1956
1957 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1958 /**
1959  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1960  *  @hw: pointer to the HW structure
1961  *
1962  *  Checks if firmware is blocking the reset of the PHY.
1963  *  This is a function pointer entry point only called by
1964  *  reset routines.
1965  **/
STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;
	bool blocked = false;
	int i = 0;

	DEBUGFUNC("e1000_check_reset_block_ich8lan");

	/* Re-read FWSM up to 10 more times, 10 ms apart, while firmware
	 * keeps RSPCIPHY deasserted (i.e. keeps blocking the PHY reset).
	 */
	do {
		fwsm = E1000_READ_REG(hw, E1000_FWSM);
		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
			/* Reset still blocked - wait and retry. */
			blocked = true;
			msec_delay(10);
			continue;
		}
		blocked = false;
	} while (blocked && (i++ < 10));
	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
}
1985
1986 /**
1987  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1988  *  @hw: pointer to the HW structure
1989  *
1990  *  Assumes semaphore already acquired.
1991  *
1992  **/
STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	/* SMBus frequency strap field; only consumed for i217 below. */
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
		E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Copy the strapped SMBus address into the PHY and mark it valid. */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		if (freq--) {
			/* NOTE: freq was post-decremented by the test above,
			 * so the two bits written below come from
			 * (strap field - 1); a field value of 0 is treated
			 * as unsupported.
			 */
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
				HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			DEBUGOUT("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}
2026
2027 /**
2028  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2029  *  @hw:   pointer to the HW structure
2030  *
2031  *  SW should configure the LCD from the NVM extended configuration region
2032  *  as a workaround for certain parts.
2033  **/
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		/* Only the IGP3 PHY on specific ICH8 devices uses the
		 * non-ICH8M SW config mask; other ICH8 devices fall
		 * through to the common mask below.
		 */
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Only proceed if the NVM indicates SW should do the config. */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
			goto release;

	/* Extract the extended configuration region size (in word pairs). */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		/* Mirror the MAC's LEDCTL value into the PHY LED config. */
		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry is a (reg_data, reg_addr) word pair in the NVM. */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto release;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		/* Combine the saved page with the in-page register offset. */
		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2148
2149 /**
2150  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2151  *  @hw:   pointer to the HW structure
2152  *  @link: link up bool flag
2153  *
2154  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2155  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2156  *  If link is down, the function will restore the default K1 setting located
2157  *  in the NVM.
2158  **/
STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default K1 state comes from the NVM; may be overridden below. */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	/* Workaround is only needed on the pchlan MAC. */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Disable K1 only when speed has resolved to 1000
			 * with link up.
			 */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Same check as above via the 82577's status reg. */
			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	/* Apply the resulting K1 setting while still holding the flag. */
	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2230
2231 /**
2232  *  e1000_configure_k1_ich8lan - Configure K1 power state
2233  *  @hw: pointer to the HW structure
2234  *  @enable: K1 state to configure
2235  *
2236  *  Configure the K1 power state based on the provided parameter.
2237  *  Assumes semaphore already acquired.
2238  *
2239  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2240  **/
2241 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2242 {
2243         s32 ret_val;
2244         u32 ctrl_reg = 0;
2245         u32 ctrl_ext = 0;
2246         u32 reg = 0;
2247         u16 kmrn_reg = 0;
2248
2249         DEBUGFUNC("e1000_configure_k1_ich8lan");
2250
2251         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2252                                              &kmrn_reg);
2253         if (ret_val)
2254                 return ret_val;
2255
2256         if (k1_enable)
2257                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2258         else
2259                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2260
2261         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2262                                               kmrn_reg);
2263         if (ret_val)
2264                 return ret_val;
2265
2266         usec_delay(20);
2267         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2268         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2269
2270         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2271         reg |= E1000_CTRL_FRCSPD;
2272         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2273
2274         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2275         E1000_WRITE_FLUSH(hw);
2276         usec_delay(20);
2277         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2278         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2279         E1000_WRITE_FLUSH(hw);
2280         usec_delay(20);
2281
2282         return E1000_SUCCESS;
2283 }
2284
/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw:       pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
 *
 *  Returns E1000_SUCCESS on success or skip, otherwise a PHY access error.
 **/
STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	/* OEM bits only exist on PCH and newer MACs */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH, leave the configuration to hardware when the OEM
	 * write-enable bit is set.
	 */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* Skip unless NVM indicates software should do the configuration */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	if (d0_state) {
		/* Entering D0: mirror only the D0 PHY_CTRL bits */
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		/* Entering Dx: either the D0 or the non-D0 PHY_CTRL bit
		 * selects the corresponding OEM bit.
		 */
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
		    E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2355
2356
2357 /**
2358  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2359  *  @hw:   pointer to the HW structure
2360  **/
2361 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2362 {
2363         s32 ret_val;
2364         u16 data;
2365
2366         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2367
2368         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2369         if (ret_val)
2370                 return ret_val;
2371
2372         data |= HV_KMRN_MDIO_SLOW;
2373
2374         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2375
2376         return ret_val;
2377 }
2378
/**
 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
 *
 *  PCH (82577/82578 PHY) only: sets MDIO slow mode, tunes preamble
 *  generation on early PHY revisions, soft-resets early 82578 parts,
 *  selects page 0, applies the K1 gigabit workaround, fixes link
 *  disconnects on busy half-duplex hubs and raises the MSE threshold.
 **/
STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_data;

	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");

	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	/* Preamble workarounds apply only to 82577 rev 1/2 and 82578 rev 1 */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
						0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/* Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000_phy_sw_reset_generic(hw);
			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
							0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* NOTE(review): PHY address is forced to 1 for this raw MDIC
	 * page-select write; assumes the PCH PHY responds at address 1 —
	 * confirm against the PHY init code elsewhere in this file.
	 */
	hw->phy.addr = 1;
	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/* Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, true);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	/* Keep only the low byte of the port general configuration */
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
					       phy_data & 0x00FF);
	if (ret_val)
		goto release;

	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2463
/**
 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @hw:   pointer to the HW structure
 *
 *  Mirrors every MAC receive-address register pair (RAL/RAH, including
 *  the RAH address-valid bit) into the PHY's BM_RAR_* wakeup registers.
 *  Errors are not reported (void return); on failure the function simply
 *  bails out, releasing the PHY on all paths.
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* Open access to the BM wakeup register space; phy_reg carries
	 * state between this call and the matching disable call below.
	 */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		/* RAL low/high 16 bits -> BM_RAR_L / BM_RAR_M */
		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		/* RAH low 16 bits -> BM_RAR_H; RAH.AV bit -> BM_RAR_CTRL */
		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
2504
#ifndef CRC32_OS_SUPPORT
/* Bitwise (LSB-first) CRC-32 of a 6-byte destination address, using the
 * reflected 802.3 polynomial 0xEDB88320 with the standard all-ones
 * initial remainder and final one's-complement.
 */
STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
{
	u32 crc = 0xFFFFFFFF;
	u32 byte, bit;

	DEBUGFUNC("e1000_calc_rx_da_crc");

	for (byte = 0; byte < 6; byte++) {
		crc ^= mac[byte];
		for (bit = 0; bit < 8; bit++) {
			if (crc & 1)
				crc = (crc >> 1) ^ 0xEDB88320;
			else
				crc >>= 1;
		}
	}

	return ~crc;
}

#endif /* CRC32_OS_SUPPORT */
/**
 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @hw: pointer to the HW structure
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
 *
 *  Disables the Rx path, then either programs the jumbo-frame workaround
 *  values (enable) or restores hardware defaults (disable) in the MAC,
 *  KMRN and PHY registers, and finally re-enables the Rx path.
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");

	/* Workaround applies to 82579 (PCH2) and newer parts only */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	/* disable Rx path while enabling/disabling workaround
	 * NOTE(review): the read_reg return value is intentionally ignored
	 * here (best effort), matching the original Intel shared code.
	 */
	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
					phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ADDR_LEN] = {0};
			u32 addr_high, addr_low;

			/* Skip entries whose address-valid bit is clear */
			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
			/* Reassemble the 6-byte address, low byte first */
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

#ifndef CRC32_OS_SUPPORT
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					e1000_calc_rx_da_crc(mac_addr));
#else /* CRC32_OS_SUPPORT */
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					E1000_CRC32(ETH_ADDR_LEN, mac_addr));
#endif /* CRC32_OS_SUPPORT */
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Strip the CRC on receive while the workaround is active */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		/* Set bit 0 of the KMRN control register */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		/* Set KMRN HD control bits [11:8] to 0xB */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		/* Program the Tx pointer gap into bits [11:2] */
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (E1000_TX_PTR_GAP << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		/* Set bit 10 of the power management control register */
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
						(1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Stop stripping the CRC on receive */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		/* Clear bit 0 of the KMRN control register */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		/* NOTE(review): the disable path also writes 0xB into KMRN
		 * HD control bits [11:8], same as the enable path — looks
		 * intentional in the Intel shared code, but worth confirming
		 * against the 82579 documentation.
		 */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		/* Restore the default Tx pointer gap of 0x8 */
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		/* Clear bit 10 of the power management control register */
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
						~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
				     ~(1 << 14));
}
2704
/**
 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
 *
 *  82579 (PCH2) only: sets MDIO slow mode first, then programs the MSE
 *  threshold and the MSE link-down count through EMI registers under
 *  the PHY semaphore.
 **/
STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");

	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set MDIO slow mode before any other MDIO access */
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
	if (ret_val)
		goto release;
	/* drop link after 5 times MSE threshold was reached */
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2737
/**
 *  e1000_k1_workaround_lv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 *  Disable K1 for 1000 and 100 speeds
 **/
STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;

	DEBUGFUNC("e1000_k1_workaround_lv");

	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Only act once link is up and auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
						       &pm_phy_reg);
			if (ret_val)
				return ret_val;
			/* Disable K1 when linked at 1000 or 100 Mbps */
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
							pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			u32 mac_reg;
			/* 10 Mbps link: set a 16us K1 beacon duration */
			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}
2787
2788 /**
2789  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2790  *  @hw:   pointer to the HW structure
2791  *  @gate: boolean set to true to gate, false to ungate
2792  *
2793  *  Gate/ungate the automatic PHY configuration via hardware; perform
2794  *  the configuration via software instead.
2795  **/
2796 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2797 {
2798         u32 extcnf_ctrl;
2799
2800         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2801
2802         if (hw->mac.type < e1000_pch2lan)
2803                 return;
2804
2805         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2806
2807         if (gate)
2808                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2809         else
2810                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2811
2812         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2813 }
2814
2815 /**
2816  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2817  *  @hw: pointer to the HW structure
2818  *
2819  *  Check the appropriate indication the MAC has finished configuring the
2820  *  PHY after a software reset.
2821  **/
2822 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2823 {
2824         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2825
2826         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2827
2828         /* Wait for basic configuration completes before proceeding */
2829         do {
2830                 data = E1000_READ_REG(hw, E1000_STATUS);
2831                 data &= E1000_STATUS_LAN_INIT_DONE;
2832                 usec_delay(100);
2833         } while ((!data) && --loop);
2834
2835         /* If basic configuration is incomplete before the above loop
2836          * count reaches 0, loading the configuration from NVM will
2837          * leave the PHY in a bad state possibly resulting in no link.
2838          */
2839         if (loop == 0)
2840                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2841
2842         /* Clear the Init Done bit for the next init event */
2843         data = E1000_READ_REG(hw, E1000_STATUS);
2844         data &= ~E1000_STATUS_LAN_INIT_DONE;
2845         E1000_WRITE_REG(hw, E1000_STATUS, data);
2846 }
2847
/**
 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Runs the MAC-type-specific PHY workarounds, clears the host wakeup
 *  bit, and re-applies the NVM-driven LCD and OEM-bits configuration.
 *  On a non-managed 82579 it also ungates automatic PHY configuration
 *  and programs the EEE LPI update timer.
 **/
STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	DEBUGFUNC("e1000_post_phy_reset_ich8lan");

	/* Nothing to do while PHY resets are blocked (e.g. by management) */
	if (hw->phy.ops.check_reset_block(hw))
		return E1000_SUCCESS;

	/* Allow time for h/w to get to quiescent state after reset */
	msec_delay(10);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, true);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			msec_delay(10);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_emi_reg_locked(hw,
						     I82579_LPI_UPDATE_TIMER,
						     0x1387);
		hw->phy.ops.release(hw);
	}

	return ret_val;
}
2916
2917 /**
2918  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2919  *  @hw: pointer to the HW structure
2920  *
2921  *  Resets the PHY
2922  *  This is a function pointer entry point called by drivers
2923  *  or other shared routines.
2924  **/
2925 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2926 {
2927         s32 ret_val = E1000_SUCCESS;
2928
2929         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2930
2931         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2932         if ((hw->mac.type == e1000_pch2lan) &&
2933             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2934                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2935
2936         ret_val = e1000_phy_hw_reset_generic(hw);
2937         if (ret_val)
2938                 return ret_val;
2939
2940         return e1000_post_phy_reset_ich8lan(hw);
2941 }
2942
2943 /**
2944  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2945  *  @hw: pointer to the HW structure
2946  *  @active: true to enable LPLU, false to disable
2947  *
2948  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2949  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2950  *  the phy speed. This function will manually set the LPLU bit and restart
2951  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2952  *  since it configures the same bit.
2953  **/
2954 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2955 {
2956         s32 ret_val;
2957         u16 oem_reg;
2958
2959         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2960
2961         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2962         if (ret_val)
2963                 return ret_val;
2964
2965         if (active)
2966                 oem_reg |= HV_OEM_BITS_LPLU;
2967         else
2968                 oem_reg &= ~HV_OEM_BITS_LPLU;
2969
2970         if (!hw->phy.ops.check_reset_block(hw))
2971                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2972
2973         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2974 }
2975
/**
 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");

	/* D0 LPLU is not supported on the IFE PHY */
	if (phy->type == e1000_phy_ife)
		return E1000_SUCCESS;

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

	if (active) {
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies only to IGP3 PHYs */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw,
					    IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		if (ret_val)
			return ret_val;
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw,
					     IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			return ret_val;
	} else {
		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies only to IGP3 PHYs */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		}
		/* e1000_smart_speed_default: leave SmartSpeed untouched */
	}

	return E1000_SUCCESS;
}
3071
3072 /**
3073  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3074  *  @hw: pointer to the HW structure
3075  *  @active: true to enable LPLU, false to disable
3076  *
3077  *  Sets the LPLU D3 state according to the active flag.  When
3078  *  activating LPLU this function also disables smart speed
3079  *  and vice versa.  LPLU will not be activated unless the
3080  *  device autonegotiation advertisement meets standards of
3081  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3082  *  This is a function pointer entry point only called by
3083  *  PHY setup routines.
3084  **/
STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

	if (!active) {
		/* Disable non-D0a (i.e. D3) LPLU. */
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* The SmartSpeed handling below is IGP3-specific. */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Enable D3 LPLU only when the autoneg advertisement covers
		 * a full speed/duplex class; otherwise leave LPLU disabled
		 * (see function header).
		 */
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* The SmartSpeed handling below is IGP3-specific. */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw,
					    IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		/* Note: an error from this final write propagates through
		 * the return below.
		 */
		ret_val = phy->ops.write_reg(hw,
					     IGP01E1000_PHY_PORT_CONFIG,
					     data);
	}

	return ret_val;
}
3165
3166 /**
3167  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3168  *  @hw: pointer to the HW structure
3169  *  @bank:  pointer to the variable that returns the active bank
3170  *
3171  *  Reads signature byte from the NVM using the flash access registers.
3172  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3173  **/
STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Byte distance between the two flash banks. */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* Byte offset of the high byte of the signature word (0x13). */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* On ICH8/9 try the EECD SEC1VAL bit first, but only if the
		 * hardware reports it as valid.
		 */
		eecd = E1000_READ_REG(hw, E1000_EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return E1000_SUCCESS;
		}
		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		/* Signature bits 15:14 == 10b mark a valid bank. */
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3231
3232 /**
3233  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3234  *  @hw: pointer to the HW structure
3235  *  @offset: The offset (in bytes) of the word(s) to read.
3236  *  @words: Size of data to read in words
3237  *  @data: Pointer to the word(s) to read at offset.
3238  *
3239  *  Reads a word(s) from the NVM using the flash access registers.
3240  **/
STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u16 i, word;

	DEBUGFUNC("e1000_read_nvm_ich8lan");

	/* Reject zero-length requests and reads past the NVM image. */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	/* A bank-detect failure is not fatal; fall back to bank 0. */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	/* Word offset of the request within the selected bank. */
	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	/* Clear any bank-detect error so it isn't reported as a read
	 * failure below.
	 */
	ret_val = E1000_SUCCESS;
	for (i = 0; i < words; i++) {
		/* Pending (uncommitted) shadow-RAM writes take precedence
		 * over the flash contents.
		 */
		if (dev_spec->shadow_ram[offset+i].modified) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		DEBUGOUT1("NVM read error: %d\n", ret_val);

	return ret_val;
}
3293
3294 /**
3295  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3296  *  @hw: pointer to the HW structure
3297  *
3298  *  This function does initial flash setup so that a new read/write/erase cycle
3299  *  can be started.
3300  **/
STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	/* (these status bits are write-1-to-clear) */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = E1000_SUCCESS;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(1);
		}
		if (ret_val == E1000_SUCCESS) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
						hsfsts.regval);
		} else {
			DEBUGOUT("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3366
3367 /**
3368  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3369  *  @hw: pointer to the HW structure
3370  *  @timeout: maximum time to wait for completion
3371  *
3372  *  This function starts a flash cycle and waits for its completion.
3373  **/
3374 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3375 {
3376         union ich8_hws_flash_ctrl hsflctl;
3377         union ich8_hws_flash_status hsfsts;
3378         u32 i = 0;
3379
3380         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3381
3382         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3383         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3384         hsflctl.hsf_ctrl.flcgo = 1;
3385
3386         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3387
3388         /* wait till FDONE bit is set to 1 */
3389         do {
3390                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3391                 if (hsfsts.hsf_status.flcdone)
3392                         break;
3393                 usec_delay(1);
3394         } while (i++ < timeout);
3395
3396         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3397                 return E1000_SUCCESS;
3398
3399         return -E1000_ERR_NVM;
3400 }
3401
3402 /**
3403  *  e1000_read_flash_word_ich8lan - Read word from flash
3404  *  @hw: pointer to the HW structure
3405  *  @offset: offset to data location
3406  *  @data: pointer to the location for storing the data
3407  *
3408  *  Reads the flash word at offset into data.  Offset is converted
3409  *  to bytes before read.
3410  **/
3411 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3412                                          u16 *data)
3413 {
3414         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3415
3416         if (!data)
3417                 return -E1000_ERR_NVM;
3418
3419         /* Must convert offset into bytes. */
3420         offset <<= 1;
3421
3422         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3423 }
3424
3425 /**
3426  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3427  *  @hw: pointer to the HW structure
3428  *  @offset: The offset of the byte to read.
3429  *  @data: Pointer to a byte to store the value read.
3430  *
3431  *  Reads a single byte from the NVM using the flash access registers.
3432  **/
3433 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3434                                          u8 *data)
3435 {
3436         s32 ret_val;
3437         u16 word = 0;
3438
3439         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3440
3441         if (ret_val)
3442                 return ret_val;
3443
3444         *data = (u8)word;
3445
3446         return E1000_SUCCESS;
3447 }
3448
3449 /**
3450  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3451  *  @hw: pointer to the HW structure
3452  *  @offset: The offset (in bytes) of the byte or word to read.
3453  *  @size: Size of data to read, 1=byte 2=word
3454  *  @data: Pointer to the word to store the value read.
3455  *
3456  *  Reads a byte or word from the NVM using the flash access registers.
3457  **/
STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	DEBUGFUNC("e1000_read_flash_data_ich8lan");

	/* Only 1- or 2-byte accesses within the addressable range. */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	/* Retry the whole init/program/read sequence on flash-cycle
	 * errors, up to ICH_FLASH_CYCLE_REPEAT_COUNT attempts.
	 */
	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == E1000_SUCCESS) {
			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				/* Cycle never finished: retrying won't help. */
				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3526
3527 /**
3528  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3529  *  @hw: pointer to the HW structure
3530  *  @offset: The offset (in bytes) of the word(s) to write.
3531  *  @words: Size of data to write in words
3532  *  @data: Pointer to the word(s) to write at offset.
3533  *
3534  *  Writes a byte or word to the NVM using the flash access registers.
3535  **/
3536 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3537                                    u16 *data)
3538 {
3539         struct e1000_nvm_info *nvm = &hw->nvm;
3540         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3541         u16 i;
3542
3543         DEBUGFUNC("e1000_write_nvm_ich8lan");
3544
3545         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3546             (words == 0)) {
3547                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3548                 return -E1000_ERR_NVM;
3549         }
3550
3551         nvm->ops.acquire(hw);
3552
3553         for (i = 0; i < words; i++) {
3554                 dev_spec->shadow_ram[offset+i].modified = true;
3555                 dev_spec->shadow_ram[offset+i].value = data[i];
3556         }
3557
3558         nvm->ops.release(hw);
3559
3560         return E1000_SUCCESS;
3561 }
3562
3563 /**
3564  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3565  *  @hw: pointer to the HW structure
3566  *
3567  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3568  *  which writes the checksum to the shadow ram.  The changes in the shadow
3569  *  ram are then committed to the EEPROM by processing each bank at a time
3570  *  checking for the modified bit and writing only the pending changes.
3571  *  After a successful commit, the shadow ram is cleared and is ready for
3572  *  future writes.
3573  **/
STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");

	/* Recompute the checksum into the shadow RAM first. */
	ret_val = e1000_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* The bank-commit sequence below only applies to SW flash NVM. */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	/* Copy every word to the new bank, taking modified words from
	 * the shadow RAM and the rest from the old bank.
	 */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		/* Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usec_delay(100);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usec_delay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							  act_offset + 1,
							  (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		DEBUGOUT("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	/* Clear bit 14 of the signature word (high byte only is written). */
	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's. We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		msec_delay(10);
	}

out:
	if (ret_val)
		DEBUGOUT1("NVM update error: %d\n", ret_val);

	return ret_val;
}
3719
3720 /**
3721  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3722  *  @hw: pointer to the HW structure
3723  *
3724  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3725  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
3726  *  calculated, in which case we need to calculate the checksum and set bit 6.
3727  **/
3728 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3729 {
3730         s32 ret_val;
3731         u16 data;
3732         u16 word;
3733         u16 valid_csum_mask;
3734
3735         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3736
3737         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3738          * the checksum needs to be fixed.  This bit is an indication that
3739          * the NVM was prepared by OEM software and did not calculate
3740          * the checksum...a likely scenario.
3741          */
3742         switch (hw->mac.type) {
3743         case e1000_pch_lpt:
3744                 word = NVM_COMPAT;
3745                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3746                 break;
3747         default:
3748                 word = NVM_FUTURE_INIT_WORD1;
3749                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3750                 break;
3751         }
3752
3753         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3754         if (ret_val)
3755                 return ret_val;
3756
3757         if (!(data & valid_csum_mask)) {
3758                 data |= valid_csum_mask;
3759                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3760                 if (ret_val)
3761                         return ret_val;
3762                 ret_val = hw->nvm.ops.update(hw);
3763                 if (ret_val)
3764                         return ret_val;
3765         }
3766
3767         return e1000_validate_nvm_checksum_generic(hw);
3768 }
3769
3770 /**
3771  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3772  *  @hw: pointer to the HW structure
3773  *  @offset: The offset (in bytes) of the byte/word to read.
3774  *  @size: Size of data to read, 1=byte 2=word
3775  *  @data: The byte(s) to write to the NVM.
3776  *
3777  *  Writes one/two bytes to the NVM using the flash access registers.
3778  **/
3779 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3780                                           u8 size, u16 data)
3781 {
3782         union ich8_hws_flash_status hsfsts;
3783         union ich8_hws_flash_ctrl hsflctl;
3784         u32 flash_linear_addr;
3785         u32 flash_data = 0;
3786         s32 ret_val;
3787         u8 count = 0;
3788
3789         DEBUGFUNC("e1000_write_ich8_data");
3790
3791         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3792                 return -E1000_ERR_NVM;
3793
3794         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3795                              hw->nvm.flash_base_addr);
3796
3797         do {
3798                 usec_delay(1);
3799                 /* Steps */
3800                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3801                 if (ret_val != E1000_SUCCESS)
3802                         break;
3803                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3804
3805                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3806                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3807                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3808                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3809
3810                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3811
3812                 if (size == 1)
3813                         flash_data = (u32)data & 0x00FF;
3814                 else
3815                         flash_data = (u32)data;
3816
3817                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3818
3819                 /* check if FCERR is set to 1 , if set to 1, clear it
3820                  * and try the whole sequence a few more times else done
3821                  */
3822                 ret_val =
3823                     e1000_flash_cycle_ich8lan(hw,
3824                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3825                 if (ret_val == E1000_SUCCESS)
3826                         break;
3827
3828                 /* If we're here, then things are most likely
3829                  * completely hosed, but if the error condition
3830                  * is detected, it won't hurt to give it another
3831                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3832                  */
3833                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3834                 if (hsfsts.hsf_status.flcerr)
3835                         /* Repeat for some time before giving up. */
3836                         continue;
3837                 if (!hsfsts.hsf_status.flcdone) {
3838                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3839                         break;
3840                 }
3841         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3842
3843         return ret_val;
3844 }
3845
3846 /**
3847  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3848  *  @hw: pointer to the HW structure
3849  *  @offset: The index of the byte to read.
3850  *  @data: The byte to write to the NVM.
3851  *
3852  *  Writes a single byte to the NVM using the flash access registers.
3853  **/
3854 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3855                                           u8 data)
3856 {
3857         u16 word = (u16)data;
3858
3859         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3860
3861         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3862 }
3863
3864 /**
3865  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3866  *  @hw: pointer to the HW structure
3867  *  @offset: The offset of the byte to write.
3868  *  @byte: The byte to write to the NVM.
3869  *
3870  *  Writes a single byte to the NVM using the flash access registers.
3871  *  Goes through a retry algorithm before giving up.
3872  **/
3873 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3874                                                 u32 offset, u8 byte)
3875 {
3876         s32 ret_val;
3877         u16 program_retries;
3878
3879         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3880
3881         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3882         if (!ret_val)
3883                 return ret_val;
3884
3885         for (program_retries = 0; program_retries < 100; program_retries++) {
3886                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3887                 usec_delay(100);
3888                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3889                 if (ret_val == E1000_SUCCESS)
3890                         break;
3891         }
3892         if (program_retries == 100)
3893                 return -E1000_ERR_NVM;
3894
3895         return E1000_SUCCESS;
3896 }
3897
3898 /**
3899  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3900  *  @hw: pointer to the HW structure
3901  *  @bank: 0 for first bank, 1 for second bank, etc.
3902  *
3903  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3904  *  bank N is 4096 * N + flash_reg_addr.
3905  **/
3906 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3907 {
3908         struct e1000_nvm_info *nvm = &hw->nvm;
3909         union ich8_hws_flash_status hsfsts;
3910         union ich8_hws_flash_ctrl hsflctl;
3911         u32 flash_linear_addr;
3912         /* bank size is in 16bit words - adjust to bytes */
3913         u32 flash_bank_size = nvm->flash_bank_size * 2;
3914         s32 ret_val;
3915         s32 count = 0;
3916         s32 j, iteration, sector_size;
3917
3918         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3919
3920         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3921
3922         /* Determine HW Sector size: Read BERASE bits of hw flash status
3923          * register
3924          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3925          *     consecutive sectors.  The start index for the nth Hw sector
3926          *     can be calculated as = bank * 4096 + n * 256
3927          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3928          *     The start index for the nth Hw sector can be calculated
3929          *     as = bank * 4096
3930          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3931          *     (ich9 only, otherwise error condition)
3932          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3933          */
3934         switch (hsfsts.hsf_status.berasesz) {
3935         case 0:
3936                 /* Hw sector size 256 */
3937                 sector_size = ICH_FLASH_SEG_SIZE_256;
3938                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3939                 break;
3940         case 1:
3941                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3942                 iteration = 1;
3943                 break;
3944         case 2:
3945                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3946                 iteration = 1;
3947                 break;
3948         case 3:
3949                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3950                 iteration = 1;
3951                 break;
3952         default:
3953                 return -E1000_ERR_NVM;
3954         }
3955
3956         /* Start with the base address, then add the sector offset. */
3957         flash_linear_addr = hw->nvm.flash_base_addr;
3958         flash_linear_addr += (bank) ? flash_bank_size : 0;
3959
3960         for (j = 0; j < iteration; j++) {
3961                 do {
3962                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3963
3964                         /* Steps */
3965                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3966                         if (ret_val)
3967                                 return ret_val;
3968
3969                         /* Write a value 11 (block Erase) in Flash
3970                          * Cycle field in hw flash control
3971                          */
3972                         hsflctl.regval =
3973                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3974
3975                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3976                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3977                                                 hsflctl.regval);
3978
3979                         /* Write the last 24 bits of an index within the
3980                          * block into Flash Linear address field in Flash
3981                          * Address.
3982                          */
3983                         flash_linear_addr += (j * sector_size);
3984                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3985                                               flash_linear_addr);
3986
3987                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3988                         if (ret_val == E1000_SUCCESS)
3989                                 break;
3990
3991                         /* Check if FCERR is set to 1.  If 1,
3992                          * clear it and try the whole sequence
3993                          * a few more times else Done
3994                          */
3995                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3996                                                       ICH_FLASH_HSFSTS);
3997                         if (hsfsts.hsf_status.flcerr)
3998                                 /* repeat for some time before giving up */
3999                                 continue;
4000                         else if (!hsfsts.hsf_status.flcdone)
4001                                 return ret_val;
4002                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4003         }
4004
4005         return E1000_SUCCESS;
4006 }
4007
4008 /**
4009  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4010  *  @hw: pointer to the HW structure
4011  *  @data: Pointer to the LED settings
4012  *
4013  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4014  *  settings is all 0's or F's, set the LED default to a valid LED default
4015  *  setting.
4016  **/
4017 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4018 {
4019         s32 ret_val;
4020
4021         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4022
4023         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4024         if (ret_val) {
4025                 DEBUGOUT("NVM Read Error\n");
4026                 return ret_val;
4027         }
4028
4029         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4030                 *data = ID_LED_DEFAULT_ICH8LAN;
4031
4032         return E1000_SUCCESS;
4033 }
4034
4035 /**
4036  *  e1000_id_led_init_pchlan - store LED configurations
4037  *  @hw: pointer to the HW structure
4038  *
4039  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4040  *  the PHY LED configuration register.
4041  *
4042  *  PCH also does not have an "always on" or "always off" mode which
4043  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4044  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4045  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4046  *  link based on logic in e1000_led_[on|off]_pchlan().
4047  **/
4048 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4049 {
4050         struct e1000_mac_info *mac = &hw->mac;
4051         s32 ret_val;
4052         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4053         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4054         u16 data, i, temp, shift;
4055
4056         DEBUGFUNC("e1000_id_led_init_pchlan");
4057
4058         /* Get default ID LED modes */
4059         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4060         if (ret_val)
4061                 return ret_val;
4062
4063         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4064         mac->ledctl_mode1 = mac->ledctl_default;
4065         mac->ledctl_mode2 = mac->ledctl_default;
4066
4067         for (i = 0; i < 4; i++) {
4068                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4069                 shift = (i * 5);
4070                 switch (temp) {
4071                 case ID_LED_ON1_DEF2:
4072                 case ID_LED_ON1_ON2:
4073                 case ID_LED_ON1_OFF2:
4074                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4075                         mac->ledctl_mode1 |= (ledctl_on << shift);
4076                         break;
4077                 case ID_LED_OFF1_DEF2:
4078                 case ID_LED_OFF1_ON2:
4079                 case ID_LED_OFF1_OFF2:
4080                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4081                         mac->ledctl_mode1 |= (ledctl_off << shift);
4082                         break;
4083                 default:
4084                         /* Do nothing */
4085                         break;
4086                 }
4087                 switch (temp) {
4088                 case ID_LED_DEF1_ON2:
4089                 case ID_LED_ON1_ON2:
4090                 case ID_LED_OFF1_ON2:
4091                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4092                         mac->ledctl_mode2 |= (ledctl_on << shift);
4093                         break;
4094                 case ID_LED_DEF1_OFF2:
4095                 case ID_LED_ON1_OFF2:
4096                 case ID_LED_OFF1_OFF2:
4097                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4098                         mac->ledctl_mode2 |= (ledctl_off << shift);
4099                         break;
4100                 default:
4101                         /* Do nothing */
4102                         break;
4103                 }
4104         }
4105
4106         return E1000_SUCCESS;
4107 }
4108
4109 /**
4110  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4111  *  @hw: pointer to the HW structure
4112  *
4113  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4114  *  register, so the the bus width is hard coded.
4115  **/
4116 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4117 {
4118         struct e1000_bus_info *bus = &hw->bus;
4119         s32 ret_val;
4120
4121         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4122
4123         ret_val = e1000_get_bus_info_pcie_generic(hw);
4124
4125         /* ICH devices are "PCI Express"-ish.  They have
4126          * a configuration space, but do not contain
4127          * PCI Express Capability registers, so bus width
4128          * must be hardcoded.
4129          */
4130         if (bus->width == e1000_bus_width_unknown)
4131                 bus->width = e1000_bus_width_pcie_x1;
4132
4133         return ret_val;
4134 }
4135
4136 /**
4137  *  e1000_reset_hw_ich8lan - Reset the hardware
4138  *  @hw: pointer to the HW structure
4139  *
4140  *  Does a full reset of the hardware which includes a reset of the PHY and
4141  *  MAC.
4142  **/
4143 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4144 {
4145         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4146         u16 kum_cfg;
4147         u32 ctrl, reg;
4148         s32 ret_val;
4149
4150         DEBUGFUNC("e1000_reset_hw_ich8lan");
4151
4152         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4153          * on the last TLP read/write transaction when MAC is reset.
4154          */
4155         ret_val = e1000_disable_pcie_master_generic(hw);
4156         if (ret_val)
4157                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4158
4159         DEBUGOUT("Masking off all interrupts\n");
4160         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4161
4162         /* Disable the Transmit and Receive units.  Then delay to allow
4163          * any pending transactions to complete before we hit the MAC
4164          * with the global reset.
4165          */
4166         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4167         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4168         E1000_WRITE_FLUSH(hw);
4169
4170         msec_delay(10);
4171
4172         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4173         if (hw->mac.type == e1000_ich8lan) {
4174                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4175                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4176                 /* Set Packet Buffer Size to 16k. */
4177                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4178         }
4179
4180         if (hw->mac.type == e1000_pchlan) {
4181                 /* Save the NVM K1 bit setting*/
4182                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4183                 if (ret_val)
4184                         return ret_val;
4185
4186                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4187                         dev_spec->nvm_k1_enabled = true;
4188                 else
4189                         dev_spec->nvm_k1_enabled = false;
4190         }
4191
4192         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4193
4194         if (!hw->phy.ops.check_reset_block(hw)) {
4195                 /* Full-chip reset requires MAC and PHY reset at the same
4196                  * time to make sure the interface between MAC and the
4197                  * external PHY is reset.
4198                  */
4199                 ctrl |= E1000_CTRL_PHY_RST;
4200
4201                 /* Gate automatic PHY configuration by hardware on
4202                  * non-managed 82579
4203                  */
4204                 if ((hw->mac.type == e1000_pch2lan) &&
4205                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4206                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4207         }
4208         ret_val = e1000_acquire_swflag_ich8lan(hw);
4209         DEBUGOUT("Issuing a global reset to ich8lan\n");
4210         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4211         /* cannot issue a flush here because it hangs the hardware */
4212         msec_delay(20);
4213
4214         /* Set Phy Config Counter to 50msec */
4215         if (hw->mac.type == e1000_pch2lan) {
4216                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4217                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4218                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4219                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4220         }
4221
4222         if (!ret_val)
4223                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4224
4225         if (ctrl & E1000_CTRL_PHY_RST) {
4226                 ret_val = hw->phy.ops.get_cfg_done(hw);
4227                 if (ret_val)
4228                         return ret_val;
4229
4230                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4231                 if (ret_val)
4232                         return ret_val;
4233         }
4234
4235         /* For PCH, this write will make sure that any noise
4236          * will be detected as a CRC error and be dropped rather than show up
4237          * as a bad packet to the DMA engine.
4238          */
4239         if (hw->mac.type == e1000_pchlan)
4240                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4241
4242         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4243         E1000_READ_REG(hw, E1000_ICR);
4244
4245         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4246         reg |= E1000_KABGTXD_BGSQLBIAS;
4247         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4248
4249         return E1000_SUCCESS;
4250 }
4251
4252 /**
4253  *  e1000_init_hw_ich8lan - Initialize the hardware
4254  *  @hw: pointer to the HW structure
4255  *
4256  *  Prepares the hardware for transmit and receive by doing the following:
4257  *   - initialize hardware bits
4258  *   - initialize LED identification
4259  *   - setup receive address registers
4260  *   - setup flow control
4261  *   - setup transmit descriptors
4262  *   - clear statistics
4263  **/
4264 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4265 {
4266         struct e1000_mac_info *mac = &hw->mac;
4267         u32 ctrl_ext, txdctl, snoop;
4268         s32 ret_val;
4269         u16 i;
4270
4271         DEBUGFUNC("e1000_init_hw_ich8lan");
4272
4273         e1000_initialize_hw_bits_ich8lan(hw);
4274
4275         /* Initialize identification LED */
4276         ret_val = mac->ops.id_led_init(hw);
4277         /* An error is not fatal and we should not stop init due to this */
4278         if (ret_val)
4279                 DEBUGOUT("Error initializing identification LED\n");
4280
4281         /* Setup the receive address. */
4282         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4283
4284         /* Zero out the Multicast HASH table */
4285         DEBUGOUT("Zeroing the MTA\n");
4286         for (i = 0; i < mac->mta_reg_count; i++)
4287                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4288
4289         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4290          * the ME.  Disable wakeup by clearing the host wakeup bit.
4291          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4292          */
4293         if (hw->phy.type == e1000_phy_82578) {
4294                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4295                 i &= ~BM_WUC_HOST_WU_BIT;
4296                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4297                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4298                 if (ret_val)
4299                         return ret_val;
4300         }
4301
4302         /* Setup link and flow control */
4303         ret_val = mac->ops.setup_link(hw);
4304
4305         /* Set the transmit descriptor write-back policy for both queues */
4306         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4307         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4308                   E1000_TXDCTL_FULL_TX_DESC_WB);
4309         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4310                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4311         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4312         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4313         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4314                   E1000_TXDCTL_FULL_TX_DESC_WB);
4315         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4316                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4317         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4318
4319         /* ICH8 has opposite polarity of no_snoop bits.
4320          * By default, we should use snoop behavior.
4321          */
4322         if (mac->type == e1000_ich8lan)
4323                 snoop = PCIE_ICH8_SNOOP_ALL;
4324         else
4325                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4326         e1000_set_pcie_no_snoop_generic(hw, snoop);
4327
4328         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4329         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4330         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4331
4332         /* Clear all of the statistics registers (clear on read).  It is
4333          * important that we do this after we have tried to establish link
4334          * because the symbol error count will increment wildly if there
4335          * is no link.
4336          */
4337         e1000_clear_hw_cntrs_ich8lan(hw);
4338
4339         return ret_val;
4340 }
4341
4342 /**
4343  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4344  *  @hw: pointer to the HW structure
4345  *
4346  *  Sets/Clears required hardware bits necessary for correctly setting up the
4347  *  hardware for transmit and receive.
4348  **/
4349 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4350 {
4351         u32 reg;
4352
4353         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4354
4355         /* Extended Device Control */
4356         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4357         reg |= (1 << 22);
4358         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4359         if (hw->mac.type >= e1000_pchlan)
4360                 reg |= E1000_CTRL_EXT_PHYPDEN;
4361         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4362
4363         /* Transmit Descriptor Control 0 */
4364         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4365         reg |= (1 << 22);
4366         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4367
4368         /* Transmit Descriptor Control 1 */
4369         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4370         reg |= (1 << 22);
4371         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4372
4373         /* Transmit Arbitration Control 0 */
4374         reg = E1000_READ_REG(hw, E1000_TARC(0));
4375         if (hw->mac.type == e1000_ich8lan)
4376                 reg |= (1 << 28) | (1 << 29);
4377         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4378         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4379
4380         /* Transmit Arbitration Control 1 */
4381         reg = E1000_READ_REG(hw, E1000_TARC(1));
4382         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4383                 reg &= ~(1 << 28);
4384         else
4385                 reg |= (1 << 28);
4386         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4387         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4388
4389         /* Device Status */
4390         if (hw->mac.type == e1000_ich8lan) {
4391                 reg = E1000_READ_REG(hw, E1000_STATUS);
4392                 reg &= ~(1 << 31);
4393                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4394         }
4395
4396         /* work-around descriptor data corruption issue during nfs v2 udp
4397          * traffic, just disable the nfs filtering capability
4398          */
4399         reg = E1000_READ_REG(hw, E1000_RFCTL);
4400         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4401
4402         /* Disable IPv6 extension header parsing because some malformed
4403          * IPv6 headers can hang the Rx.
4404          */
4405         if (hw->mac.type == e1000_ich8lan)
4406                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4407         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4408
4409         /* Enable ECC on Lynxpoint */
4410         if (hw->mac.type == e1000_pch_lpt) {
4411                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4412                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4413                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4414
4415                 reg = E1000_READ_REG(hw, E1000_CTRL);
4416                 reg |= E1000_CTRL_MEHE;
4417                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4418         }
4419
4420         return;
4421 }
4422
4423 /**
4424  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4425  *  @hw: pointer to the HW structure
4426  *
4427  *  Determines which flow control settings to use, then configures flow
4428  *  control.  Calls the appropriate media-specific link configuration
4429  *  function.  Assuming the adapter has a valid link partner, a valid link
4430  *  should be established.  Assumes the hardware has previously been reset
4431  *  and the transmitter and receiver are not enabled.
4432  **/
4433 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4434 {
4435         s32 ret_val;
4436
4437         DEBUGFUNC("e1000_setup_link_ich8lan");
4438
4439         if (hw->phy.ops.check_reset_block(hw))
4440                 return E1000_SUCCESS;
4441
4442         /* ICH parts do not have a word in the NVM to determine
4443          * the default flow control setting, so we explicitly
4444          * set it to full.
4445          */
4446         if (hw->fc.requested_mode == e1000_fc_default)
4447                 hw->fc.requested_mode = e1000_fc_full;
4448
4449         /* Save off the requested flow control mode for use later.  Depending
4450          * on the link partner's capabilities, we may or may not use this mode.
4451          */
4452         hw->fc.current_mode = hw->fc.requested_mode;
4453
4454         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4455                 hw->fc.current_mode);
4456
4457         /* Continue to configure the copper link. */
4458         ret_val = hw->mac.ops.setup_physical_interface(hw);
4459         if (ret_val)
4460                 return ret_val;
4461
4462         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4463         if ((hw->phy.type == e1000_phy_82578) ||
4464             (hw->phy.type == e1000_phy_82579) ||
4465             (hw->phy.type == e1000_phy_i217) ||
4466             (hw->phy.type == e1000_phy_82577)) {
4467                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4468
4469                 ret_val = hw->phy.ops.write_reg(hw,
4470                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4471                                              hw->fc.pause_time);
4472                 if (ret_val)
4473                         return ret_val;
4474         }
4475
4476         return e1000_set_fc_watermarks_generic(hw);
4477 }
4478
4479 /**
4480  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4481  *  @hw: pointer to the HW structure
4482  *
4483  *  Configures the kumeran interface to the PHY to wait the appropriate time
4484  *  when polling the PHY, then call the generic setup_copper_link to finish
4485  *  configuring the copper link.
4486  **/
4487 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4488 {
4489         u32 ctrl;
4490         s32 ret_val;
4491         u16 reg_data;
4492
4493         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4494
4495         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4496         ctrl |= E1000_CTRL_SLU;
4497         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4498         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4499
4500         /* Set the mac to wait the maximum time between each iteration
4501          * and increase the max iterations when polling the phy;
4502          * this fixes erroneous timeouts at 10Mbps.
4503          */
4504         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4505                                                0xFFFF);
4506         if (ret_val)
4507                 return ret_val;
4508         ret_val = e1000_read_kmrn_reg_generic(hw,
4509                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4510                                               &reg_data);
4511         if (ret_val)
4512                 return ret_val;
4513         reg_data |= 0x3F;
4514         ret_val = e1000_write_kmrn_reg_generic(hw,
4515                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4516                                                reg_data);
4517         if (ret_val)
4518                 return ret_val;
4519
4520         switch (hw->phy.type) {
4521         case e1000_phy_igp_3:
4522                 ret_val = e1000_copper_link_setup_igp(hw);
4523                 if (ret_val)
4524                         return ret_val;
4525                 break;
4526         case e1000_phy_bm:
4527         case e1000_phy_82578:
4528                 ret_val = e1000_copper_link_setup_m88(hw);
4529                 if (ret_val)
4530                         return ret_val;
4531                 break;
4532         case e1000_phy_82577:
4533         case e1000_phy_82579:
4534                 ret_val = e1000_copper_link_setup_82577(hw);
4535                 if (ret_val)
4536                         return ret_val;
4537                 break;
4538         case e1000_phy_ife:
4539                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4540                                                &reg_data);
4541                 if (ret_val)
4542                         return ret_val;
4543
4544                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4545
4546                 switch (hw->phy.mdix) {
4547                 case 1:
4548                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4549                         break;
4550                 case 2:
4551                         reg_data |= IFE_PMC_FORCE_MDIX;
4552                         break;
4553                 case 0:
4554                 default:
4555                         reg_data |= IFE_PMC_AUTO_MDIX;
4556                         break;
4557                 }
4558                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4559                                                 reg_data);
4560                 if (ret_val)
4561                         return ret_val;
4562                 break;
4563         default:
4564                 break;
4565         }
4566
4567         return e1000_setup_copper_link_generic(hw);
4568 }
4569
4570 /**
4571  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4572  *  @hw: pointer to the HW structure
4573  *
4574  *  Calls the PHY specific link setup function and then calls the
4575  *  generic setup_copper_link to finish configuring the link for
4576  *  Lynxpoint PCH devices
4577  **/
4578 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4579 {
4580         u32 ctrl;
4581         s32 ret_val;
4582
4583         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4584
4585         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4586         ctrl |= E1000_CTRL_SLU;
4587         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4588         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4589
4590         ret_val = e1000_copper_link_setup_82577(hw);
4591         if (ret_val)
4592                 return ret_val;
4593
4594         return e1000_setup_copper_link_generic(hw);
4595 }
4596
4597 /**
4598  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4599  *  @hw: pointer to the HW structure
4600  *  @speed: pointer to store current link speed
4601  *  @duplex: pointer to store the current link duplex
4602  *
4603  *  Calls the generic get_speed_and_duplex to retrieve the current link
4604  *  information and then calls the Kumeran lock loss workaround for links at
4605  *  gigabit speeds.
4606  **/
4607 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4608                                           u16 *duplex)
4609 {
4610         s32 ret_val;
4611
4612         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4613
4614         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4615         if (ret_val)
4616                 return ret_val;
4617
4618         if ((hw->mac.type == e1000_ich8lan) &&
4619             (hw->phy.type == e1000_phy_igp_3) &&
4620             (*speed == SPEED_1000)) {
4621                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4622         }
4623
4624         return ret_val;
4625 }
4626
/**
 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 *  @hw: pointer to the HW structure
 *
 *  Work-around for 82566 Kumeran PCS lock loss:
 *  On link status change (i.e. PCI reset, speed change) and link is up and
 *  speed is gigabit-
 *    0) if workaround is optionally disabled do nothing
 *    1) wait 1ms for Kumeran link to come up
 *    2) check Kumeran Diagnostic register PCS lock loss bit
 *    3) if not set the link is locked (all is good), otherwise...
 *    4) reset the PHY
 *    5) repeat up to 10 times
 *  Note: this is only called for IGP3 copper when speed is 1gb.
 *
 *  Returns E1000_SUCCESS when PCS lock is confirmed (or the workaround is
 *  disabled / link is down), a PHY access error code on register failure,
 *  or -E1000_ERR_PHY when 10 PHY resets fail to restore PCS lock.
 **/
STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	/* Workaround can be toggled off via
	 * e1000_set_kmrn_lock_loss_workaround_ich8lan().
	 */
	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return E1000_SUCCESS;

	/* Make sure link is up before proceeding.  If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	/* NOTE(review): the return code of the link check is deliberately
	 * not propagated; only the resulting link state is used here.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return E1000_SUCCESS;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return E1000_SUCCESS;

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* All attempts failed: give up on gigabit entirely.
	 * Disable GigE link negotiation
	 */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}
4695
4696 /**
4697  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4698  *  @hw: pointer to the HW structure
4699  *  @state: boolean value used to set the current Kumeran workaround state
4700  *
4701  *  If ICH8, set the current Kumeran workaround state (enabled - true
4702  *  /disabled - false).
4703  **/
4704 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4705                                                  bool state)
4706 {
4707         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4708
4709         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4710
4711         if (hw->mac.type != e1000_ich8lan) {
4712                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4713                 return;
4714         }
4715
4716         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4717
4718         return;
4719 }
4720
/**
 *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 *
 *  Only applies to IGP3 PHYs; a no-op otherwise.  The sequence is tried at
 *  most twice (the retry counter breaks the loop after one PHY reset).
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8  retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable.
		 * NOTE(review): PHY read/write errors here are ignored;
		 * the read-back check below is the only verification.
		 */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
4774
/**
 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with any 1G Phy.
 *
 *  Best-effort: Kumeran access errors abort the sequence silently, and the
 *  final write's return code is intentionally not checked.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	/* Applies to ICH8 only, and not to IFE (10/100-only) PHYs */
	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	/* Pulse the near-end loopback bit: set... */
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	/* ...then clear it again */
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}
4810
/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'Gig Disable' to force link speed negotiation to a lower speed based on
 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 *  needs to be written.
 *  Parts that support (and are linked to a partner which support) EEE in
 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 *  than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	/* Disable gigabit before Sx; PHY_CTRL is written at 'out' below. */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		/* On I218 parts, stop requesting the PLL clock before Sx */
		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		/* All PHY register access below is done under the PHY lock */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	/* Commit GbE-disable (and any LPLU changes) even on error paths */
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
4947
/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, setup Intel Rapid Start Technology.
 *
 *  NOTE(review): declared to return u32 but actually carries s32 status
 *  values (including negative error codes from PHY ops) — callers appear
 *  to treat it as a status code; confirm against the public header.
 **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	/* Nothing to do for parts older than PCH2 */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		/* PHY register access below must hold the PHY lock */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}
5021
5022 /**
5023  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5024  *  @hw: pointer to the HW structure
5025  *
5026  *  Return the LED back to the default configuration.
5027  **/
5028 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5029 {
5030         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5031
5032         if (hw->phy.type == e1000_phy_ife)
5033                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5034                                              0);
5035
5036         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5037         return E1000_SUCCESS;
5038 }
5039
5040 /**
5041  *  e1000_led_on_ich8lan - Turn LEDs on
5042  *  @hw: pointer to the HW structure
5043  *
5044  *  Turn on the LEDs.
5045  **/
5046 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5047 {
5048         DEBUGFUNC("e1000_led_on_ich8lan");
5049
5050         if (hw->phy.type == e1000_phy_ife)
5051                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5052                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5053
5054         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5055         return E1000_SUCCESS;
5056 }
5057
5058 /**
5059  *  e1000_led_off_ich8lan - Turn LEDs off
5060  *  @hw: pointer to the HW structure
5061  *
5062  *  Turn off the LEDs.
5063  **/
5064 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5065 {
5066         DEBUGFUNC("e1000_led_off_ich8lan");
5067
5068         if (hw->phy.type == e1000_phy_ife)
5069                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5070                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5071
5072         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5073         return E1000_SUCCESS;
5074 }
5075
5076 /**
5077  *  e1000_setup_led_pchlan - Configures SW controllable LED
5078  *  @hw: pointer to the HW structure
5079  *
5080  *  This prepares the SW controllable LED for use.
5081  **/
5082 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5083 {
5084         DEBUGFUNC("e1000_setup_led_pchlan");
5085
5086         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5087                                      (u16)hw->mac.ledctl_mode1);
5088 }
5089
5090 /**
5091  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5092  *  @hw: pointer to the HW structure
5093  *
5094  *  Return the LED back to the default configuration.
5095  **/
5096 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5097 {
5098         DEBUGFUNC("e1000_cleanup_led_pchlan");
5099
5100         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5101                                      (u16)hw->mac.ledctl_default);
5102 }
5103
5104 /**
5105  *  e1000_led_on_pchlan - Turn LEDs on
5106  *  @hw: pointer to the HW structure
5107  *
5108  *  Turn on the LEDs.
5109  **/
5110 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5111 {
5112         u16 data = (u16)hw->mac.ledctl_mode2;
5113         u32 i, led;
5114
5115         DEBUGFUNC("e1000_led_on_pchlan");
5116
5117         /* If no link, then turn LED on by setting the invert bit
5118          * for each LED that's mode is "link_up" in ledctl_mode2.
5119          */
5120         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5121                 for (i = 0; i < 3; i++) {
5122                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5123                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5124                             E1000_LEDCTL_MODE_LINK_UP)
5125                                 continue;
5126                         if (led & E1000_PHY_LED0_IVRT)
5127                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5128                         else
5129                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5130                 }
5131         }
5132
5133         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5134 }
5135
5136 /**
5137  *  e1000_led_off_pchlan - Turn LEDs off
5138  *  @hw: pointer to the HW structure
5139  *
5140  *  Turn off the LEDs.
5141  **/
5142 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5143 {
5144         u16 data = (u16)hw->mac.ledctl_mode1;
5145         u32 i, led;
5146
5147         DEBUGFUNC("e1000_led_off_pchlan");
5148
5149         /* If no link, then turn LED off by clearing the invert bit
5150          * for each LED that's mode is "link_up" in ledctl_mode1.
5151          */
5152         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5153                 for (i = 0; i < 3; i++) {
5154                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5155                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5156                             E1000_LEDCTL_MODE_LINK_UP)
5157                                 continue;
5158                         if (led & E1000_PHY_LED0_IVRT)
5159                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5160                         else
5161                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5162                 }
5163         }
5164
5165         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5166 }
5167
/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and continues.  If we were
 *  to return with error, EEPROM-less silicon would not be able to be reset
 *  or change link.
 **/
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		/* ICH10+ has no EECD presence bit; use NVM bank detection
		 * to decide whether an EEPROM is present.
		 */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}
5228
5229 /**
5230  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5231  * @hw: pointer to the HW structure
5232  *
5233  * In the case of a PHY power down to save power, or to turn off link during a
5234  * driver unload, or wake on lan is not enabled, remove the link.
5235  **/
5236 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5237 {
5238         /* If the management interface is not enabled, then power down */
5239         if (!(hw->mac.ops.check_mng_mode(hw) ||
5240               hw->phy.ops.check_reset_block(hw)))
5241                 e1000_power_down_phy_copper(hw);
5242
5243         return;
5244 }
5245
/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 *  The register reads below discard their values; per the function's
 *  contract these counters are cleared by reading them.
 **/
STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers.  These live on the HV stats page,
	 * so the PHY must be acquired and the page selected first; the
	 * reads themselves are best-effort (return codes ignored).
	 */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}
5306