e1000/base: return code after setting receive address register
dpdk.git: drivers/net/e1000/base/e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 7:0 GbE region Read Access */
173                 u32 grwa:8; /* 15:8 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
179
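/* Example (illustrative sketch only, not part of the driver): the unions
 * above give bit-field access to the 16-bit flash registers for
 * read-modify-write sequences, e.g. clearing stale error bits in HSFSTS
 * when no flash cycle is in progress:
 *
 *     union ich8_hws_flash_status hsfsts;
 *
 *     hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *     if (!hsfsts.hsf_status.flcinprog &&
 *         (hsfsts.hsf_status.flcerr || hsfsts.hsf_status.dael)) {
 *             hsfsts.hsf_status.flcerr = 1;
 *             hsfsts.hsf_status.dael = 1;
 *             E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 *     }
 */
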
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214                 if  (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Unforce SMBus mode in PHY */
238                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
239                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
240                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
241
242                 /* Unforce SMBus mode in MAC */
243                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
244                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
245                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
246         }
247
248         return true;
249 }
250
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
258 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
259 {
260         u32 mac_reg;
261
262         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
263
264         /* Set Phy Config Counter to 50msec */
265         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
266         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
267         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
268         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
269
270         /* Toggle LANPHYPC Value bit */
271         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
272         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
273         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
274         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275         E1000_WRITE_FLUSH(hw);
276         usec_delay(10);
277         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280
281         if (hw->mac.type < e1000_pch_lpt) {
282                 msec_delay(50);
283         } else {
284                 u16 count = 20;
285
286                 do {
287                         msec_delay(5);
288                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
289                            E1000_CTRL_EXT_LPCD) && count--);
290
291                 msec_delay(30);
292         }
293 }
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
302 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
303 {
304         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
305         s32 ret_val;
306
307         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
308
309         /* Gate automatic PHY configuration by hardware on managed and
310          * non-managed 82579 and newer adapters.
311          */
312         e1000_gate_hw_phy_config_ich8lan(hw, true);
313
314 #ifdef ULP_SUPPORT
315         /* It is not possible to be certain of the current state of ULP
316          * so forcibly disable it.
317          */
318         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
319
320 #endif /* ULP_SUPPORT */
321         ret_val = hw->phy.ops.acquire(hw);
322         if (ret_val) {
323                 DEBUGOUT("Failed to initialize PHY flow\n");
324                 goto out;
325         }
326
327         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
328          * inaccessible and resetting the PHY is not blocked, toggle the
329          * LANPHYPC Value bit to force the interconnect to PCIe mode.
330          */
331         switch (hw->mac.type) {
332         case e1000_pch_lpt:
333                 if (e1000_phy_is_accessible_pchlan(hw))
334                         break;
335
336                 /* Before toggling LANPHYPC, see if PHY is accessible by
337                  * forcing MAC to SMBus mode first.
338                  */
339                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
340                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
341                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
342
343                 /* Wait 50 milliseconds for the MAC to finish any
344                  * retries it might still be performing from previous
345                  * attempts to acknowledge a PHY read request.
346                  */
347                 msec_delay(50);
348
349                 /* fall-through */
350         case e1000_pch2lan:
351                 if (e1000_phy_is_accessible_pchlan(hw))
352                         break;
353
354                 /* fall-through */
355         case e1000_pchlan:
356                 if ((hw->mac.type == e1000_pchlan) &&
357                     (fwsm & E1000_ICH_FWSM_FW_VALID))
358                         break;
359
360                 if (hw->phy.ops.check_reset_block(hw)) {
361                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
362                         ret_val = -E1000_ERR_PHY;
363                         break;
364                 }
365
366                 /* Toggle LANPHYPC Value bit */
367                 e1000_toggle_lanphypc_pch_lpt(hw);
368                 if (hw->mac.type >= e1000_pch_lpt) {
369                         if (e1000_phy_is_accessible_pchlan(hw))
370                                 break;
371
372                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
373                          * so ensure that the MAC is also out of SMBus mode
374                          */
375                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
376                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
377                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
378
379                         if (e1000_phy_is_accessible_pchlan(hw))
380                                 break;
381
382                         ret_val = -E1000_ERR_PHY;
383                 }
384                 break;
385         default:
386                 break;
387         }
388
389         hw->phy.ops.release(hw);
390         if (!ret_val) {
391
392                 /* Check to see if able to reset PHY.  Print error if not */
393                 if (hw->phy.ops.check_reset_block(hw)) {
394                         ERROR_REPORT("Reset blocked by ME\n");
395                         goto out;
396                 }
397
398                 /* Reset the PHY before any access to it.  Doing so ensures
399                  * that the PHY is in a known good state before we read/write
400                  * PHY registers.  The generic reset is sufficient here,
401                  * because we haven't determined the PHY type yet.
402                  */
403                 ret_val = e1000_phy_hw_reset_generic(hw);
404                 if (ret_val)
405                         goto out;
406
407                 /* On a successful reset, possibly need to wait for the PHY
408                  * to quiesce to an accessible state before returning control
409                  * to the calling function.  If the PHY does not quiesce, then
410                  * return E1000_BLK_PHY_RESET, as this is the condition that
411                  * the PHY is in.
412                  */
413                 ret_val = hw->phy.ops.check_reset_block(hw);
414                 if (ret_val)
415                         ERROR_REPORT("ME blocked access to PHY after reset\n");
416         }
417
418 out:
419         /* Ungate automatic PHY configuration on non-managed 82579 */
420         if ((hw->mac.type == e1000_pch2lan) &&
421             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
422                 msec_delay(10);
423                 e1000_gate_hw_phy_config_ich8lan(hw, false);
424         }
425
426         return ret_val;
427 }
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes).
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
675
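/* Worked example (illustrative only, hypothetical GFPREG value): if
 * GFPREG = 0x000A0001, the GbE region spans 4096-byte sectors 0x001
 * through 0x00A inclusive, so the computation above yields:
 *
 *     sector_base_addr = 0x0001, sector_end_addr = 0x000A + 1 = 0x000B
 *     flash_base_addr  = 0x0001 << 12 = 0x1000 (byte offset)
 *     flash_bank_size  = (0x000B - 0x0001) << 12 = 40960 bytes total,
 *                        i.e. 20480 bytes = 10240 words per bank
 */
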
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg & 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @addr: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: value to be read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_write_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
857
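/* Usage sketch (illustrative only): the _locked EMI accessors above assume
 * the SW/FW/HW semaphore is already held, so callers bracket them with the
 * PHY acquire/release ops, e.g.:
 *
 *     ret_val = hw->phy.ops.acquire(hw);
 *     if (ret_val)
 *             return ret_val;
 *     ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *     hw->phy.ops.release(hw);
 *
 * e1000_set_eee_pchlan() below follows this pattern.
 */
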
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on the setting in the dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until that time; otherwise, there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
940         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
941         if (ret_val)
942                 goto release;
943
944         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
945 release:
946         hw->phy.ops.release(hw);
947
948         return ret_val;
949 }
950
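/* Example (illustrative only): EEE negotiation above is gated by the
 * dev_spec eee_disable flag, so a caller that wants EEE kept off could
 * set the flag before (re)configuring the link:
 *
 *     hw->dev_spec.ich8lan.eee_disable = true;
 *     ret_val = e1000_set_eee_pchlan(hw);
 *
 * With the flag set, only the per-speed LPI enable bits are cleared and
 * the link partner's EEE ability is not read.
 */
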
951 /**
952  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
953  *  @hw:   pointer to the HW structure
954  *  @link: link up bool flag
955  *
956  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
957  *  preventing further DMA write requests.  Work around the issue by disabling
958  *  the de-assertion of the clock request when in 1Gbps mode.
959  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
960  *  speeds in order to avoid Tx hangs.
961  **/
962 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
963 {
964         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
965         u32 status = E1000_READ_REG(hw, E1000_STATUS);
966         s32 ret_val = E1000_SUCCESS;
967         u16 reg;
968
969         if (link && (status & E1000_STATUS_SPEED_1000)) {
970                 ret_val = hw->phy.ops.acquire(hw);
971                 if (ret_val)
972                         return ret_val;
973
974                 ret_val =
975                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
976                                                &reg);
977                 if (ret_val)
978                         goto release;
979
980                 ret_val =
981                     e1000_write_kmrn_reg_locked(hw,
982                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
983                                                 reg &
984                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
985                 if (ret_val)
986                         goto release;
987
988                 usec_delay(10);
989
990                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
991                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
992
993                 ret_val =
994                     e1000_write_kmrn_reg_locked(hw,
995                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
996                                                 reg);
997 release:
998                 hw->phy.ops.release(hw);
999         } else {
1000                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1001                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1002
1003                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1004                               (status & E1000_STATUS_FD)))
1005                         goto update_fextnvm6;
1006
1007                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1008                 if (ret_val)
1009                         return ret_val;
1010
1011                 /* Clear link status transmit timeout */
1012                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1013
1014                 if (status & E1000_STATUS_SPEED_100) {
1015                         /* Set inband Tx timeout to 5x10us for 100Half */
1016                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1017
1018                         /* Do not extend the K1 entry latency for 100Half */
1019                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1020                 } else {
1021                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1022                         reg |= 50 <<
1023                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1024
1025                         /* Extend the K1 entry latency for 10 Mbps */
1026                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1027                 }
1028
1029                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1030                 if (ret_val)
1031                         return ret_val;
1032
1033 update_fextnvm6:
1034                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1035         }
1036
1037         return ret_val;
1038 }
1039
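/* Note (illustrative only): the inband link-status Tx timeout programmed
 * by e1000_k1_workaround_lpt_lp() is in 10us units, so the values above
 * work out to:
 *
 *     100Half:        5 x 10us =  50us
 *     10Full/10Half: 50 x 10us = 500us
 */
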
1040 #ifdef ULP_SUPPORT
1041 /**
1042  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1043  *  @hw: pointer to the HW structure
1044  *  @to_sx: boolean indicating a system power state transition to Sx
1045  *
1046  *  When link is down, configure ULP mode to significantly reduce the power
1047  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1048  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1049  *  system, configure the ULP mode by software.
1050  **/
1051 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1052 {
1053         u32 mac_reg;
1054         s32 ret_val = E1000_SUCCESS;
1055         u16 phy_reg;
1056
1057         if ((hw->mac.type < e1000_pch_lpt) ||
1058             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1059             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1060             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1061             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1062             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1063                 return 0;
1064
1065         if (!to_sx) {
1066                 int i = 0;
1067                 /* Poll up to 5 seconds for Cable Disconnected indication */
1068                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1069                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1070                         /* Bail if link is re-acquired */
1071                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1072                                 return -E1000_ERR_PHY;
1073                         if (i++ == 100)
1074                                 break;
1075
1076                         msec_delay(50);
1077                 }
1078                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1079                           (E1000_READ_REG(hw, E1000_FEXT) &
1080                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1081                           i * 50);
1082         }
1083
1084         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1085                 /* Request ME configure ULP mode in the PHY */
1086                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1087                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1088                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1089
1090                 goto out;
1091         }
1092
1093         ret_val = hw->phy.ops.acquire(hw);
1094         if (ret_val)
1095                 goto out;
1096
1097         /* During S0 Idle keep the phy in PCI-E mode */
1098         if (hw->dev_spec.ich8lan.smbus_disable)
1099                 goto skip_smbus;
1100
1101         /* Force SMBus mode in PHY */
1102         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1103         if (ret_val)
1104                 goto release;
1105         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1106         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1107
1108         /* Force SMBus mode in MAC */
1109         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1110         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1111         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1112
1113 skip_smbus:
1114         if (!to_sx) {
1115                 /* Change the 'Link Status Change' interrupt to trigger
1116                  * on 'Cable Status Change'
1117                  */
1118                 ret_val = e1000_read_kmrn_reg_locked(hw,
1119                                                      E1000_KMRNCTRLSTA_OP_MODES,
1120                                                      &phy_reg);
1121                 if (ret_val)
1122                         goto release;
1123                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1124                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1125                                             phy_reg);
1126         }
1127
1128         /* Set Inband ULP Exit, Reset to SMBus mode and
1129          * Disable SMBus Release on PERST# in PHY
1130          */
1131         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1132         if (ret_val)
1133                 goto release;
1134         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1135                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1136         if (to_sx) {
1137                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1138                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1139
1140                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1141         } else {
1142                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1143         }
1144         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1145
1146         /* Set Disable SMBus Release on PERST# in MAC */
1147         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1148         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1149         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1150
1151         /* Commit ULP changes in PHY by starting auto ULP configuration */
1152         phy_reg |= I218_ULP_CONFIG1_START;
1153         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1154
1155         if (!to_sx) {
1156                 /* Disable Tx so that the MAC doesn't send any (buffered)
1157                  * packets to the PHY.
1158                  */
1159                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1160                 mac_reg &= ~E1000_TCTL_EN;
1161                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1162         }
1163 release:
1164         hw->phy.ops.release(hw);
1165 out:
1166         if (ret_val)
1167                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1168         else
1169                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1170
1171         return ret_val;
1172 }
1173
1174 /**
1175  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1176  *  @hw: pointer to the HW structure
1177  *  @force: boolean indicating whether or not to force disabling ULP
1178  *
1179  *  Un-configure ULP mode when link is up, the system is transitioned from
1180  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1181  *  system, poll for an indication from ME that ULP has been un-configured.
1182  *  If not on an ME enabled system, un-configure the ULP mode by software.
1183  *
1184  *  During nominal operation, this function is called when link is acquired
1185  *  to disable ULP mode (force=false); otherwise, for example when unloading
1186  *  the driver or during Sx->S0 transitions, this is called with force=true
1187  *  to forcibly disable ULP.
1188  *
1189  *  When the cable is plugged in while the device is in D0, a Cable Status
1190  *  Change interrupt is generated which causes this function to be called
1191  *  to partially disable ULP mode and restart autonegotiation.  This function
1192  *  is then called again due to the resulting Link Status Change interrupt
1193  *  to finish cleaning up after the ULP flow.
1194  **/
1195 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1196 {
1197         s32 ret_val = E1000_SUCCESS;
1198         u32 mac_reg;
1199         u16 phy_reg;
1200         int i = 0;
1201
1202         if ((hw->mac.type < e1000_pch_lpt) ||
1203             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1204             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1205             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1206             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1207             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1208                 return 0;
1209
1210         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1211                 if (force) {
1212                         /* Request ME un-configure ULP mode in the PHY */
1213                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1214                         mac_reg &= ~E1000_H2ME_ULP;
1215                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1216                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1217                 }
1218
1219                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1220                 while (E1000_READ_REG(hw, E1000_FWSM) &
1221                        E1000_FWSM_ULP_CFG_DONE) {
1222                         if (i++ == 10) {
1223                                 ret_val = -E1000_ERR_PHY;
1224                                 goto out;
1225                         }
1226
1227                         msec_delay(10);
1228                 }
1229                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1230
1231                 if (force) {
1232                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1233                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1234                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1235                 } else {
1236                         /* Clear H2ME.ULP after ME ULP configuration */
1237                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1238                         mac_reg &= ~E1000_H2ME_ULP;
1239                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1240
1241                         /* Restore link speed advertisements and restart
1242                          * Auto-negotiation
1243                          */
1244                         ret_val = e1000_phy_setup_autoneg(hw);
1245                         if (ret_val)
1246                                 goto out;
1247
1248                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1249                 }
1250
1251                 goto out;
1252         }
1253
1254         ret_val = hw->phy.ops.acquire(hw);
1255         if (ret_val)
1256                 goto out;
1257
1258         /* Revert the 'Link Status Change' interrupt so that it no
1259          * longer triggers on 'Cable Status Change'
1260          */
1261         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1262                                              &phy_reg);
1263         if (ret_val)
1264                 goto release;
1265         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1266         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1267
1268         if (force)
1269                 /* Toggle LANPHYPC Value bit */
1270                 e1000_toggle_lanphypc_pch_lpt(hw);
1271
1272         /* Unforce SMBus mode in PHY */
1273         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1274         if (ret_val) {
1275                 /* The MAC might be in PCIe mode, so temporarily force to
1276                  * SMBus mode in order to access the PHY.
1277                  */
1278                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1279                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1280                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1281
1282                 msec_delay(50);
1283
1284                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1285                                                        &phy_reg);
1286                 if (ret_val)
1287                         goto release;
1288         }
1289         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1290         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1291
1292         /* Unforce SMBus mode in MAC */
1293         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1294         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1295         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1296
1297         /* When ULP mode was previously entered, K1 was disabled by the
1298          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1299          */
1300         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1301         if (ret_val)
1302                 goto release;
1303         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1304         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1305
1306         /* Clear ULP enabled configuration */
1307         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1308         if (ret_val)
1309                 goto release;
1310         /* CSC interrupt received due to ULP Indication */
1311         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1312                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1313                              I218_ULP_CONFIG1_STICKY_ULP |
1314                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1315                              I218_ULP_CONFIG1_WOL_HOST |
1316                              I218_ULP_CONFIG1_INBAND_EXIT |
1317                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1318                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1319
1320                 /* Commit ULP changes by starting auto ULP configuration */
1321                 phy_reg |= I218_ULP_CONFIG1_START;
1322                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1323
1324                 /* Clear Disable SMBus Release on PERST# in MAC */
1325                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1326                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1327                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1328
1329                 if (!force) {
1330                         hw->phy.ops.release(hw);
1331
1332                         if (hw->mac.autoneg)
1333                                 e1000_phy_setup_autoneg(hw);
1334
1335                         e1000_sw_lcd_config_ich8lan(hw);
1336
1337                         e1000_oem_bits_config_ich8lan(hw, true);
1338
1339                         /* Set ULP state to unknown and return non-zero to
1340                          * indicate no link (yet) and re-enter on the next LSC
1341                          * to finish disabling ULP flow.
1342                          */
1343                         hw->dev_spec.ich8lan.ulp_state =
1344                             e1000_ulp_state_unknown;
1345
1346                         return 1;
1347                 }
1348         }
1349
1350         /* Re-enable Tx */
1351         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1352         mac_reg |= E1000_TCTL_EN;
1353         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1354
1355 release:
1356         hw->phy.ops.release(hw);
1357         if (force) {
1358                 hw->phy.ops.reset(hw);
1359                 msec_delay(50);
1360         }
1361 out:
1362         if (ret_val)
1363                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1364         else
1365                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1366
1367         return ret_val;
1368 }
1369
1370 #endif /* ULP_SUPPORT */
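
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * the ULP exit flow above repeatedly applies the same locked read-modify-write
 * pattern to PHY registers.  A minimal, self-contained version of that pattern
 * is shown below; the helper name and the register/bit chosen are examples
 * only and the block is compiled out.
 */
#if 0
static s32 example_locked_phy_rmw(struct e1000_hw *hw)
{
        u16 phy_reg;
        s32 ret_val;

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        /* Read, modify and write back under the acquired flag, exactly as
         * done for HV_PM_CTRL and I218_ULP_CONFIG1 in the flow above.
         */
        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
        if (ret_val)
                goto release;
        phy_reg |= HV_PM_CTRL_K1_ENABLE;
        ret_val = e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

release:
        hw->phy.ops.release(hw);
        return ret_val;
}
#endif
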
1371 /**
1372  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1373  *  @hw: pointer to the HW structure
1374  *
1375  *  Checks to see if the link status of the hardware has changed.  If a
1376  *  change in link status has been detected, then we read the PHY registers
1377  *  to get the current speed/duplex if link exists.
1378  **/
1379 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1380 {
1381         struct e1000_mac_info *mac = &hw->mac;
1382         s32 ret_val;
1383         bool link = false;
1384         u16 phy_reg;
1385
1386         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1387
1388         /* We only want to go out to the PHY registers to see if Auto-Neg
1389          * has completed and/or if our link status has changed.  The
1390          * get_link_status flag is set upon receiving a Link Status
1391          * Change or Rx Sequence Error interrupt.
1392          */
1393         if (!mac->get_link_status)
1394                 return E1000_SUCCESS;
1395
1396         if ((hw->mac.type < e1000_pch_lpt) ||
1397             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1398             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1399                 /* First we want to see if the MII Status Register reports
1400                  * link.  If so, then we want to get the current speed/duplex
1401                  * of the PHY.
1402                  */
1403                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1404                 if (ret_val)
1405                         return ret_val;
1406         } else {
1407                 /* Check the MAC's STATUS register to determine link state
1408                  * since the PHY could be inaccessible while in ULP mode.
1409                  */
1410                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1411                 if (link)
1412                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1413                 else
1414                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1415
1416                 if (ret_val)
1417                         return ret_val;
1418         }
1419
1420         if (hw->mac.type == e1000_pchlan) {
1421                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1422                 if (ret_val)
1423                         return ret_val;
1424         }
1425
1426         /* When connected at 10Mbps half-duplex, some parts are excessively
1427          * aggressive resulting in many collisions. To avoid this, increase
1428          * the IPG and reduce Rx latency in the PHY.
1429          */
1430         if (((hw->mac.type == e1000_pch2lan) ||
1431              (hw->mac.type == e1000_pch_lpt)) && link) {
1432                 u32 reg;
1433                 reg = E1000_READ_REG(hw, E1000_STATUS);
1434                 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1435                         u16 emi_addr;
1436
1437                         reg = E1000_READ_REG(hw, E1000_TIPG);
1438                         reg &= ~E1000_TIPG_IPGT_MASK;
1439                         reg |= 0xFF;
1440                         E1000_WRITE_REG(hw, E1000_TIPG, reg);
1441
1442                         /* Reduce Rx latency in analog PHY */
1443                         ret_val = hw->phy.ops.acquire(hw);
1444                         if (ret_val)
1445                                 return ret_val;
1446
1447                         if (hw->mac.type == e1000_pch2lan)
1448                                 emi_addr = I82579_RX_CONFIG;
1449                         else
1450                                 emi_addr = I217_RX_CONFIG;
1451                         ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1452
1453                         hw->phy.ops.release(hw);
1454
1455                         if (ret_val)
1456                                 return ret_val;
1457                 }
1458         }
1459
1460         /* Work-around I218 hang issue */
1461         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1462             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1463             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1464             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1465                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1466                 if (ret_val)
1467                         return ret_val;
1468         }
1469
1470         /* Clear link partner's EEE ability */
1471         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1472
1473         if (!link)
1474                 return E1000_SUCCESS; /* No link detected */
1475
1476         mac->get_link_status = false;
1477
1478         switch (hw->mac.type) {
1479         case e1000_pch2lan:
1480                 ret_val = e1000_k1_workaround_lv(hw);
1481                 if (ret_val)
1482                         return ret_val;
1483                 /* fall-thru */
1484         case e1000_pchlan:
1485                 if (hw->phy.type == e1000_phy_82578) {
1486                         ret_val = e1000_link_stall_workaround_hv(hw);
1487                         if (ret_val)
1488                                 return ret_val;
1489                 }
1490
1491                 /* Workaround for PCHx parts in half-duplex:
1492                  * Set the number of preambles removed from the packet
1493                  * when it is passed from the PHY to the MAC to prevent
1494                  * the MAC from misinterpreting the packet type.
1495                  */
1496                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1497                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1498
1499                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1500                     E1000_STATUS_FD)
1501                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1502
1503                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1504                 break;
1505         default:
1506                 break;
1507         }
1508
1509         /* Check if there was a DownShift; this must be checked
1510          * immediately after link-up
1511          */
1512         e1000_check_downshift_generic(hw);
1513
1514         /* Enable/Disable EEE after link up */
1515         if (hw->phy.type > e1000_phy_82579) {
1516                 ret_val = e1000_set_eee_pchlan(hw);
1517                 if (ret_val)
1518                         return ret_val;
1519         }
1520
1521         /* If we are forcing speed/duplex, then we simply return since
1522          * we have already determined whether we have link or not.
1523          */
1524         if (!mac->autoneg)
1525                 return -E1000_ERR_CONFIG;
1526
1527         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1528          * of MAC speed/duplex configuration.  So we only need to
1529          * configure Collision Distance in the MAC.
1530          */
1531         mac->ops.config_collision_dist(hw);
1532
1533         /* Configure Flow Control now that Auto-Neg has completed.
1534          * First, we need to restore the desired flow control
1535          * settings because we may have had to re-autoneg with a
1536          * different link partner.
1537          */
1538         ret_val = e1000_config_fc_after_link_up_generic(hw);
1539         if (ret_val)
1540                 DEBUGOUT("Error configuring flow control\n");
1541
1542         return ret_val;
1543 }
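
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * one way a caller might combine the copper link check above with the MAC
 * STATUS register.  The wrapper name is hypothetical; on ICH/PCH parts the
 * mac.ops.check_for_link pointer is normally wired to
 * e1000_check_for_copper_link_ich8lan().  The block is compiled out.
 */
#if 0
static bool example_link_is_up(struct e1000_hw *hw)
{
        /* A non-zero return only means the check could not complete (PHY
         * access failure, forced speed/duplex, ...); it is not a link
         * indication by itself, so fall back to the LU bit afterwards.
         */
        (void)hw->mac.ops.check_for_link(hw);

        return !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
}
#endif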
1544
1545 /**
1546  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1547  *  @hw: pointer to the HW structure
1548  *
1549  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1550  **/
1551 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1552 {
1553         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1554
1555         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1556         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1557         switch (hw->mac.type) {
1558         case e1000_ich8lan:
1559         case e1000_ich9lan:
1560         case e1000_ich10lan:
1561                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1562                 break;
1563         case e1000_pchlan:
1564         case e1000_pch2lan:
1565         case e1000_pch_lpt:
1566                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1567                 break;
1568         default:
1569                 break;
1570         }
1571 }
1572
1573 /**
1574  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1575  *  @hw: pointer to the HW structure
1576  *
1577  *  Acquires the mutex for performing NVM operations.
1578  **/
1579 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1580 {
1581         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1582
1583         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1584
1585         return E1000_SUCCESS;
1586 }
1587
1588 /**
1589  *  e1000_release_nvm_ich8lan - Release NVM mutex
1590  *  @hw: pointer to the HW structure
1591  *
1592  *  Releases the mutex used while performing NVM operations.
1593  **/
1594 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1595 {
1596         DEBUGFUNC("e1000_release_nvm_ich8lan");
1597
1598         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1599
1600         return;
1601 }
1602
1603 /**
1604  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1605  *  @hw: pointer to the HW structure
1606  *
1607  *  Acquires the software control flag for performing PHY and select
1608  *  MAC CSR accesses.
1609  **/
1610 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1611 {
1612         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1613         s32 ret_val = E1000_SUCCESS;
1614
1615         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1616
1617         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1618
1619         while (timeout) {
1620                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1621                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1622                         break;
1623
1624                 msec_delay_irq(1);
1625                 timeout--;
1626         }
1627
1628         if (!timeout) {
1629                 DEBUGOUT("SW has already locked the resource.\n");
1630                 ret_val = -E1000_ERR_CONFIG;
1631                 goto out;
1632         }
1633
1634         timeout = SW_FLAG_TIMEOUT;
1635
1636         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1637         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1638
1639         while (timeout) {
1640                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1641                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1642                         break;
1643
1644                 msec_delay_irq(1);
1645                 timeout--;
1646         }
1647
1648         if (!timeout) {
1649                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1650                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1651                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1652                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1653                 ret_val = -E1000_ERR_CONFIG;
1654                 goto out;
1655         }
1656
1657 out:
1658         if (ret_val)
1659                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1660
1661         return ret_val;
1662 }
1663
1664 /**
1665  *  e1000_release_swflag_ich8lan - Release software control flag
1666  *  @hw: pointer to the HW structure
1667  *
1668  *  Releases the software control flag for performing PHY and select
1669  *  MAC CSR accesses.
1670  **/
1671 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1672 {
1673         u32 extcnf_ctrl;
1674
1675         DEBUGFUNC("e1000_release_swflag_ich8lan");
1676
1677         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1678
1679         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1680                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1681                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1682         } else {
1683                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1684         }
1685
1686         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1687
1688         return;
1689 }
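
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * the canonical way to use the acquire/release pair above around a protected
 * CSR update, mirroring what e1000_rar_set_pch2lan() does further down.  The
 * helper name and the register written are examples only; the block is
 * compiled out.
 */
#if 0
static s32 example_swflag_protected_write(struct e1000_hw *hw, u32 value)
{
        s32 ret_val;

        ret_val = e1000_acquire_swflag_ich8lan(hw);
        if (ret_val)
                return ret_val; /* FW/HW owns the flag - leave the CSR alone */

        E1000_WRITE_REG(hw, E1000_SHRAL(0), value);
        E1000_WRITE_FLUSH(hw);

        e1000_release_swflag_ich8lan(hw);

        return E1000_SUCCESS;
}
#endif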
1690
1691 /**
1692  *  e1000_check_mng_mode_ich8lan - Checks management mode
1693  *  @hw: pointer to the HW structure
1694  *
1695  *  This checks if the adapter has any manageability enabled.
1696  *  This is a function pointer entry point only called by read/write
1697  *  routines for the PHY and NVM parts.
1698  **/
1699 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1700 {
1701         u32 fwsm;
1702
1703         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1704
1705         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1706
1707         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1708                ((fwsm & E1000_FWSM_MODE_MASK) ==
1709                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1710 }
1711
1712 /**
1713  *  e1000_check_mng_mode_pchlan - Checks management mode
1714  *  @hw: pointer to the HW structure
1715  *
1716  *  This checks if the adapter has iAMT enabled.
1717  *  This is a function pointer entry point only called by read/write
1718  *  routines for the PHY and NVM parts.
1719  **/
1720 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1721 {
1722         u32 fwsm;
1723
1724         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1725
1726         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1727
1728         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1729                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1730 }
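
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * the two manageability checks above differ only in how the FWSM mode field
 * is compared - ICH8/9/10 require the field to equal the IAMT mode exactly,
 * while PCH parts only require the IAMT mode bit to be set.  The decode below
 * makes the field explicit; the helper name is hypothetical and the block is
 * compiled out.
 */
#if 0
static u32 example_fwsm_mng_mode(struct e1000_hw *hw)
{
        u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);

        /* Callers should also test E1000_ICH_FWSM_FW_VALID, as both checks
         * above do, before trusting the mode field.
         */
        return (fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT;
}
#endif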
1731
1732 /**
1733  *  e1000_rar_set_pch2lan - Set receive address register
1734  *  @hw: pointer to the HW structure
1735  *  @addr: pointer to the receive address
1736  *  @index: receive address array register
1737  *
1738  *  Sets the receive address array register at index to the address passed
1739  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1740  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1741  *  Use SHRA[0-3] in place of those reserved for ME.
1742  **/
1743 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1744 {
1745         u32 rar_low, rar_high;
1746
1747         DEBUGFUNC("e1000_rar_set_pch2lan");
1748
1749         /* HW expects these in little endian so we reverse the byte order
1750          * from network order (big endian) to little endian
1751          */
1752         rar_low = ((u32) addr[0] |
1753                    ((u32) addr[1] << 8) |
1754                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1755
1756         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1757
1758         /* If MAC address zero, no need to set the AV bit */
1759         if (rar_low || rar_high)
1760                 rar_high |= E1000_RAH_AV;
1761
1762         if (index == 0) {
1763                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1764                 E1000_WRITE_FLUSH(hw);
1765                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1766                 E1000_WRITE_FLUSH(hw);
1767                 return E1000_SUCCESS;
1768         }
1769
1770         /* RAR[1-6] are owned by manageability.  Skip those and program the
1771          * next address into the SHRA register array.
1772          */
1773         if (index < (u32) (hw->mac.rar_entry_count)) {
1774                 s32 ret_val;
1775
1776                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1777                 if (ret_val)
1778                         goto out;
1779
1780                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1781                 E1000_WRITE_FLUSH(hw);
1782                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1783                 E1000_WRITE_FLUSH(hw);
1784
1785                 e1000_release_swflag_ich8lan(hw);
1786
1787                 /* verify the register updates */
1788                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1789                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1790                         return E1000_SUCCESS;
1791
1792                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1793                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1794         }
1795
1796 out:
1797         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1798         return -E1000_ERR_CONFIG;
1799 }
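
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * with rar_set now returning a status code, a caller can detect the case
 * where the SHRA slot is owned by ME and the address was not actually
 * programmed.  The helper name and the fallback policy are hypothetical;
 * the block is compiled out.
 */
#if 0
static int example_add_rx_address_pch2lan(struct e1000_hw *hw, u8 *addr,
                                          u32 index)
{
        int ret_val = e1000_rar_set_pch2lan(hw, addr, index);

        if (ret_val != E1000_SUCCESS) {
                /* -E1000_ERR_CONFIG: the index is out of range, the swflag
                 * could not be taken, or ME rejected the SHRA write.  A
                 * caller could retry with another index or fall back to
                 * software filtering here.
                 */
                DEBUGOUT1("rar_set failed for index %d\n", index);
        }

        return ret_val;
}
#endif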
1800
1801 /**
1802  *  e1000_rar_set_pch_lpt - Set receive address registers
1803  *  @hw: pointer to the HW structure
1804  *  @addr: pointer to the receive address
1805  *  @index: receive address array register
1806  *
1807  *  Sets the receive address register array at index to the address passed
1808  *  in by addr. For LPT, RAR[0] is the base address register that is to
1809  *  contain the MAC address. SHRA[0-10] are the shared receive address
1810  *  registers that are shared between the Host and manageability engine (ME).
1811  **/
1812 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1813 {
1814         u32 rar_low, rar_high;
1815         u32 wlock_mac;
1816
1817         DEBUGFUNC("e1000_rar_set_pch_lpt");
1818
1819         /* HW expects these in little endian so we reverse the byte order
1820          * from network order (big endian) to little endian
1821          */
1822         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1823                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1824
1825         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1826
1827         /* If MAC address zero, no need to set the AV bit */
1828         if (rar_low || rar_high)
1829                 rar_high |= E1000_RAH_AV;
1830
1831         if (index == 0) {
1832                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1833                 E1000_WRITE_FLUSH(hw);
1834                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1835                 E1000_WRITE_FLUSH(hw);
1836                 return E1000_SUCCESS;
1837         }
1838
1839         /* The manageability engine (ME) can lock certain SHRAR registers that
1840          * it is using - those registers are unavailable for use.
1841          */
1842         if (index < hw->mac.rar_entry_count) {
1843                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1844                             E1000_FWSM_WLOCK_MAC_MASK;
1845                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1846
1847                 /* Check if all SHRAR registers are locked */
1848                 if (wlock_mac == 1)
1849                         goto out;
1850
1851                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1852                         s32 ret_val;
1853
1854                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1855
1856                         if (ret_val)
1857                                 goto out;
1858
1859                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1860                                         rar_low);
1861                         E1000_WRITE_FLUSH(hw);
1862                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1863                                         rar_high);
1864                         E1000_WRITE_FLUSH(hw);
1865
1866                         e1000_release_swflag_ich8lan(hw);
1867
1868                         /* verify the register updates */
1869                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1870                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1871                                 return E1000_SUCCESS;
1872                 }
1873         }
1874
1875 out:
1876         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1877         return -E1000_ERR_CONFIG;
1878 }
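
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * one way to interpret the FWSM WLOCK_MAC field checked above when deciding
 * how many SHRA slots the host may program on LPT parts.  The helper name is
 * hypothetical and the block is compiled out.
 */
#if 0
static u32 example_host_owned_shra_count(struct e1000_hw *hw)
{
        u32 wlock_mac = (E1000_READ_REG(hw, E1000_FWSM) &
                         E1000_FWSM_WLOCK_MAC_MASK) >>
                        E1000_FWSM_WLOCK_MAC_SHIFT;

        if (wlock_mac == 0)             /* nothing locked by ME */
                return hw->mac.rar_entry_count - 1;
        if (wlock_mac == 1)             /* all SHRA registers locked */
                return 0;

        /* Otherwise the checks above accept indexes 1..wlock_mac, i.e.
         * SHRA[0..wlock_mac - 1] are host writable.
         */
        return wlock_mac;
}
#endif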
1879
1880 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1881 /**
1882  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1883  *  @hw: pointer to the HW structure
1884  *  @mc_addr_list: array of multicast addresses to program
1885  *  @mc_addr_count: number of multicast addresses to program
1886  *
1887  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1888  *  The caller must have a packed mc_addr_list of multicast addresses.
1889  **/
1890 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1891                                               u8 *mc_addr_list,
1892                                               u32 mc_addr_count)
1893 {
1894         u16 phy_reg = 0;
1895         int i;
1896         s32 ret_val;
1897
1898         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1899
1900         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1901
1902         ret_val = hw->phy.ops.acquire(hw);
1903         if (ret_val)
1904                 return;
1905
1906         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1907         if (ret_val)
1908                 goto release;
1909
1910         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1911                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1912                                            (u16)(hw->mac.mta_shadow[i] &
1913                                                  0xFFFF));
1914                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1915                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1916                                                  0xFFFF));
1917         }
1918
1919         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1920
1921 release:
1922         hw->phy.ops.release(hw);
1923 }
1924
1925 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1926 /**
1927  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1928  *  @hw: pointer to the HW structure
1929  *
1930  *  Checks if firmware is blocking the reset of the PHY.
1931  *  This is a function pointer entry point only called by
1932  *  reset routines.
1933  **/
1934 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1935 {
1936         u32 fwsm;
1937         bool blocked = false;
1938         int i = 0;
1939
1940         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1941
1942         do {
1943                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1944                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1945                         blocked = true;
1946                         msec_delay(10);
1947                         continue;
1948                 }
1949                 blocked = false;
1950         } while (blocked && (i++ < 10));
1951         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1952 }
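
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * typical use of the reset-block check above.  A non-zero return
 * (E1000_BLK_PHY_RESET) means firmware currently owns the PHY and the reset
 * must be skipped.  The helper name is hypothetical and the block is
 * compiled out.
 */
#if 0
static s32 example_reset_phy_if_allowed(struct e1000_hw *hw)
{
        if (hw->phy.ops.check_reset_block(hw)) {
                DEBUGOUT("PHY reset blocked by firmware, skipping\n");
                return E1000_SUCCESS;
        }

        return hw->phy.ops.reset(hw);
}
#endif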
1953
1954 /**
1955  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1956  *  @hw: pointer to the HW structure
1957  *
1958  *  Assumes semaphore already acquired.
1959  *
1960  **/
1961 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1962 {
1963         u16 phy_data;
1964         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1965         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1966                 E1000_STRAP_SMT_FREQ_SHIFT;
1967         s32 ret_val;
1968
1969         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1970
1971         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1972         if (ret_val)
1973                 return ret_val;
1974
1975         phy_data &= ~HV_SMB_ADDR_MASK;
1976         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1977         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1978
1979         if (hw->phy.type == e1000_phy_i217) {
1980                 /* Restore SMBus frequency */
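                /* A strap value of 0 means the SMT frequency is not
                 * supported (the else branch below warns); non-zero values
                 * are re-encoded as (value - 1) into the FREQ_LOW/FREQ_HIGH
                 * bits, which is why the post-decrement is used.
                 */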
1981                 if (freq--) {
1982                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1983                         phy_data |= (freq & (1 << 0)) <<
1984                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1985                         phy_data |= (freq & (1 << 1)) <<
1986                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1987                 } else {
1988                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
1989                 }
1990         }
1991
1992         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1993 }
1994
1995 /**
1996  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1997  *  @hw:   pointer to the HW structure
1998  *
1999  *  SW should configure the LCD from the NVM extended configuration region
2000  *  as a workaround for certain parts.
2001  **/
2002 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2003 {
2004         struct e1000_phy_info *phy = &hw->phy;
2005         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2006         s32 ret_val = E1000_SUCCESS;
2007         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2008
2009         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2010
2011         /* Initialize the PHY from the NVM on ICH platforms.  This
2012          * is needed due to an issue where the NVM configuration is
2013          * not properly autoloaded after power transitions.
2014          * Therefore, after each PHY reset, we will load the
2015          * configuration data out of the NVM manually.
2016          */
2017         switch (hw->mac.type) {
2018         case e1000_ich8lan:
2019                 if (phy->type != e1000_phy_igp_3)
2020                         return ret_val;
2021
2022                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2023                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2024                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2025                         break;
2026                 }
2027                 /* Fall-thru */
2028         case e1000_pchlan:
2029         case e1000_pch2lan:
2030         case e1000_pch_lpt:
2031                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2032                 break;
2033         default:
2034                 return ret_val;
2035         }
2036
2037         ret_val = hw->phy.ops.acquire(hw);
2038         if (ret_val)
2039                 return ret_val;
2040
2041         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2042         if (!(data & sw_cfg_mask))
2043                 goto release;
2044
2045         /* Make sure HW does not configure LCD from PHY
2046          * extended configuration before SW configuration
2047          */
2048         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2049         if ((hw->mac.type < e1000_pch2lan) &&
2050             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2051                 goto release;
2052
2053         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2054         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2055         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2056         if (!cnf_size)
2057                 goto release;
2058
2059         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2060         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2061
2062         if (((hw->mac.type == e1000_pchlan) &&
2063              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2064             (hw->mac.type > e1000_pchlan)) {
2065                 /* HW configures the SMBus address and LEDs when the
2066                  * OEM and LCD Write Enable bits are set in the NVM.
2067                  * When both NVM bits are cleared, SW will configure
2068                  * them instead.
2069                  */
2070                 ret_val = e1000_write_smbus_addr(hw);
2071                 if (ret_val)
2072                         goto release;
2073
2074                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2075                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2076                                                         (u16)data);
2077                 if (ret_val)
2078                         goto release;
2079         }
2080
2081         /* Configure LCD from extended configuration region. */
2082
2083         /* cnf_base_addr is in DWORD */
2084         word_addr = (u16)(cnf_base_addr << 1);
2085
2086         for (i = 0; i < cnf_size; i++) {
2087                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2088                                            &reg_data);
2089                 if (ret_val)
2090                         goto release;
2091
2092                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2093                                            1, &reg_addr);
2094                 if (ret_val)
2095                         goto release;
2096
2097                 /* Save off the PHY page for future writes. */
2098                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2099                         phy_page = reg_data;
2100                         continue;
2101                 }
2102
2103                 reg_addr &= PHY_REG_MASK;
2104                 reg_addr |= phy_page;
2105
2106                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2107                                                     reg_data);
2108                 if (ret_val)
2109                         goto release;
2110         }
2111
2112 release:
2113         hw->phy.ops.release(hw);
2114         return ret_val;
2115 }
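
/* Illustrative note (documentation only, not part of the upstream driver):
 * the extended configuration region walked above is a table of cnf_size
 * word pairs starting at NVM word (cnf_base_addr << 1); each pair is
 * (PHY data, PHY address), and an IGP01E1000_PHY_PAGE_SELECT entry switches
 * the page applied to the writes that follow it.
 */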
2116
2117 /**
2118  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2119  *  @hw:   pointer to the HW structure
2120  *  @link: link up bool flag
2121  *
2122  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2123  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2124  *  If link is down, the function will restore the default K1 setting located
2125  *  in the NVM.
2126  **/
2127 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2128 {
2129         s32 ret_val = E1000_SUCCESS;
2130         u16 status_reg = 0;
2131         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2132
2133         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2134
2135         if (hw->mac.type != e1000_pchlan)
2136                 return E1000_SUCCESS;
2137
2138         /* Wrap the whole flow with the sw flag */
2139         ret_val = hw->phy.ops.acquire(hw);
2140         if (ret_val)
2141                 return ret_val;
2142
2143         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2144         if (link) {
2145                 if (hw->phy.type == e1000_phy_82578) {
2146                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2147                                                               &status_reg);
2148                         if (ret_val)
2149                                 goto release;
2150
2151                         status_reg &= (BM_CS_STATUS_LINK_UP |
2152                                        BM_CS_STATUS_RESOLVED |
2153                                        BM_CS_STATUS_SPEED_MASK);
2154
2155                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2156                                            BM_CS_STATUS_RESOLVED |
2157                                            BM_CS_STATUS_SPEED_1000))
2158                                 k1_enable = false;
2159                 }
2160
2161                 if (hw->phy.type == e1000_phy_82577) {
2162                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2163                                                               &status_reg);
2164                         if (ret_val)
2165                                 goto release;
2166
2167                         status_reg &= (HV_M_STATUS_LINK_UP |
2168                                        HV_M_STATUS_AUTONEG_COMPLETE |
2169                                        HV_M_STATUS_SPEED_MASK);
2170
2171                         if (status_reg == (HV_M_STATUS_LINK_UP |
2172                                            HV_M_STATUS_AUTONEG_COMPLETE |
2173                                            HV_M_STATUS_SPEED_1000))
2174                                 k1_enable = false;
2175                 }
2176
2177                 /* Link stall fix for link up */
2178                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2179                                                        0x0100);
2180                 if (ret_val)
2181                         goto release;
2182
2183         } else {
2184                 /* Link stall fix for link down */
2185                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2186                                                        0x4100);
2187                 if (ret_val)
2188                         goto release;
2189         }
2190
2191         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2192
2193 release:
2194         hw->phy.ops.release(hw);
2195
2196         return ret_val;
2197 }
2198
2199 /**
2200  *  e1000_configure_k1_ich8lan - Configure K1 power state
2201  *  @hw: pointer to the HW structure
2202  *  @k1_enable: K1 state to configure
2203  *
2204  *  Configure the K1 power state based on the provided parameter.
2205  *  Assumes semaphore already acquired.
2206  *
2207  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2208  **/
2209 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2210 {
2211         s32 ret_val;
2212         u32 ctrl_reg = 0;
2213         u32 ctrl_ext = 0;
2214         u32 reg = 0;
2215         u16 kmrn_reg = 0;
2216
2217         DEBUGFUNC("e1000_configure_k1_ich8lan");
2218
2219         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2220                                              &kmrn_reg);
2221         if (ret_val)
2222                 return ret_val;
2223
2224         if (k1_enable)
2225                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2226         else
2227                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2228
2229         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2230                                               kmrn_reg);
2231         if (ret_val)
2232                 return ret_val;
2233
2234         usec_delay(20);
2235         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2236         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2237
2238         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2239         reg |= E1000_CTRL_FRCSPD;
2240         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2241
2242         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2243         E1000_WRITE_FLUSH(hw);
2244         usec_delay(20);
2245         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2246         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2247         E1000_WRITE_FLUSH(hw);
2248         usec_delay(20);
2249
2250         return E1000_SUCCESS;
2251 }
2252
2253 /**
2254  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2255  *  @hw:       pointer to the HW structure
2256  *  @d0_state: boolean if entering d0 or d3 device state
2257  *
2258  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2259  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2260  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2261  **/
2262 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2263 {
2264         s32 ret_val = 0;
2265         u32 mac_reg;
2266         u16 oem_reg;
2267
2268         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2269
2270         if (hw->mac.type < e1000_pchlan)
2271                 return ret_val;
2272
2273         ret_val = hw->phy.ops.acquire(hw);
2274         if (ret_val)
2275                 return ret_val;
2276
2277         if (hw->mac.type == e1000_pchlan) {
2278                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2279                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2280                         goto release;
2281         }
2282
2283         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2284         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2285                 goto release;
2286
2287         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2288
2289         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2290         if (ret_val)
2291                 goto release;
2292
2293         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2294
2295         if (d0_state) {
2296                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2297                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2298
2299                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2300                         oem_reg |= HV_OEM_BITS_LPLU;
2301         } else {
2302                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2303                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2304                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2305
2306                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2307                     E1000_PHY_CTRL_NOND0A_LPLU))
2308                         oem_reg |= HV_OEM_BITS_LPLU;
2309         }
2310
2311         /* Set Restart auto-neg to activate the bits */
2312         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2313             !hw->phy.ops.check_reset_block(hw))
2314                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2315
2316         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2317
2318 release:
2319         hw->phy.ops.release(hw);
2320
2321         return ret_val;
2322 }
2323
2324
2325 /**
2326  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2327  *  @hw:   pointer to the HW structure
2328  **/
2329 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2330 {
2331         s32 ret_val;
2332         u16 data;
2333
2334         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2335
2336         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2337         if (ret_val)
2338                 return ret_val;
2339
2340         data |= HV_KMRN_MDIO_SLOW;
2341
2342         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2343
2344         return ret_val;
2345 }
2346
2347 /**
2348  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2349  *  done after every PHY reset.
2350  **/
2351 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2352 {
2353         s32 ret_val = E1000_SUCCESS;
2354         u16 phy_data;
2355
2356         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2357
2358         if (hw->mac.type != e1000_pchlan)
2359                 return E1000_SUCCESS;
2360
2361         /* Set MDIO slow mode before any other MDIO access */
2362         if (hw->phy.type == e1000_phy_82577) {
2363                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2364                 if (ret_val)
2365                         return ret_val;
2366         }
2367
2368         if (((hw->phy.type == e1000_phy_82577) &&
2369              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2370             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2371                 /* Disable generation of early preamble */
2372                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2373                 if (ret_val)
2374                         return ret_val;
2375
2376                 /* Preamble tuning for SSC */
2377                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2378                                                 0xA204);
2379                 if (ret_val)
2380                         return ret_val;
2381         }
2382
2383         if (hw->phy.type == e1000_phy_82578) {
2384                 /* Return registers to default by doing a soft reset then
2385                  * writing 0x3140 to the control register.
2386                  */
2387                 if (hw->phy.revision < 2) {
2388                         e1000_phy_sw_reset_generic(hw);
2389                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2390                                                         0x3140);
2391                 }
2392         }
2393
2394         /* Select page 0 */
2395         ret_val = hw->phy.ops.acquire(hw);
2396         if (ret_val)
2397                 return ret_val;
2398
2399         hw->phy.addr = 1;
2400         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2401         hw->phy.ops.release(hw);
2402         if (ret_val)
2403                 return ret_val;
2404
2405         /* Configure the K1 Si workaround during phy reset assuming there is
2406          * link so that it disables K1 if link is at 1Gbps.
2407          */
2408         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2409         if (ret_val)
2410                 return ret_val;
2411
2412         /* Workaround for link disconnects on a busy hub in half duplex */
2413         ret_val = hw->phy.ops.acquire(hw);
2414         if (ret_val)
2415                 return ret_val;
2416         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2417         if (ret_val)
2418                 goto release;
2419         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2420                                                phy_data & 0x00FF);
2421         if (ret_val)
2422                 goto release;
2423
2424         /* set MSE higher to enable link to stay up when noise is high */
2425         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2426 release:
2427         hw->phy.ops.release(hw);
2428
2429         return ret_val;
2430 }
2431
2432 /**
2433  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2434  *  @hw:   pointer to the HW structure
2435  **/
2436 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2437 {
2438         u32 mac_reg;
2439         u16 i, phy_reg = 0;
2440         s32 ret_val;
2441
2442         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2443
2444         ret_val = hw->phy.ops.acquire(hw);
2445         if (ret_val)
2446                 return;
2447         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2448         if (ret_val)
2449                 goto release;
2450
2451         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2452         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2453                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2454                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2455                                            (u16)(mac_reg & 0xFFFF));
2456                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2457                                            (u16)((mac_reg >> 16) & 0xFFFF));
2458
2459                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2460                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2461                                            (u16)(mac_reg & 0xFFFF));
2462                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2463                                            (u16)((mac_reg & E1000_RAH_AV)
2464                                                  >> 16));
2465         }
2466
2467         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2468
2469 release:
2470         hw->phy.ops.release(hw);
2471 }
2472
2473 #ifndef CRC32_OS_SUPPORT
2474 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2475 {
2476         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2477         u32 i, j, mask, crc;
2478
2479         DEBUGFUNC("e1000_calc_rx_da_crc");
2480
2481         crc = 0xffffffff;
2482         for (i = 0; i < 6; i++) {
2483                 crc = crc ^ mac[i];
2484                 for (j = 8; j > 0; j--) {
2485                         mask = (crc & 1) * (-1);
2486                         crc = (crc >> 1) ^ (poly & mask);
2487                 }
2488         }
2489         return ~crc;
2490 }
2491
2492 #endif /* CRC32_OS_SUPPORT */
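
/* Illustrative sketch (documentation only, not part of the upstream driver):
 * how the receive-address CRC is fed from a RAL/RAH pair, mirroring the
 * unpacking done in e1000_lv_jumbo_workaround_ich8lan() below.  The helper
 * name is hypothetical and the block is compiled out.
 */
#if 0
static u32 example_rar_crc(struct e1000_hw *hw, u32 index)
{
        u8 mac_addr[ETH_ADDR_LEN] = {0};
        u32 addr_low = E1000_READ_REG(hw, E1000_RAL(index));
        u32 addr_high = E1000_READ_REG(hw, E1000_RAH(index));

        mac_addr[0] = (addr_low & 0xFF);
        mac_addr[1] = ((addr_low >> 8) & 0xFF);
        mac_addr[2] = ((addr_low >> 16) & 0xFF);
        mac_addr[3] = ((addr_low >> 24) & 0xFF);
        mac_addr[4] = (addr_high & 0xFF);
        mac_addr[5] = ((addr_high >> 8) & 0xFF);

        /* Bit-reflected CRC-32 (poly 0xEDB88320) over the 6 address bytes */
        return e1000_calc_rx_da_crc(mac_addr);
}
#endif
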
2493 /**
2494  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2495  *  with 82579 PHY
2496  *  @hw: pointer to the HW structure
2497  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2498  **/
2499 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2500 {
2501         s32 ret_val = E1000_SUCCESS;
2502         u16 phy_reg, data;
2503         u32 mac_reg;
2504         u16 i;
2505
2506         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2507
2508         if (hw->mac.type < e1000_pch2lan)
2509                 return E1000_SUCCESS;
2510
2511         /* disable Rx path while enabling/disabling workaround */
2512         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2513         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2514                                         phy_reg | (1 << 14));
2515         if (ret_val)
2516                 return ret_val;
2517
2518         if (enable) {
2519                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2520                  * SHRAL/H) and initial CRC values to the MAC
2521                  */
2522                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2523                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2524                         u32 addr_high, addr_low;
2525
2526                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2527                         if (!(addr_high & E1000_RAH_AV))
2528                                 continue;
2529                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2530                         mac_addr[0] = (addr_low & 0xFF);
2531                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2532                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2533                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2534                         mac_addr[4] = (addr_high & 0xFF);
2535                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2536
2537 #ifndef CRC32_OS_SUPPORT
2538                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2539                                         e1000_calc_rx_da_crc(mac_addr));
2540 #else /* CRC32_OS_SUPPORT */
2541                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2542                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2543 #endif /* CRC32_OS_SUPPORT */
2544                 }
2545
2546                 /* Write Rx addresses to the PHY */
2547                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2548
2549                 /* Enable jumbo frame workaround in the MAC */
2550                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2551                 mac_reg &= ~(1 << 14);
2552                 mac_reg |= (7 << 15);
2553                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2554
2555                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2556                 mac_reg |= E1000_RCTL_SECRC;
2557                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2558
2559                 ret_val = e1000_read_kmrn_reg_generic(hw,
2560                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2561                                                 &data);
2562                 if (ret_val)
2563                         return ret_val;
2564                 ret_val = e1000_write_kmrn_reg_generic(hw,
2565                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2566                                                 data | (1 << 0));
2567                 if (ret_val)
2568                         return ret_val;
2569                 ret_val = e1000_read_kmrn_reg_generic(hw,
2570                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2571                                                 &data);
2572                 if (ret_val)
2573                         return ret_val;
2574                 data &= ~(0xF << 8);
2575                 data |= (0xB << 8);
2576                 ret_val = e1000_write_kmrn_reg_generic(hw,
2577                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2578                                                 data);
2579                 if (ret_val)
2580                         return ret_val;
2581
2582                 /* Enable jumbo frame workaround in the PHY */
2583                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2584                 data &= ~(0x7F << 5);
2585                 data |= (0x37 << 5);
2586                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2587                 if (ret_val)
2588                         return ret_val;
2589                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2590                 data &= ~(1 << 13);
2591                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2592                 if (ret_val)
2593                         return ret_val;
2594                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2595                 data &= ~(0x3FF << 2);
2596                 data |= (0x1A << 2);
2597                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2598                 if (ret_val)
2599                         return ret_val;
2600                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2601                 if (ret_val)
2602                         return ret_val;
2603                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2604                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2605                                                 (1 << 10));
2606                 if (ret_val)
2607                         return ret_val;
2608         } else {
2609                 /* Write MAC register values back to h/w defaults */
2610                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2611                 mac_reg &= ~(0xF << 14);
2612                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2613
2614                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2615                 mac_reg &= ~E1000_RCTL_SECRC;
2616                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2617
2618                 ret_val = e1000_read_kmrn_reg_generic(hw,
2619                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2620                                                 &data);
2621                 if (ret_val)
2622                         return ret_val;
2623                 ret_val = e1000_write_kmrn_reg_generic(hw,
2624                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2625                                                 data & ~(1 << 0));
2626                 if (ret_val)
2627                         return ret_val;
2628                 ret_val = e1000_read_kmrn_reg_generic(hw,
2629                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2630                                                 &data);
2631                 if (ret_val)
2632                         return ret_val;
2633                 data &= ~(0xF << 8);
2634                 data |= (0xB << 8);
2635                 ret_val = e1000_write_kmrn_reg_generic(hw,
2636                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2637                                                 data);
2638                 if (ret_val)
2639                         return ret_val;
2640
2641                 /* Write PHY register values back to h/w defaults */
2642                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2643                 data &= ~(0x7F << 5);
2644                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2645                 if (ret_val)
2646                         return ret_val;
2647                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2648                 data |= (1 << 13);
2649                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2650                 if (ret_val)
2651                         return ret_val;
2652                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2653                 data &= ~(0x3FF << 2);
2654                 data |= (0x8 << 2);
2655                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2656                 if (ret_val)
2657                         return ret_val;
2658                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2659                 if (ret_val)
2660                         return ret_val;
2661                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2662                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2663                                                 ~(1 << 10));
2664                 if (ret_val)
2665                         return ret_val;
2666         }
2667
2668         /* re-enable Rx path after enabling/disabling workaround */
2669         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2670                                      ~(1 << 14));
2671 }
2672
2673 /**
2674  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
2675  *  @hw: pointer to the HW structure
2676  **/
2677 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2678 {
2679         s32 ret_val = E1000_SUCCESS;
2680
2681         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2682
2683         if (hw->mac.type != e1000_pch2lan)
2684                 return E1000_SUCCESS;
2685
2686         /* Set MDIO slow mode before any other MDIO access */
2687         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2688         if (ret_val)
2689                 return ret_val;
2690
2691         ret_val = hw->phy.ops.acquire(hw);
2692         if (ret_val)
2693                 return ret_val;
2694         /* set MSE higher to enable link to stay up when noise is high */
2695         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2696         if (ret_val)
2697                 goto release;
2698         /* drop link after 5 times MSE threshold was reached */
2699         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2700 release:
2701         hw->phy.ops.release(hw);
2702
2703         return ret_val;
2704 }
2705
2706 /**
2707  *  e1000_k1_workaround_lv - K1 Si workaround
2708  *  @hw:   pointer to the HW structure
2709  *
2710  *  Workaround to set the K1 beacon duration for 82579 parts when the link
2711  *  speed is 10Mbps; disable K1 for the 1000 and 100 speeds.
2712  **/
2713 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2714 {
2715         s32 ret_val = E1000_SUCCESS;
2716         u16 status_reg = 0;
2717
2718         DEBUGFUNC("e1000_k1_workaround_lv");
2719
2720         if (hw->mac.type != e1000_pch2lan)
2721                 return E1000_SUCCESS;
2722
2723         /* Set K1 beacon duration based on 10Mbps speed */
2724         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2725         if (ret_val)
2726                 return ret_val;
2727
2728         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2729             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2730                 if (status_reg &
2731                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2732                         u16 pm_phy_reg;
2733
2734                         /* LV 1G/100 packet drop issue workaround */
2735                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2736                                                        &pm_phy_reg);
2737                         if (ret_val)
2738                                 return ret_val;
2739                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2740                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2741                                                         pm_phy_reg);
2742                         if (ret_val)
2743                                 return ret_val;
2744                 } else {
2745                         u32 mac_reg;
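                             /* Link is at 10Mbps: lengthen the K1 beacon
                              * duration to 16 usec instead of disabling K1.
                              */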
2746                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2747                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2748                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2749                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2750                 }
2751         }
2752
2753         return ret_val;
2754 }
2755
2756 /**
2757  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2758  *  @hw:   pointer to the HW structure
2759  *  @gate: boolean set to true to gate, false to ungate
2760  *
2761  *  Gate/ungate the automatic PHY configuration via hardware; perform
2762  *  the configuration via software instead.
2763  **/
2764 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2765 {
2766         u32 extcnf_ctrl;
2767
2768         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2769
2770         if (hw->mac.type < e1000_pch2lan)
2771                 return;
2772
2773         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2774
2775         if (gate)
2776                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2777         else
2778                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2779
2780         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2781 }
2782
2783 /**
2784  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2785  *  @hw: pointer to the HW structure
2786  *
2787  *  Check the appropriate indication that the MAC has finished configuring
2788  *  the PHY after a software reset.
2789  **/
2790 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2791 {
2792         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2793
2794         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2795
2796         /* Wait for basic configuration to complete before proceeding */
2797         do {
2798                 data = E1000_READ_REG(hw, E1000_STATUS);
2799                 data &= E1000_STATUS_LAN_INIT_DONE;
2800                 usec_delay(100);
2801         } while ((!data) && --loop);
2802
2803         /* If basic configuration is incomplete before the above loop
2804          * count reaches 0, loading the configuration from NVM will
2805          * leave the PHY in a bad state possibly resulting in no link.
2806          */
2807         if (loop == 0)
2808                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2809
2810         /* Clear the Init Done bit for the next init event */
2811         data = E1000_READ_REG(hw, E1000_STATUS);
2812         data &= ~E1000_STATUS_LAN_INIT_DONE;
2813         E1000_WRITE_REG(hw, E1000_STATUS, data);
2814 }
2815
2816 /**
2817  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2818  *  @hw: pointer to the HW structure
2819  **/
2820 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2821 {
2822         s32 ret_val = E1000_SUCCESS;
2823         u16 reg;
2824
2825         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2826
2827         if (hw->phy.ops.check_reset_block(hw))
2828                 return E1000_SUCCESS;
2829
2830         /* Allow time for h/w to get to quiescent state after reset */
2831         msec_delay(10);
2832
2833         /* Perform any necessary post-reset workarounds */
2834         switch (hw->mac.type) {
2835         case e1000_pchlan:
2836                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2837                 if (ret_val)
2838                         return ret_val;
2839                 break;
2840         case e1000_pch2lan:
2841                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2842                 if (ret_val)
2843                         return ret_val;
2844                 break;
2845         default:
2846                 break;
2847         }
2848
2849         /* Clear the host wakeup bit after lcd reset */
2850         if (hw->mac.type >= e1000_pchlan) {
2851                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2852                 reg &= ~BM_WUC_HOST_WU_BIT;
2853                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2854         }
2855
2856         /* Configure the LCD with the extended configuration region in NVM */
2857         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2858         if (ret_val)
2859                 return ret_val;
2860
2861         /* Configure the LCD with the OEM bits in NVM */
2862         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2863
2864         if (hw->mac.type == e1000_pch2lan) {
2865                 /* Ungate automatic PHY configuration on non-managed 82579 */
2866                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2867                     E1000_ICH_FWSM_FW_VALID)) {
2868                         msec_delay(10);
2869                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2870                 }
2871
2872                 /* Set EEE LPI Update Timer to 200usec */
2873                 ret_val = hw->phy.ops.acquire(hw);
2874                 if (ret_val)
2875                         return ret_val;
2876                 ret_val = e1000_write_emi_reg_locked(hw,
2877                                                      I82579_LPI_UPDATE_TIMER,
2878                                                      0x1387);
2879                 hw->phy.ops.release(hw);
2880         }
2881
2882         return ret_val;
2883 }
2884
2885 /**
2886  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2887  *  @hw: pointer to the HW structure
2888  *
2889  *  Resets the PHY
2890  *  This is a function pointer entry point called by drivers
2891  *  or other shared routines.
2892  **/
2893 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2894 {
2895         s32 ret_val = E1000_SUCCESS;
2896
2897         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2898
2899         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2900         if ((hw->mac.type == e1000_pch2lan) &&
2901             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2902                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2903
2904         ret_val = e1000_phy_hw_reset_generic(hw);
2905         if (ret_val)
2906                 return ret_val;
2907
2908         return e1000_post_phy_reset_ich8lan(hw);
2909 }
2910
2911 /**
2912  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2913  *  @hw: pointer to the HW structure
2914  *  @active: true to enable LPLU, false to disable
2915  *
2916  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
2917  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
2918  *  not set the PHY speed.  This function manually sets the LPLU bit and restarts
2919  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2920  *  since it configures the same bit.
2921  **/
2922 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2923 {
2924         s32 ret_val;
2925         u16 oem_reg;
2926
2927         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2928
2929         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2930         if (ret_val)
2931                 return ret_val;
2932
2933         if (active)
2934                 oem_reg |= HV_OEM_BITS_LPLU;
2935         else
2936                 oem_reg &= ~HV_OEM_BITS_LPLU;
2937
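             /* Request an auto-neg restart only when PHY resets are not blocked */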
2938         if (!hw->phy.ops.check_reset_block(hw))
2939                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2940
2941         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2942 }
2943
2944 /**
2945  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2946  *  @hw: pointer to the HW structure
2947  *  @active: true to enable LPLU, false to disable
2948  *
2949  *  Sets the LPLU D0 state according to the active flag.  When
2950  *  activating LPLU this function also disables smart speed
2951  *  and vice versa.  LPLU will not be activated unless the
2952  *  device autonegotiation advertisement meets standards of
2953  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2954  *  This is a function pointer entry point only called by
2955  *  PHY setup routines.
2956  **/
2957 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2958 {
2959         struct e1000_phy_info *phy = &hw->phy;
2960         u32 phy_ctrl;
2961         s32 ret_val = E1000_SUCCESS;
2962         u16 data;
2963
2964         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2965
2966         if (phy->type == e1000_phy_ife)
2967                 return E1000_SUCCESS;
2968
2969         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2970
2971         if (active) {
2972                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2973                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2974
2975                 if (phy->type != e1000_phy_igp_3)
2976                         return E1000_SUCCESS;
2977
2978                 /* Call gig speed drop workaround on LPLU before accessing
2979                  * any PHY registers
2980                  */
2981                 if (hw->mac.type == e1000_ich8lan)
2982                         e1000_gig_downshift_workaround_ich8lan(hw);
2983
2984                 /* When LPLU is enabled, we should disable SmartSpeed */
2985                 ret_val = phy->ops.read_reg(hw,
2986                                             IGP01E1000_PHY_PORT_CONFIG,
2987                                             &data);
2988                 if (ret_val)
2989                         return ret_val;
2990                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2991                 ret_val = phy->ops.write_reg(hw,
2992                                              IGP01E1000_PHY_PORT_CONFIG,
2993                                              data);
2994                 if (ret_val)
2995                         return ret_val;
2996         } else {
2997                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2998                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2999
3000                 if (phy->type != e1000_phy_igp_3)
3001                         return E1000_SUCCESS;
3002
3003                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3004                  * during Dx states where the power conservation is most
3005                  * important.  During driver activity we should enable
3006                  * SmartSpeed, so performance is maintained.
3007                  */
3008                 if (phy->smart_speed == e1000_smart_speed_on) {
3009                         ret_val = phy->ops.read_reg(hw,
3010                                                     IGP01E1000_PHY_PORT_CONFIG,
3011                                                     &data);
3012                         if (ret_val)
3013                                 return ret_val;
3014
3015                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3016                         ret_val = phy->ops.write_reg(hw,
3017                                                      IGP01E1000_PHY_PORT_CONFIG,
3018                                                      data);
3019                         if (ret_val)
3020                                 return ret_val;
3021                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3022                         ret_val = phy->ops.read_reg(hw,
3023                                                     IGP01E1000_PHY_PORT_CONFIG,
3024                                                     &data);
3025                         if (ret_val)
3026                                 return ret_val;
3027
3028                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3029                         ret_val = phy->ops.write_reg(hw,
3030                                                      IGP01E1000_PHY_PORT_CONFIG,
3031                                                      data);
3032                         if (ret_val)
3033                                 return ret_val;
3034                 }
3035         }
3036
3037         return E1000_SUCCESS;
3038 }
3039
3040 /**
3041  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3042  *  @hw: pointer to the HW structure
3043  *  @active: true to enable LPLU, false to disable
3044  *
3045  *  Sets the LPLU D3 state according to the active flag.  When
3046  *  activating LPLU this function also disables smart speed
3047  *  and vice versa.  LPLU will not be activated unless the
3048  *  device autonegotiation advertisement meets standards of
3049  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3050  *  This is a function pointer entry point only called by
3051  *  PHY setup routines.
3052  **/
3053 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3054 {
3055         struct e1000_phy_info *phy = &hw->phy;
3056         u32 phy_ctrl;
3057         s32 ret_val = E1000_SUCCESS;
3058         u16 data;
3059
3060         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3061
3062         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3063
3064         if (!active) {
3065                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3066                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3067
3068                 if (phy->type != e1000_phy_igp_3)
3069                         return E1000_SUCCESS;
3070
3071                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3072                  * during Dx states where the power conservation is most
3073                  * important.  During driver activity we should enable
3074                  * SmartSpeed, so performance is maintained.
3075                  */
3076                 if (phy->smart_speed == e1000_smart_speed_on) {
3077                         ret_val = phy->ops.read_reg(hw,
3078                                                     IGP01E1000_PHY_PORT_CONFIG,
3079                                                     &data);
3080                         if (ret_val)
3081                                 return ret_val;
3082
3083                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3084                         ret_val = phy->ops.write_reg(hw,
3085                                                      IGP01E1000_PHY_PORT_CONFIG,
3086                                                      data);
3087                         if (ret_val)
3088                                 return ret_val;
3089                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3090                         ret_val = phy->ops.read_reg(hw,
3091                                                     IGP01E1000_PHY_PORT_CONFIG,
3092                                                     &data);
3093                         if (ret_val)
3094                                 return ret_val;
3095
3096                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3097                         ret_val = phy->ops.write_reg(hw,
3098                                                      IGP01E1000_PHY_PORT_CONFIG,
3099                                                      data);
3100                         if (ret_val)
3101                                 return ret_val;
3102                 }
3103         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3104                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3105                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3106                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3107                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3108
3109                 if (phy->type != e1000_phy_igp_3)
3110                         return E1000_SUCCESS;
3111
3112                 /* Call gig speed drop workaround on LPLU before accessing
3113                  * any PHY registers
3114                  */
3115                 if (hw->mac.type == e1000_ich8lan)
3116                         e1000_gig_downshift_workaround_ich8lan(hw);
3117
3118                 /* When LPLU is enabled, we should disable SmartSpeed */
3119                 ret_val = phy->ops.read_reg(hw,
3120                                             IGP01E1000_PHY_PORT_CONFIG,
3121                                             &data);
3122                 if (ret_val)
3123                         return ret_val;
3124
3125                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3126                 ret_val = phy->ops.write_reg(hw,
3127                                              IGP01E1000_PHY_PORT_CONFIG,
3128                                              data);
3129         }
3130
3131         return ret_val;
3132 }
3133
3134 /**
3135  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3136  *  @hw: pointer to the HW structure
3137  *  @bank:  pointer to the variable that returns the active bank
3138  *
3139  *  Reads signature byte from the NVM using the flash access registers.
3140  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3141  **/
3142 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3143 {
3144         u32 eecd;
3145         struct e1000_nvm_info *nvm = &hw->nvm;
3146         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
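             /* Byte offset of the upper byte of the bank 0 signature word
              * (0x13); this byte carries the signature bits 15:14.
              */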
3147         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3148         u8 sig_byte = 0;
3149         s32 ret_val;
3150
3151         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3152
3153         switch (hw->mac.type) {
3154         case e1000_ich8lan:
3155         case e1000_ich9lan:
3156                 eecd = E1000_READ_REG(hw, E1000_EECD);
3157                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3158                     E1000_EECD_SEC1VAL_VALID_MASK) {
3159                         if (eecd & E1000_EECD_SEC1VAL)
3160                                 *bank = 1;
3161                         else
3162                                 *bank = 0;
3163
3164                         return E1000_SUCCESS;
3165                 }
3166                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3167                 /* fall-thru */
3168         default:
3169                 /* set bank to 0 in case flash read fails */
3170                 *bank = 0;
3171
3172                 /* Check bank 0 */
3173                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3174                                                         &sig_byte);
3175                 if (ret_val)
3176                         return ret_val;
3177                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3178                     E1000_ICH_NVM_SIG_VALUE) {
3179                         *bank = 0;
3180                         return E1000_SUCCESS;
3181                 }
3182
3183                 /* Check bank 1 */
3184                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3185                                                         bank1_offset,
3186                                                         &sig_byte);
3187                 if (ret_val)
3188                         return ret_val;
3189                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3190                     E1000_ICH_NVM_SIG_VALUE) {
3191                         *bank = 1;
3192                         return E1000_SUCCESS;
3193                 }
3194
3195                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3196                 return -E1000_ERR_NVM;
3197         }
3198 }
3199
3200 /**
3201  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3202  *  @hw: pointer to the HW structure
3203  *  @offset: The offset (in words) of the word(s) to read.
3204  *  @words: Size of data to read in words
3205  *  @data: Pointer to the word(s) to read at offset.
3206  *
3207  *  Reads a word(s) from the NVM using the flash access registers.
3208  **/
3209 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3210                                   u16 *data)
3211 {
3212         struct e1000_nvm_info *nvm = &hw->nvm;
3213         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3214         u32 act_offset;
3215         s32 ret_val = E1000_SUCCESS;
3216         u32 bank = 0;
3217         u16 i, word;
3218
3219         DEBUGFUNC("e1000_read_nvm_ich8lan");
3220
3221         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3222             (words == 0)) {
3223                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3224                 ret_val = -E1000_ERR_NVM;
3225                 goto out;
3226         }
3227
3228         nvm->ops.acquire(hw);
3229
3230         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3231         if (ret_val != E1000_SUCCESS) {
3232                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3233                 bank = 0;
3234         }
3235
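             /* act_offset is a word offset into the active flash bank */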
3236         act_offset = (bank) ? nvm->flash_bank_size : 0;
3237         act_offset += offset;
3238
3239         ret_val = E1000_SUCCESS;
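             /* Serve each word from the shadow RAM if it has been modified but
              * not yet committed; otherwise read it directly from flash.
              */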
3240         for (i = 0; i < words; i++) {
3241                 if (dev_spec->shadow_ram[offset+i].modified) {
3242                         data[i] = dev_spec->shadow_ram[offset+i].value;
3243                 } else {
3244                         ret_val = e1000_read_flash_word_ich8lan(hw,
3245                                                                 act_offset + i,
3246                                                                 &word);
3247                         if (ret_val)
3248                                 break;
3249                         data[i] = word;
3250                 }
3251         }
3252
3253         nvm->ops.release(hw);
3254
3255 out:
3256         if (ret_val)
3257                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3258
3259         return ret_val;
3260 }
3261
3262 /**
3263  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3264  *  @hw: pointer to the HW structure
3265  *
3266  *  This function does initial flash setup so that a new read/write/erase cycle
3267  *  can be started.
3268  **/
3269 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3270 {
3271         union ich8_hws_flash_status hsfsts;
3272         s32 ret_val = -E1000_ERR_NVM;
3273
3274         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3275
3276         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3277
3278         /* Check if the flash descriptor is valid */
3279         if (!hsfsts.hsf_status.fldesvalid) {
3280                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3281                 return -E1000_ERR_NVM;
3282         }
3283
3284         /* Clear FCERR and DAEL in hw status by writing 1 */
3285         hsfsts.hsf_status.flcerr = 1;
3286         hsfsts.hsf_status.dael = 1;
3287         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3288
3289         /* Either we should have a hardware SPI cycle in progress
3290          * bit to check against, in order to start a new cycle or
3291          * FDONE bit should be changed in the hardware so that it
3292          * is 1 after hardware reset, which can then be used as an
3293          * indication whether a cycle is in progress or has been
3294          * completed.
3295          */
3296
3297         if (!hsfsts.hsf_status.flcinprog) {
3298                 /* There is no cycle running at present,
3299                  * so we can start a cycle.
3300                  * Begin by setting Flash Cycle Done.
3301                  */
3302                 hsfsts.hsf_status.flcdone = 1;
3303                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3304                 ret_val = E1000_SUCCESS;
3305         } else {
3306                 s32 i;
3307
3308                 /* Otherwise poll for some time so the current
3309                  * cycle has a chance to end before giving up.
3310                  */
3311                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3312                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3313                                                               ICH_FLASH_HSFSTS);
3314                         if (!hsfsts.hsf_status.flcinprog) {
3315                                 ret_val = E1000_SUCCESS;
3316                                 break;
3317                         }
3318                         usec_delay(1);
3319                 }
3320                 if (ret_val == E1000_SUCCESS) {
3321                         /* Successfully waited for the previous cycle to finish,
3322                          * now set the Flash Cycle Done.
3323                          */
3324                         hsfsts.hsf_status.flcdone = 1;
3325                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3326                                                 hsfsts.regval);
3327                 } else {
3328                         DEBUGOUT("Flash controller busy, cannot get access\n");
3329                 }
3330         }
3331
3332         return ret_val;
3333 }
3334
3335 /**
3336  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3337  *  @hw: pointer to the HW structure
3338  *  @timeout: maximum time to wait for completion
3339  *
3340  *  This function starts a flash cycle and waits for its completion.
3341  **/
3342 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3343 {
3344         union ich8_hws_flash_ctrl hsflctl;
3345         union ich8_hws_flash_status hsfsts;
3346         u32 i = 0;
3347
3348         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3349
3350         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3351         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3352         hsflctl.hsf_ctrl.flcgo = 1;
3353
3354         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3355
3356         /* wait till FDONE bit is set to 1 */
3357         do {
3358                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3359                 if (hsfsts.hsf_status.flcdone)
3360                         break;
3361                 usec_delay(1);
3362         } while (i++ < timeout);
3363
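             /* The cycle succeeded only if FDONE is set and FCERR is clear */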
3364         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3365                 return E1000_SUCCESS;
3366
3367         return -E1000_ERR_NVM;
3368 }
3369
3370 /**
3371  *  e1000_read_flash_word_ich8lan - Read word from flash
3372  *  @hw: pointer to the HW structure
3373  *  @offset: offset to data location
3374  *  @data: pointer to the location for storing the data
3375  *
3376  *  Reads the flash word at offset into data.  Offset is converted
3377  *  to bytes before read.
3378  **/
3379 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3380                                          u16 *data)
3381 {
3382         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3383
3384         if (!data)
3385                 return -E1000_ERR_NVM;
3386
3387         /* Must convert offset into bytes. */
3388         offset <<= 1;
3389
3390         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3391 }
3392
3393 /**
3394  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3395  *  @hw: pointer to the HW structure
3396  *  @offset: The offset of the byte to read.
3397  *  @data: Pointer to a byte to store the value read.
3398  *
3399  *  Reads a single byte from the NVM using the flash access registers.
3400  **/
3401 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3402                                          u8 *data)
3403 {
3404         s32 ret_val;
3405         u16 word = 0;
3406
3407         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3408
3409         if (ret_val)
3410                 return ret_val;
3411
3412         *data = (u8)word;
3413
3414         return E1000_SUCCESS;
3415 }
3416
3417 /**
3418  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3419  *  @hw: pointer to the HW structure
3420  *  @offset: The offset (in bytes) of the byte or word to read.
3421  *  @size: Size of data to read, 1=byte 2=word
3422  *  @data: Pointer to the word to store the value read.
3423  *
3424  *  Reads a byte or word from the NVM using the flash access registers.
3425  **/
3426 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3427                                          u8 size, u16 *data)
3428 {
3429         union ich8_hws_flash_status hsfsts;
3430         union ich8_hws_flash_ctrl hsflctl;
3431         u32 flash_linear_addr;
3432         u32 flash_data = 0;
3433         s32 ret_val = -E1000_ERR_NVM;
3434         u8 count = 0;
3435
3436         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3437
3438         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3439                 return -E1000_ERR_NVM;
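             /* Convert the NVM byte offset into a linear flash address */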
3440         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3441                              hw->nvm.flash_base_addr);
3442
3443         do {
3444                 usec_delay(1);
3445                 /* Steps */
3446                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3447                 if (ret_val != E1000_SUCCESS)
3448                         break;
3449                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3450
3451                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3452                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3453                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3454                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3455
3456                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3457
3458                 ret_val =
3459                     e1000_flash_cycle_ich8lan(hw,
3460                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3461
3462                 /* Check if FCERR is set to 1; if so, clear it and try
3463                  * the whole sequence a few more times.  Otherwise read
3464                  * in (shift in) the Flash Data0 register, least
3465                  * significant byte first.
3466                  */
3467                 if (ret_val == E1000_SUCCESS) {
3468                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3469                         if (size == 1)
3470                                 *data = (u8)(flash_data & 0x000000FF);
3471                         else if (size == 2)
3472                                 *data = (u16)(flash_data & 0x0000FFFF);
3473                         break;
3474                 } else {
3475                         /* If we've gotten here, then things are probably
3476                          * completely hosed, but if the error condition is
3477                          * detected, it won't hurt to give it another try...
3478                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3479                          */
3480                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3481                                                               ICH_FLASH_HSFSTS);
3482                         if (hsfsts.hsf_status.flcerr) {
3483                                 /* Repeat for some time before giving up. */
3484                                 continue;
3485                         } else if (!hsfsts.hsf_status.flcdone) {
3486                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3487                                 break;
3488                         }
3489                 }
3490         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3491
3492         return ret_val;
3493 }
3494
3495 /**
3496  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3497  *  @hw: pointer to the HW structure
3498  *  @offset: The offset (in words) of the word(s) to write.
3499  *  @words: Size of data to write in words
3500  *  @data: Pointer to the word(s) to write at offset.
3501  *
3502  *  Writes a byte or word to the NVM using the flash access registers.
3503  **/
3504 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3505                                    u16 *data)
3506 {
3507         struct e1000_nvm_info *nvm = &hw->nvm;
3508         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3509         u16 i;
3510
3511         DEBUGFUNC("e1000_write_nvm_ich8lan");
3512
3513         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3514             (words == 0)) {
3515                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3516                 return -E1000_ERR_NVM;
3517         }
3518
3519         nvm->ops.acquire(hw);
3520
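             /* Writes are only staged in the shadow RAM here; they are
              * committed to the flash by e1000_update_nvm_checksum_ich8lan().
              */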
3521         for (i = 0; i < words; i++) {
3522                 dev_spec->shadow_ram[offset+i].modified = true;
3523                 dev_spec->shadow_ram[offset+i].value = data[i];
3524         }
3525
3526         nvm->ops.release(hw);
3527
3528         return E1000_SUCCESS;
3529 }
3530
3531 /**
3532  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3533  *  @hw: pointer to the HW structure
3534  *
3535  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3536  *  which writes the checksum to the shadow ram.  The changes in the shadow
3537  *  ram are then committed to the EEPROM by processing each bank at a time
3538  *  checking for the modified bit and writing only the pending changes.
3539  *  After a successful commit, the shadow ram is cleared and is ready for
3540  *  future writes.
3541  **/
3542 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3543 {
3544         struct e1000_nvm_info *nvm = &hw->nvm;
3545         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3546         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3547         s32 ret_val;
3548         u16 data;
3549
3550         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3551
3552         ret_val = e1000_update_nvm_checksum_generic(hw);
3553         if (ret_val)
3554                 goto out;
3555
3556         if (nvm->type != e1000_nvm_flash_sw)
3557                 goto out;
3558
3559         nvm->ops.acquire(hw);
3560
3561         /* We're writing to the opposite bank so if we're on bank 1,
3562          * write to bank 0 etc.  We also need to erase the segment that
3563          * is going to be written
3564          */
3565         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3566         if (ret_val != E1000_SUCCESS) {
3567                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3568                 bank = 0;
3569         }
3570
3571         if (bank == 0) {
3572                 new_bank_offset = nvm->flash_bank_size;
3573                 old_bank_offset = 0;
3574                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3575                 if (ret_val)
3576                         goto release;
3577         } else {
3578                 old_bank_offset = nvm->flash_bank_size;
3579                 new_bank_offset = 0;
3580                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3581                 if (ret_val)
3582                         goto release;
3583         }
3584
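             /* Copy every word into the new bank, substituting any values
              * that were modified in the shadow RAM.
              */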
3585         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3586                 /* Determine whether to write the value stored
3587                  * in the other NVM bank or a modified value stored
3588                  * in the shadow RAM
3589                  */
3590                 if (dev_spec->shadow_ram[i].modified) {
3591                         data = dev_spec->shadow_ram[i].value;
3592                 } else {
3593                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3594                                                                 old_bank_offset,
3595                                                                 &data);
3596                         if (ret_val)
3597                                 break;
3598                 }
3599
3600                 /* If the word is 0x13, then make sure the signature bits
3601                  * (15:14) are 11b until the commit has completed.
3602                  * This will allow us to write 10b which indicates the
3603                  * signature is valid.  We want to do this after the write
3604                  * has completed so that we don't mark the segment valid
3605                  * while the write is still in progress
3606                  */
3607                 if (i == E1000_ICH_NVM_SIG_WORD)
3608                         data |= E1000_ICH_NVM_SIG_MASK;
3609
3610                 /* Convert offset to bytes. */
3611                 act_offset = (i + new_bank_offset) << 1;
3612
3613                 usec_delay(100);
3614                 /* Write the bytes to the new bank. */
3615                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3616                                                                act_offset,
3617                                                                (u8)data);
3618                 if (ret_val)
3619                         break;
3620
3621                 usec_delay(100);
3622                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3623                                                           act_offset + 1,
3624                                                           (u8)(data >> 8));
3625                 if (ret_val)
3626                         break;
3627         }
3628
3629         /* Don't bother writing the segment valid bits if sector
3630          * programming failed.
3631          */
3632         if (ret_val) {
3633                 DEBUGOUT("Flash commit failed.\n");
3634                 goto release;
3635         }
3636
3637         /* Finally, validate the new segment by setting bits 15:14
3638          * of word 0x13 to 10b.  This can be done without an erase
3639          * because these bits start out as 11b and we only need to
3640          * clear bit 14 to 0b.
3641          */
3642         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3643         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3644         if (ret_val)
3645                 goto release;
3646
3647         data &= 0xBFFF;
3648         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3649                                                        act_offset * 2 + 1,
3650                                                        (u8)(data >> 8));
3651         if (ret_val)
3652                 goto release;
3653
3654         /* And invalidate the previously valid segment by setting
3655          * the high byte of its signature word (0x13) to 0. This can be
3656          * done without an erase because flash erase sets all bits
3657          * to 1's, and 1's can always be written to 0's without an erase.
3658          */
3659         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3660         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3661         if (ret_val)
3662                 goto release;
3663
3664         /* Great!  Everything worked, we can now clear the cached entries. */
3665         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3666                 dev_spec->shadow_ram[i].modified = false;
3667                 dev_spec->shadow_ram[i].value = 0xFFFF;
3668         }
3669
3670 release:
3671         nvm->ops.release(hw);
3672
3673         /* Reload the EEPROM, or else modifications will not appear
3674          * until after the next adapter reset.
3675          */
3676         if (!ret_val) {
3677                 nvm->ops.reload(hw);
3678                 msec_delay(10);
3679         }
3680
3681 out:
3682         if (ret_val)
3683                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3684
3685         return ret_val;
3686 }
3687
3688 /**
3689  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3690  *  @hw: pointer to the HW structure
3691  *
3692  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
3693  *  If the bit is 0, the EEPROM was modified but the checksum was not
3694  *  calculated, in which case we need to calculate the checksum and set bit 6.
3695  **/
3696 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3697 {
3698         s32 ret_val;
3699         u16 data;
3700         u16 word;
3701         u16 valid_csum_mask;
3702
3703         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3704
3705         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3706          * the checksum needs to be fixed.  This bit is an indication that
3707          * the NVM was prepared by OEM software and did not calculate
3708          * the checksum...a likely scenario.
3709          */
3710         switch (hw->mac.type) {
3711         case e1000_pch_lpt:
3712                 word = NVM_COMPAT;
3713                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3714                 break;
3715         default:
3716                 word = NVM_FUTURE_INIT_WORD1;
3717                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3718                 break;
3719         }
3720
3721         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3722         if (ret_val)
3723                 return ret_val;
3724
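             /* If the valid-checksum bit is clear, set it and update the NVM,
              * which recalculates and commits the checksum.
              */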
3725         if (!(data & valid_csum_mask)) {
3726                 data |= valid_csum_mask;
3727                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3728                 if (ret_val)
3729                         return ret_val;
3730                 ret_val = hw->nvm.ops.update(hw);
3731                 if (ret_val)
3732                         return ret_val;
3733         }
3734
3735         return e1000_validate_nvm_checksum_generic(hw);
3736 }
3737
3738 /**
3739  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3740  *  @hw: pointer to the HW structure
3741  *  @offset: The offset (in bytes) of the byte/word to write.
3742  *  @size: Size of data to write, 1=byte 2=word
3743  *  @data: The byte(s) to write to the NVM.
3744  *
3745  *  Writes one/two bytes to the NVM using the flash access registers.
3746  **/
3747 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3748                                           u8 size, u16 data)
3749 {
3750         union ich8_hws_flash_status hsfsts;
3751         union ich8_hws_flash_ctrl hsflctl;
3752         u32 flash_linear_addr;
3753         u32 flash_data = 0;
3754         s32 ret_val;
3755         u8 count = 0;
3756
3757         DEBUGFUNC("e1000_write_ich8_data");
3758
3759         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3760                 return -E1000_ERR_NVM;
3761
3762         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3763                              hw->nvm.flash_base_addr);
3764
3765         do {
3766                 usec_delay(1);
3767                 /* Steps */
3768                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3769                 if (ret_val != E1000_SUCCESS)
3770                         break;
3771                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3772
3773                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3774                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3775                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3776                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3777
3778                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3779
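                     /* Load the data to be programmed into Flash Data0 */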
3780                 if (size == 1)
3781                         flash_data = (u32)data & 0x00FF;
3782                 else
3783                         flash_data = (u32)data;
3784
3785                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3786
3787                 /* Check if FCERR is set to 1; if so, clear it and try
3788                  * the whole sequence a few more times, else we are done.
3789                  */
3790                 ret_val =
3791                     e1000_flash_cycle_ich8lan(hw,
3792                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3793                 if (ret_val == E1000_SUCCESS)
3794                         break;
3795
3796                 /* If we're here, then things are most likely
3797                  * completely hosed, but if the error condition
3798                  * is detected, it won't hurt to give it another
3799                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3800                  */
3801                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3802                 if (hsfsts.hsf_status.flcerr)
3803                         /* Repeat for some time before giving up. */
3804                         continue;
3805                 if (!hsfsts.hsf_status.flcdone) {
3806                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3807                         break;
3808                 }
3809         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3810
3811         return ret_val;
3812 }
3813
3814 /**
3815  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3816  *  @hw: pointer to the HW structure
3817  *  @offset: The offset of the byte to write.
3818  *  @data: The byte to write to the NVM.
3819  *
3820  *  Writes a single byte to the NVM using the flash access registers.
3821  **/
3822 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3823                                           u8 data)
3824 {
3825         u16 word = (u16)data;
3826
3827         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3828
3829         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3830 }
3831
3832 /**
3833  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3834  *  @hw: pointer to the HW structure
3835  *  @offset: The offset of the byte to write.
3836  *  @byte: The byte to write to the NVM.
3837  *
3838  *  Writes a single byte to the NVM using the flash access registers.
3839  *  Goes through a retry algorithm before giving up.
3840  **/
3841 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3842                                                 u32 offset, u8 byte)
3843 {
3844         s32 ret_val;
3845         u16 program_retries;
3846
3847         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3848
3849         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3850         if (!ret_val)
3851                 return ret_val;
3852
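             /* The initial write failed; retry with a short delay between
              * attempts.
              */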
3853         for (program_retries = 0; program_retries < 100; program_retries++) {
3854                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3855                 usec_delay(100);
3856                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3857                 if (ret_val == E1000_SUCCESS)
3858                         break;
3859         }
3860         if (program_retries == 100)
3861                 return -E1000_ERR_NVM;
3862
3863         return E1000_SUCCESS;
3864 }
3865
3866 /**
3867  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3868  *  @hw: pointer to the HW structure
3869  *  @bank: 0 for first bank, 1 for second bank, etc.
3870  *
3871  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3872  *  bank N is 4096 * N + flash_reg_addr.
3873  **/
3874 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3875 {
3876         struct e1000_nvm_info *nvm = &hw->nvm;
3877         union ich8_hws_flash_status hsfsts;
3878         union ich8_hws_flash_ctrl hsflctl;
3879         u32 flash_linear_addr;
3880         /* bank size is in 16bit words - adjust to bytes */
3881         u32 flash_bank_size = nvm->flash_bank_size * 2;
3882         s32 ret_val;
3883         s32 count = 0;
3884         s32 j, iteration, sector_size;
3885
3886         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3887
3888         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3889
3890         /* Determine HW Sector size: Read BERASE bits of hw flash status
3891          * register
3892          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3893          *     consecutive sectors.  The start index for the nth Hw sector
3894          *     can be calculated as = bank * 4096 + n * 256
3895          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3896          *     The start index for the nth Hw sector can be calculated
3897          *     as = bank * 4096
3898          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3899          *     (ich9 only, otherwise error condition)
3900          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3901          */
3902         switch (hsfsts.hsf_status.berasesz) {
3903         case 0:
3904                 /* Hw sector size 256 */
3905                 sector_size = ICH_FLASH_SEG_SIZE_256;
3906                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3907                 break;
3908         case 1:
3909                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3910                 iteration = 1;
3911                 break;
3912         case 2:
3913                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3914                 iteration = 1;
3915                 break;
3916         case 3:
3917                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3918                 iteration = 1;
3919                 break;
3920         default:
3921                 return -E1000_ERR_NVM;
3922         }
3923
3924         /* Start with the base address, then add the sector offset. */
3925         flash_linear_addr = hw->nvm.flash_base_addr;
3926         flash_linear_addr += (bank) ? flash_bank_size : 0;
3927
3928         for (j = 0; j < iteration; j++) {
3929                 do {
3930                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3931
3932                         /* Steps */
3933                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3934                         if (ret_val)
3935                                 return ret_val;
3936
3937                         /* Write a value 11 (block Erase) in Flash
3938                          * Cycle field in hw flash control
3939                          */
3940                         hsflctl.regval =
3941                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3942
3943                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3944                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3945                                                 hsflctl.regval);
3946
3947                         /* Write the last 24 bits of an index within the
3948                          * block into Flash Linear address field in Flash
3949                          * Address.
3950                          */
3951                         flash_linear_addr += (j * sector_size);
3952                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3953                                               flash_linear_addr);
3954
3955                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3956                         if (ret_val == E1000_SUCCESS)
3957                                 break;
3958
3959                         /* Check if FCERR is set to 1.  If so,
3960                          * clear it and try the whole sequence
3961                          * a few more times, else we are done.
3962                          */
3963                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3964                                                       ICH_FLASH_HSFSTS);
3965                         if (hsfsts.hsf_status.flcerr)
3966                                 /* repeat for some time before giving up */
3967                                 continue;
3968                         else if (!hsfsts.hsf_status.flcdone)
3969                                 return ret_val;
3970                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3971         }
3972
3973         return E1000_SUCCESS;
3974 }
3975
3976 /**
3977  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3978  *  @hw: pointer to the HW structure
3979  *  @data: Pointer to the LED settings
3980  *
3981  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3982  *  settings is all 0's or F's, set the LED default to a valid LED default
3983  *  setting.
3984  **/
3985 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3986 {
3987         s32 ret_val;
3988
3989         DEBUGFUNC("e1000_valid_led_default_ich8lan");
3990
3991         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3992         if (ret_val) {
3993                 DEBUGOUT("NVM Read Error\n");
3994                 return ret_val;
3995         }
3996
3997         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3998                 *data = ID_LED_DEFAULT_ICH8LAN;
3999
4000         return E1000_SUCCESS;
4001 }
4002
4003 /**
4004  *  e1000_id_led_init_pchlan - store LED configurations
4005  *  @hw: pointer to the HW structure
4006  *
4007  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4008  *  the PHY LED configuration register.
4009  *
4010  *  PCH also does not have an "always on" or "always off" mode which
4011  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4012  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4013  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4014  *  link based on logic in e1000_led_[on|off]_pchlan().
4015  **/
4016 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4017 {
4018         struct e1000_mac_info *mac = &hw->mac;
4019         s32 ret_val;
4020         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4021         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4022         u16 data, i, temp, shift;
4023
4024         DEBUGFUNC("e1000_id_led_init_pchlan");
4025
4026         /* Get default ID LED modes */
4027         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4028         if (ret_val)
4029                 return ret_val;
4030
4031         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4032         mac->ledctl_mode1 = mac->ledctl_default;
4033         mac->ledctl_mode2 = mac->ledctl_default;
4034
4035         for (i = 0; i < 4; i++) {
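                     /* Each 4-bit nibble of the NVM ID-LED word selects the
                      * behavior for one LED; the PHY LED configuration uses a
                      * 5-bit field per LED, hence the separate i * 5 shift.
                      * The first switch programs ledctl_mode1 from the "1"
                      * half of the encoding, the second programs ledctl_mode2
                      * from the "2" half.
                      */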
4036                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4037                 shift = (i * 5);
4038                 switch (temp) {
4039                 case ID_LED_ON1_DEF2:
4040                 case ID_LED_ON1_ON2:
4041                 case ID_LED_ON1_OFF2:
4042                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4043                         mac->ledctl_mode1 |= (ledctl_on << shift);
4044                         break;
4045                 case ID_LED_OFF1_DEF2:
4046                 case ID_LED_OFF1_ON2:
4047                 case ID_LED_OFF1_OFF2:
4048                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4049                         mac->ledctl_mode1 |= (ledctl_off << shift);
4050                         break;
4051                 default:
4052                         /* Do nothing */
4053                         break;
4054                 }
4055                 switch (temp) {
4056                 case ID_LED_DEF1_ON2:
4057                 case ID_LED_ON1_ON2:
4058                 case ID_LED_OFF1_ON2:
4059                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4060                         mac->ledctl_mode2 |= (ledctl_on << shift);
4061                         break;
4062                 case ID_LED_DEF1_OFF2:
4063                 case ID_LED_ON1_OFF2:
4064                 case ID_LED_OFF1_OFF2:
4065                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4066                         mac->ledctl_mode2 |= (ledctl_off << shift);
4067                         break;
4068                 default:
4069                         /* Do nothing */
4070                         break;
4071                 }
4072         }
4073
4074         return E1000_SUCCESS;
4075 }
4076
4077 /**
4078  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4079  *  @hw: pointer to the HW structure
4080  *
4081  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4082  *  register, so the bus width is hard coded.
4083  **/
4084 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4085 {
4086         struct e1000_bus_info *bus = &hw->bus;
4087         s32 ret_val;
4088
4089         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4090
4091         ret_val = e1000_get_bus_info_pcie_generic(hw);
4092
4093         /* ICH devices are "PCI Express"-ish.  They have
4094          * a configuration space, but do not contain
4095          * PCI Express Capability registers, so bus width
4096          * must be hardcoded.
4097          */
4098         if (bus->width == e1000_bus_width_unknown)
4099                 bus->width = e1000_bus_width_pcie_x1;
4100
4101         return ret_val;
4102 }
4103
4104 /**
4105  *  e1000_reset_hw_ich8lan - Reset the hardware
4106  *  @hw: pointer to the HW structure
4107  *
4108  *  Does a full reset of the hardware which includes a reset of the PHY and
4109  *  MAC.
4110  **/
4111 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4112 {
4113         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4114         u16 kum_cfg;
4115         u32 ctrl, reg;
4116         s32 ret_val;
4117
4118         DEBUGFUNC("e1000_reset_hw_ich8lan");
4119
4120         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4121          * on the last TLP read/write transaction when MAC is reset.
4122          */
4123         ret_val = e1000_disable_pcie_master_generic(hw);
4124         if (ret_val)
4125                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4126
4127         DEBUGOUT("Masking off all interrupts\n");
4128         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4129
4130         /* Disable the Transmit and Receive units.  Then delay to allow
4131          * any pending transactions to complete before we hit the MAC
4132          * with the global reset.
4133          */
4134         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4135         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4136         E1000_WRITE_FLUSH(hw);
4137
4138         msec_delay(10);
4139
4140         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4141         if (hw->mac.type == e1000_ich8lan) {
4142                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4143                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4144                 /* Set Packet Buffer Size to 16k. */
4145                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4146         }
4147
4148         if (hw->mac.type == e1000_pchlan) {
4149                 /* Save the NVM K1 bit setting */
4150                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4151                 if (ret_val)
4152                         return ret_val;
4153
4154                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4155                         dev_spec->nvm_k1_enabled = true;
4156                 else
4157                         dev_spec->nvm_k1_enabled = false;
4158         }
4159
4160         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4161
4162         if (!hw->phy.ops.check_reset_block(hw)) {
4163                 /* Full-chip reset requires MAC and PHY reset at the same
4164                  * time to make sure the interface between MAC and the
4165                  * external PHY is reset.
4166                  */
4167                 ctrl |= E1000_CTRL_PHY_RST;
4168
4169                 /* Gate automatic PHY configuration by hardware on
4170                  * non-managed 82579
4171                  */
4172                 if ((hw->mac.type == e1000_pch2lan) &&
4173                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4174                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4175         }
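             /* Acquire the software flag before issuing the global reset.
              * The reset below is issued even if the acquire fails; the
              * swflag mutex is released afterwards only when the acquire
              * succeeded (see the !ret_val check below).
              */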
4176         ret_val = e1000_acquire_swflag_ich8lan(hw);
4177         DEBUGOUT("Issuing a global reset to ich8lan\n");
4178         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4179         /* cannot issue a flush here because it hangs the hardware */
4180         msec_delay(20);
4181
4182         /* Set Phy Config Counter to 50msec */
4183         if (hw->mac.type == e1000_pch2lan) {
4184                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4185                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4186                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4187                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4188         }
4189
4190         if (!ret_val)
4191                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4192
4193         if (ctrl & E1000_CTRL_PHY_RST) {
4194                 ret_val = hw->phy.ops.get_cfg_done(hw);
4195                 if (ret_val)
4196                         return ret_val;
4197
4198                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4199                 if (ret_val)
4200                         return ret_val;
4201         }
4202
4203         /* For PCH, this write will make sure that any noise
4204          * will be detected as a CRC error and be dropped rather than show up
4205          * as a bad packet to the DMA engine.
4206          */
4207         if (hw->mac.type == e1000_pchlan)
4208                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4209
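             /* Mask all interrupts again and read ICR to clear any interrupt
              * causes latched during the reset.
              */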
4210         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4211         E1000_READ_REG(hw, E1000_ICR);
4212
4213         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4214         reg |= E1000_KABGTXD_BGSQLBIAS;
4215         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4216
4217         return E1000_SUCCESS;
4218 }
4219
4220 /**
4221  *  e1000_init_hw_ich8lan - Initialize the hardware
4222  *  @hw: pointer to the HW structure
4223  *
4224  *  Prepares the hardware for transmit and receive by doing the following:
4225  *   - initialize hardware bits
4226  *   - initialize LED identification
4227  *   - setup receive address registers
4228  *   - setup flow control
4229  *   - setup transmit descriptors
4230  *   - clear statistics
4231  **/
4232 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4233 {
4234         struct e1000_mac_info *mac = &hw->mac;
4235         u32 ctrl_ext, txdctl, snoop;
4236         s32 ret_val;
4237         u16 i;
4238
4239         DEBUGFUNC("e1000_init_hw_ich8lan");
4240
4241         e1000_initialize_hw_bits_ich8lan(hw);
4242
4243         /* Initialize identification LED */
4244         ret_val = mac->ops.id_led_init(hw);
4245         /* An error is not fatal and we should not stop init due to this */
4246         if (ret_val)
4247                 DEBUGOUT("Error initializing identification LED\n");
4248
4249         /* Setup the receive address. */
4250         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4251
4252         /* Zero out the Multicast HASH table */
4253         DEBUGOUT("Zeroing the MTA\n");
4254         for (i = 0; i < mac->mta_reg_count; i++)
4255                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4256
4257         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4258          * the ME.  Disable wakeup by clearing the host wakeup bit.
4259          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4260          */
4261         if (hw->phy.type == e1000_phy_82578) {
4262                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4263                 i &= ~BM_WUC_HOST_WU_BIT;
4264                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4265                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4266                 if (ret_val)
4267                         return ret_val;
4268         }
4269
4270         /* Setup link and flow control */
4271         ret_val = mac->ops.setup_link(hw);
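             /* A setup_link failure is not treated as fatal here; the rest of
              * the init still runs and ret_val is returned at the end.
              */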
4272
4273         /* Set the transmit descriptor write-back policy for both queues */
4274         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4275         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4276                   E1000_TXDCTL_FULL_TX_DESC_WB);
4277         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4278                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4279         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4280         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4281         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4282                   E1000_TXDCTL_FULL_TX_DESC_WB);
4283         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4284                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4285         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4286
4287         /* ICH8 has opposite polarity of no_snoop bits.
4288          * By default, we should use snoop behavior.
4289          */
4290         if (mac->type == e1000_ich8lan)
4291                 snoop = PCIE_ICH8_SNOOP_ALL;
4292         else
4293                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4294         e1000_set_pcie_no_snoop_generic(hw, snoop);
4295
4296         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4297         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4298         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4299
4300         /* Clear all of the statistics registers (clear on read).  It is
4301          * important that we do this after we have tried to establish link
4302          * because the symbol error count will increment wildly if there
4303          * is no link.
4304          */
4305         e1000_clear_hw_cntrs_ich8lan(hw);
4306
4307         return ret_val;
4308 }
4309
4310 /**
4311  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4312  *  @hw: pointer to the HW structure
4313  *
4314  *  Sets/Clears required hardware bits necessary for correctly setting up the
4315  *  hardware for transmit and receive.
4316  **/
4317 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4318 {
4319         u32 reg;
4320
4321         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4322
4323         /* Extended Device Control */
4324         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4325         reg |= (1 << 22);
4326         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4327         if (hw->mac.type >= e1000_pchlan)
4328                 reg |= E1000_CTRL_EXT_PHYPDEN;
4329         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4330
4331         /* Transmit Descriptor Control 0 */
4332         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4333         reg |= (1 << 22);
4334         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4335
4336         /* Transmit Descriptor Control 1 */
4337         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4338         reg |= (1 << 22);
4339         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4340
4341         /* Transmit Arbitration Control 0 */
4342         reg = E1000_READ_REG(hw, E1000_TARC(0));
4343         if (hw->mac.type == e1000_ich8lan)
4344                 reg |= (1 << 28) | (1 << 29);
4345         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4346         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4347
4348         /* Transmit Arbitration Control 1 */
4349         reg = E1000_READ_REG(hw, E1000_TARC(1));
4350         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4351                 reg &= ~(1 << 28);
4352         else
4353                 reg |= (1 << 28);
4354         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4355         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4356
4357         /* Device Status */
4358         if (hw->mac.type == e1000_ich8lan) {
4359                 reg = E1000_READ_REG(hw, E1000_STATUS);
4360                 reg &= ~(1 << 31);
4361                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4362         }
4363
4364         /* Work around a descriptor data corruption issue seen during NFSv2
4365          * UDP traffic by disabling the NFS filtering capability.
4366          */
4367         reg = E1000_READ_REG(hw, E1000_RFCTL);
4368         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4369
4370         /* Disable IPv6 extension header parsing because some malformed
4371          * IPv6 headers can hang the Rx.
4372          */
4373         if (hw->mac.type == e1000_ich8lan)
4374                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4375         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4376
4377         /* Enable ECC on Lynxpoint */
4378         if (hw->mac.type == e1000_pch_lpt) {
4379                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4380                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4381                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4382
4383                 reg = E1000_READ_REG(hw, E1000_CTRL);
4384                 reg |= E1000_CTRL_MEHE;
4385                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4386         }
4387
4388         return;
4389 }
4390
4391 /**
4392  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4393  *  @hw: pointer to the HW structure
4394  *
4395  *  Determines which flow control settings to use, then configures flow
4396  *  control.  Calls the appropriate media-specific link configuration
4397  *  function.  Assuming the adapter has a valid link partner, a valid link
4398  *  should be established.  Assumes the hardware has previously been reset
4399  *  and the transmitter and receiver are not enabled.
4400  **/
4401 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4402 {
4403         s32 ret_val;
4404
4405         DEBUGFUNC("e1000_setup_link_ich8lan");
4406
4407         if (hw->phy.ops.check_reset_block(hw))
4408                 return E1000_SUCCESS;
4409
4410         /* ICH parts do not have a word in the NVM to determine
4411          * the default flow control setting, so we explicitly
4412          * set it to full.
4413          */
4414         if (hw->fc.requested_mode == e1000_fc_default)
4415                 hw->fc.requested_mode = e1000_fc_full;
4416
4417         /* Save off the requested flow control mode for use later.  Depending
4418          * on the link partner's capabilities, we may or may not use this mode.
4419          */
4420         hw->fc.current_mode = hw->fc.requested_mode;
4421
4422         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4423                 hw->fc.current_mode);
4424
4425         /* Continue to configure the copper link. */
4426         ret_val = hw->mac.ops.setup_physical_interface(hw);
4427         if (ret_val)
4428                 return ret_val;
4429
4430         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4431         if ((hw->phy.type == e1000_phy_82578) ||
4432             (hw->phy.type == e1000_phy_82579) ||
4433             (hw->phy.type == e1000_phy_i217) ||
4434             (hw->phy.type == e1000_phy_82577)) {
4435                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4436
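                     /* Write the flow-control pause time to the PHY as well
                      * (register 27 on the BM port-control page).
                      */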
4437                 ret_val = hw->phy.ops.write_reg(hw,
4438                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4439                                              hw->fc.pause_time);
4440                 if (ret_val)
4441                         return ret_val;
4442         }
4443
4444         return e1000_set_fc_watermarks_generic(hw);
4445 }
4446
4447 /**
4448  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4449  *  @hw: pointer to the HW structure
4450  *
4451  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4452  *  when polling the PHY, then calls the generic setup_copper_link to finish
4453  *  configuring the copper link.
4454  **/
4455 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4456 {
4457         u32 ctrl;
4458         s32 ret_val;
4459         u16 reg_data;
4460
4461         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4462
4463         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4464         ctrl |= E1000_CTRL_SLU;
4465         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4466         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4467
4468         /* Set the mac to wait the maximum time between each iteration
4469          * and increase the max iterations when polling the phy;
4470          * this fixes erroneous timeouts at 10Mbps.
4471          */
4472         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4473                                                0xFFFF);
4474         if (ret_val)
4475                 return ret_val;
4476         ret_val = e1000_read_kmrn_reg_generic(hw,
4477                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4478                                               &reg_data);
4479         if (ret_val)
4480                 return ret_val;
4481         reg_data |= 0x3F;
4482         ret_val = e1000_write_kmrn_reg_generic(hw,
4483                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4484                                                reg_data);
4485         if (ret_val)
4486                 return ret_val;
4487
4488         switch (hw->phy.type) {
4489         case e1000_phy_igp_3:
4490                 ret_val = e1000_copper_link_setup_igp(hw);
4491                 if (ret_val)
4492                         return ret_val;
4493                 break;
4494         case e1000_phy_bm:
4495         case e1000_phy_82578:
4496                 ret_val = e1000_copper_link_setup_m88(hw);
4497                 if (ret_val)
4498                         return ret_val;
4499                 break;
4500         case e1000_phy_82577:
4501         case e1000_phy_82579:
4502                 ret_val = e1000_copper_link_setup_82577(hw);
4503                 if (ret_val)
4504                         return ret_val;
4505                 break;
4506         case e1000_phy_ife:
4507                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4508                                                &reg_data);
4509                 if (ret_val)
4510                         return ret_val;
4511
4512                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4513
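                     /* hw->phy.mdix: 1 forces MDI, 2 forces MDI-X, and 0 (or
                      * any other value) selects automatic crossover.
                      */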
4514                 switch (hw->phy.mdix) {
4515                 case 1:
4516                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4517                         break;
4518                 case 2:
4519                         reg_data |= IFE_PMC_FORCE_MDIX;
4520                         break;
4521                 case 0:
4522                 default:
4523                         reg_data |= IFE_PMC_AUTO_MDIX;
4524                         break;
4525                 }
4526                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4527                                                 reg_data);
4528                 if (ret_val)
4529                         return ret_val;
4530                 break;
4531         default:
4532                 break;
4533         }
4534
4535         return e1000_setup_copper_link_generic(hw);
4536 }
4537
4538 /**
4539  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4540  *  @hw: pointer to the HW structure
4541  *
4542  *  Calls the PHY specific link setup function and then calls the
4543  *  generic setup_copper_link to finish configuring the link for
4544  *  Lynxpoint PCH devices.
4545  **/
4546 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4547 {
4548         u32 ctrl;
4549         s32 ret_val;
4550
4551         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4552
4553         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4554         ctrl |= E1000_CTRL_SLU;
4555         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4556         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4557
4558         ret_val = e1000_copper_link_setup_82577(hw);
4559         if (ret_val)
4560                 return ret_val;
4561
4562         return e1000_setup_copper_link_generic(hw);
4563 }
4564
4565 /**
4566  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4567  *  @hw: pointer to the HW structure
4568  *  @speed: pointer to store current link speed
4569  *  @duplex: pointer to store the current link duplex
4570  *
4571  *  Calls the generic get_speed_and_duplex to retrieve the current link
4572  *  information and then calls the Kumeran lock loss workaround for links at
4573  *  gigabit speeds.
4574  **/
4575 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4576                                           u16 *duplex)
4577 {
4578         s32 ret_val;
4579
4580         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4581
4582         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4583         if (ret_val)
4584                 return ret_val;
4585
4586         if ((hw->mac.type == e1000_ich8lan) &&
4587             (hw->phy.type == e1000_phy_igp_3) &&
4588             (*speed == SPEED_1000)) {
4589                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4590         }
4591
4592         return ret_val;
4593 }
4594
4595 /**
4596  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4597  *  @hw: pointer to the HW structure
4598  *
4599  *  Work-around for 82566 Kumeran PCS lock loss:
4600  *  On link status change (i.e. PCI reset, speed change), when link is up and
4601  *  speed is gigabit:
4602  *    0) if workaround is optionally disabled do nothing
4603  *    1) wait 1ms for Kumeran link to come up
4604  *    2) check Kumeran Diagnostic register PCS lock loss bit
4605  *    3) if not set the link is locked (all is good), otherwise...
4606  *    4) reset the PHY
4607  *    5) repeat up to 10 times
4608  *  Note: this is only called for IGP3 copper when speed is 1Gb/s.
4609  **/
4610 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4611 {
4612         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4613         u32 phy_ctrl;
4614         s32 ret_val;
4615         u16 i, data;
4616         bool link;
4617
4618         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4619
4620         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4621                 return E1000_SUCCESS;
4622
4623         /* Make sure link is up before proceeding.  If not, just return.
4624          * Attempting this while the link is negotiating fouled up link
4625          * stability.
4626          */
4627         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4628         if (!link)
4629                 return E1000_SUCCESS;
4630
4631         for (i = 0; i < 10; i++) {
4632                 /* read once to clear */
4633                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4634                 if (ret_val)
4635                         return ret_val;
4636                 /* and again to get new status */
4637                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4638                 if (ret_val)
4639                         return ret_val;
4640
4641                 /* check for PCS lock */
4642                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4643                         return E1000_SUCCESS;
4644
4645                 /* Issue PHY reset */
4646                 hw->phy.ops.reset(hw);
4647                 msec_delay_irq(5);
4648         }
4649         /* Disable GigE link negotiation */
4650         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4651         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4652                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4653         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4654
4655         /* Call gig speed drop workaround on Gig disable before accessing
4656          * any PHY registers
4657          */
4658         e1000_gig_downshift_workaround_ich8lan(hw);
4659
4660         /* unable to acquire PCS lock */
4661         return -E1000_ERR_PHY;
4662 }
4663
4664 /**
4665  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4666  *  @hw: pointer to the HW structure
4667  *  @state: boolean value used to set the current Kumeran workaround state
4668  *
4669  *  If ICH8, set the current Kumeran workaround state (enabled - true
4670  *  /disabled - false).
4671  **/
4672 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4673                                                  bool state)
4674 {
4675         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4676
4677         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4678
4679         if (hw->mac.type != e1000_ich8lan) {
4680                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4681                 return;
4682         }
4683
4684         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4685
4686         return;
4687 }
4688
4689 /**
4690  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4691  *  @hw: pointer to the HW structure
4692  *
4693  *  Workaround for 82566 power-down on D3 entry:
4694  *    1) disable gigabit link
4695  *    2) write VR power-down enable
4696  *    3) read it back
4697  *  Continue if successful, else issue LCD reset and repeat
4698  **/
4699 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4700 {
4701         u32 reg;
4702         u16 data;
4703         u8  retry = 0;
4704
4705         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4706
4707         if (hw->phy.type != e1000_phy_igp_3)
4708                 return;
4709
4710         /* Try the workaround twice (if needed) */
4711         do {
4712                 /* Disable link */
4713                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4714                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4715                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4716                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4717
4718                 /* Call gig speed drop workaround on Gig disable before
4719                  * accessing any PHY registers
4720                  */
4721                 if (hw->mac.type == e1000_ich8lan)
4722                         e1000_gig_downshift_workaround_ich8lan(hw);
4723
4724                 /* Write VR power-down enable */
4725                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4726                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4727                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4728                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4729
4730                 /* Read it back and test */
4731                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4732                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4733                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4734                         break;
4735
4736                 /* Issue PHY reset and repeat at most one more time */
4737                 reg = E1000_READ_REG(hw, E1000_CTRL);
4738                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4739                 retry++;
4740         } while (retry);
4741 }
4742
4743 /**
4744  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4745  *  @hw: pointer to the HW structure
4746  *
4747  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4748  *  LPLU, Gig disable, MDIC PHY reset):
4749  *    1) Set Kumeran Near-end loopback
4750  *    2) Clear Kumeran Near-end loopback
4751  *  Should only be called for ICH8[m] devices with any 1G Phy.
4752  **/
4753 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4754 {
4755         s32 ret_val;
4756         u16 reg_data;
4757
4758         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4759
4760         if ((hw->mac.type != e1000_ich8lan) ||
4761             (hw->phy.type == e1000_phy_ife))
4762                 return;
4763
4764         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4765                                               &reg_data);
4766         if (ret_val)
4767                 return;
4768         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4769         ret_val = e1000_write_kmrn_reg_generic(hw,
4770                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4771                                                reg_data);
4772         if (ret_val)
4773                 return;
4774         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4775         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4776                                      reg_data);
4777 }
4778
4779 /**
4780  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4781  *  @hw: pointer to the HW structure
4782  *
4783  *  During S0 to Sx transition, it is possible the link remains at gig
4784  *  instead of negotiating to a lower speed.  Before going to Sx, set
4785  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4786  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4787  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4788  *  needs to be written.
4789  *  Parts that support (and are linked to a partner that supports) EEE in
4790  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4791  *  than 10Mbps w/o EEE.
4792  **/
4793 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4794 {
4795         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4796         u32 phy_ctrl;
4797         s32 ret_val;
4798
4799         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4800
4801         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4802         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4803
4804         if (hw->phy.type == e1000_phy_i217) {
4805                 u16 phy_reg, device_id = hw->device_id;
4806
4807                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4808                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4809                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4810                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
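                             /* On these I218 parts, stop requesting the PLL
                              * clock (clear FEXTNVM6 REQ_PLL_CLK) before
                              * entering Sx.
                              */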
4811                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4812
4813                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4814                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4815                 }
4816
4817                 ret_val = hw->phy.ops.acquire(hw);
4818                 if (ret_val)
4819                         goto out;
4820
4821                 if (!dev_spec->eee_disable) {
4822                         u16 eee_advert;
4823
4824                         ret_val =
4825                             e1000_read_emi_reg_locked(hw,
4826                                                       I217_EEE_ADVERTISEMENT,
4827                                                       &eee_advert);
4828                         if (ret_val)
4829                                 goto release;
4830
4831                         /* Disable LPLU if both link partners support 100BaseT
4832                          * EEE and 100Full is advertised on both ends of the
4833                          * link, and enable Auto Enable LPI since there will
4834                          * be no driver to enable LPI while in Sx.
4835                          */
4836                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4837                             (dev_spec->eee_lp_ability &
4838                              I82579_EEE_100_SUPPORTED) &&
4839                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4840                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4841                                               E1000_PHY_CTRL_NOND0A_LPLU);
4842
4843                                 /* Set Auto Enable LPI after link up */
4844                                 hw->phy.ops.read_reg_locked(hw,
4845                                                             I217_LPI_GPIO_CTRL,
4846                                                             &phy_reg);
4847                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4848                                 hw->phy.ops.write_reg_locked(hw,
4849                                                              I217_LPI_GPIO_CTRL,
4850                                                              phy_reg);
4851                         }
4852                 }
4853
4854                 /* For i217 Intel Rapid Start Technology support,
4855                  * when the system is going into Sx and no manageability engine
4856                  * is present, the driver must configure proxy to reset only on
4857                  * power good.  LPI (Low Power Idle) state must also reset only
4858                  * on power good, as well as the MTA (Multicast table array).
4859                  * The SMBus release must also be disabled on LCD reset.
4860                  */
4861                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4862                       E1000_ICH_FWSM_FW_VALID)) {
4863                         /* Enable proxy to reset only on power good. */
4864                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4865                                                     &phy_reg);
4866                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4867                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4868                                                      phy_reg);
4869
4870                         /* Set the LPI (EEE) enable bit to reset only on
4871                          * power good.
4872                          */
4873                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4874                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4875                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4876
4877                         /* Disable the SMB release on LCD reset. */
4878                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4879                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4880                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4881                 }
4882
4883                 /* Enable MTA to reset for Intel Rapid Start Technology
4884                  * Support
4885                  */
4886                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4887                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4888                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4889
4890 release:
4891                 hw->phy.ops.release(hw);
4892         }
4893 out:
4894         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4895
4896         if (hw->mac.type == e1000_ich8lan)
4897                 e1000_gig_downshift_workaround_ich8lan(hw);
4898
4899         if (hw->mac.type >= e1000_pchlan) {
4900                 e1000_oem_bits_config_ich8lan(hw, false);
4901
4902                 /* Reset PHY to activate OEM bits on 82577/8 */
4903                 if (hw->mac.type == e1000_pchlan)
4904                         e1000_phy_hw_reset_generic(hw);
4905
4906                 ret_val = hw->phy.ops.acquire(hw);
4907                 if (ret_val)
4908                         return;
4909                 e1000_write_smbus_addr(hw);
4910                 hw->phy.ops.release(hw);
4911         }
4912
4913         return;
4914 }
4915
4916 /**
4917  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4918  *  @hw: pointer to the HW structure
4919  *
4920  *  During Sx to S0 transitions on non-managed devices, or on managed devices
4921  *  on which PHY resets are not blocked, if the PHY registers cannot be
4922  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4923  *  the PHY.
4924  *  On i217, setup Intel Rapid Start Technology.
4925  **/
4926 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4927 {
4928         s32 ret_val;
4929
4930         DEBUGFUNC("e1000_resume_workarounds_pchlan");
4931
4932         if (hw->mac.type < e1000_pch2lan)
4933                 return;
4934
4935         ret_val = e1000_init_phy_workarounds_pchlan(hw);
4936         if (ret_val) {
4937                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4938                 return;
4939         }
4940
4941         /* For i217 Intel Rapid Start Technology support, when the system
4942          * is transitioning from Sx and no manageability engine is present,
4943          * configure SMBus to restore on reset, disable proxy, and enable
4944          * the reset on MTA (Multicast table array).
4945          */
4946         if (hw->phy.type == e1000_phy_i217) {
4947                 u16 phy_reg;
4948
4949                 ret_val = hw->phy.ops.acquire(hw);
4950                 if (ret_val) {
4951                         DEBUGOUT("Failed to setup iRST\n");
4952                         return;
4953                 }
4954
4955                 /* Clear Auto Enable LPI after link up */
4956                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
4957                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4958                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
4959
4960                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4961                     E1000_ICH_FWSM_FW_VALID)) {
4962                         /* Restore clear on SMB if no manageability engine
4963                          * is present
4964                          */
4965                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4966                                                               &phy_reg);
4967                         if (ret_val)
4968                                 goto release;
4969                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4970                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4971
4972                         /* Disable Proxy */
4973                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4974                 }
4975                 /* Enable reset on MTA */
4976                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4977                                                       &phy_reg);
4978                 if (ret_val)
4979                         goto release;
4980                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4981                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4982 release:
4983                 if (ret_val)
4984                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4985                 hw->phy.ops.release(hw);
4986         }
4987 }
4988
4989 /**
4990  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4991  *  @hw: pointer to the HW structure
4992  *
4993  *  Return the LED back to the default configuration.
4994  **/
4995 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4996 {
4997         DEBUGFUNC("e1000_cleanup_led_ich8lan");
4998
4999         if (hw->phy.type == e1000_phy_ife)
5000                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5001                                              0);
5002
5003         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5004         return E1000_SUCCESS;
5005 }
5006
5007 /**
5008  *  e1000_led_on_ich8lan - Turn LEDs on
5009  *  @hw: pointer to the HW structure
5010  *
5011  *  Turn on the LEDs.
5012  **/
5013 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5014 {
5015         DEBUGFUNC("e1000_led_on_ich8lan");
5016
5017         if (hw->phy.type == e1000_phy_ife)
5018                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5019                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5020
5021         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5022         return E1000_SUCCESS;
5023 }
5024
5025 /**
5026  *  e1000_led_off_ich8lan - Turn LEDs off
5027  *  @hw: pointer to the HW structure
5028  *
5029  *  Turn off the LEDs.
5030  **/
5031 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5032 {
5033         DEBUGFUNC("e1000_led_off_ich8lan");
5034
5035         if (hw->phy.type == e1000_phy_ife)
5036                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5037                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5038
5039         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5040         return E1000_SUCCESS;
5041 }
5042
5043 /**
5044  *  e1000_setup_led_pchlan - Configures SW controllable LED
5045  *  @hw: pointer to the HW structure
5046  *
5047  *  This prepares the SW controllable LED for use.
5048  **/
5049 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5050 {
5051         DEBUGFUNC("e1000_setup_led_pchlan");
5052
5053         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5054                                      (u16)hw->mac.ledctl_mode1);
5055 }
5056
5057 /**
5058  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5059  *  @hw: pointer to the HW structure
5060  *
5061  *  Return the LED back to the default configuration.
5062  **/
5063 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5064 {
5065         DEBUGFUNC("e1000_cleanup_led_pchlan");
5066
5067         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5068                                      (u16)hw->mac.ledctl_default);
5069 }
5070
5071 /**
5072  *  e1000_led_on_pchlan - Turn LEDs on
5073  *  @hw: pointer to the HW structure
5074  *
5075  *  Turn on the LEDs.
5076  **/
5077 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5078 {
5079         u16 data = (u16)hw->mac.ledctl_mode2;
5080         u32 i, led;
5081
5082         DEBUGFUNC("e1000_led_on_pchlan");
5083
5084         /* If no link, then turn LED on by setting the invert bit
5085          * for each LED whose mode is "link_up" in ledctl_mode2.
5086          */
5087         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5088                 for (i = 0; i < 3; i++) {
5089                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5090                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5091                             E1000_LEDCTL_MODE_LINK_UP)
5092                                 continue;
5093                         if (led & E1000_PHY_LED0_IVRT)
5094                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5095                         else
5096                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5097                 }
5098         }
5099
5100         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5101 }
5102
5103 /**
5104  *  e1000_led_off_pchlan - Turn LEDs off
5105  *  @hw: pointer to the HW structure
5106  *
5107  *  Turn off the LEDs.
5108  **/
5109 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5110 {
5111         u16 data = (u16)hw->mac.ledctl_mode1;
5112         u32 i, led;
5113
5114         DEBUGFUNC("e1000_led_off_pchlan");
5115
5116         /* If no link, then turn LED off by clearing the invert bit
5117          * for each LED whose mode is "link_up" in ledctl_mode1.
5118          */
5119         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5120                 for (i = 0; i < 3; i++) {
5121                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5122                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5123                             E1000_LEDCTL_MODE_LINK_UP)
5124                                 continue;
5125                         if (led & E1000_PHY_LED0_IVRT)
5126                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5127                         else
5128                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5129                 }
5130         }
5131
5132         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5133 }
5134
5135 /**
5136  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5137  *  @hw: pointer to the HW structure
5138  *
5139  *  Read appropriate register for the config done bit for completion status
5140  *  and configure the PHY through s/w for EEPROM-less parts.
5141  *
5142  *  NOTE: some silicon which is EEPROM-less will fail when trying to read the
5143  *  config done bit, so only an error is logged and execution continues.  If we
5144  *  were to return with an error, EEPROM-less silicon would not be able to be
5145  *  reset or change link.
5146  **/
5147 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5148 {
5149         s32 ret_val = E1000_SUCCESS;
5150         u32 bank = 0;
5151         u32 status;
5152
5153         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5154
5155         e1000_get_cfg_done_generic(hw);
5156
5157         /* Wait for indication from h/w that it has completed basic config */
5158         if (hw->mac.type >= e1000_ich10lan) {
5159                 e1000_lan_init_done_ich8lan(hw);
5160         } else {
5161                 ret_val = e1000_get_auto_rd_done_generic(hw);
5162                 if (ret_val) {
5163                         /* When auto config read does not complete, do not
5164                          * return with an error.  This can happen when there is
5165                          * no eeprom; returning an error would prevent getting link.
5166                          */
5167                         DEBUGOUT("Auto Read Done did not complete\n");
5168                         ret_val = E1000_SUCCESS;
5169                 }
5170         }
5171
5172         /* Clear PHY Reset Asserted bit */
5173         status = E1000_READ_REG(hw, E1000_STATUS);
5174         if (status & E1000_STATUS_PHYRA)
5175                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5176         else
5177                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5178
5179         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5180         if (hw->mac.type <= e1000_ich9lan) {
5181                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5182                     (hw->phy.type == e1000_phy_igp_3)) {
5183                         e1000_phy_init_script_igp3(hw);
5184                 }
5185         } else {
5186                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5187                         /* Maybe we should do a basic PHY config */
5188                         DEBUGOUT("EEPROM not present\n");
5189                         ret_val = -E1000_ERR_CONFIG;
5190                 }
5191         }
5192
5193         return ret_val;
5194 }
5195
5196 /**
5197  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5198  * @hw: pointer to the HW structure
5199  *
5200  * In the case of a PHY power down to save power, to turn off link during a
5201  * driver unload, or when wake on LAN is not enabled, remove the link.
5202  **/
5203 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5204 {
5205         /* If the management interface is not enabled, then power down */
5206         if (!(hw->mac.ops.check_mng_mode(hw) ||
5207               hw->phy.ops.check_reset_block(hw)))
5208                 e1000_power_down_phy_copper(hw);
5209
5210         return;
5211 }
5212
5213 /**
5214  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5215  *  @hw: pointer to the HW structure
5216  *
5217  *  Clears hardware counters specific to the silicon family and calls
5218  *  clear_hw_cntrs_generic to clear all general purpose counters.
5219  **/
5220 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5221 {
5222         u16 phy_data;
5223         s32 ret_val;
5224
5225         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5226
5227         e1000_clear_hw_cntrs_base_generic(hw);
5228
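             /* These statistics registers are clear-on-read; reading them
              * here zeroes them.
              */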
5229         E1000_READ_REG(hw, E1000_ALGNERRC);
5230         E1000_READ_REG(hw, E1000_RXERRC);
5231         E1000_READ_REG(hw, E1000_TNCRS);
5232         E1000_READ_REG(hw, E1000_CEXTERR);
5233         E1000_READ_REG(hw, E1000_TSCTC);
5234         E1000_READ_REG(hw, E1000_TSCTFC);
5235
5236         E1000_READ_REG(hw, E1000_MGTPRC);
5237         E1000_READ_REG(hw, E1000_MGTPDC);
5238         E1000_READ_REG(hw, E1000_MGTPTC);
5239
5240         E1000_READ_REG(hw, E1000_IAC);
5241         E1000_READ_REG(hw, E1000_ICRXOC);
5242
5243         /* Clear PHY statistics registers */
5244         if ((hw->phy.type == e1000_phy_82578) ||
5245             (hw->phy.type == e1000_phy_82579) ||
5246             (hw->phy.type == e1000_phy_i217) ||
5247             (hw->phy.type == e1000_phy_82577)) {
5248                 ret_val = hw->phy.ops.acquire(hw);
5249                 if (ret_val)
5250                         return;
5251                 ret_val = hw->phy.ops.set_page(hw,
5252                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5253                 if (ret_val)
5254                         goto release;
5255                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5256                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5257                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5258                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5259                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5260                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5261                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5262                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5263                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5264                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5265                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5266                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5267                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5268                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5269 release:
5270                 hw->phy.ops.release(hw);
5271         }
5272 }
5273