e1000/base: cleanup unused tag
[dpdk.git] / drivers / net / e1000 / base / e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
/* Forward declarations for the ICH8/ICH9/ICH10 and PCH-family helpers
 * defined later in this file and installed as function pointers by the
 * init_*_params routines below.
 */
STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				    u16 words, u16 *data);
STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					    u16 *data);
STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					   u16 *speed, u16 *duplex);
STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					  u32 offset, u8 *data);
STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 *data);
STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					  u32 offset, u16 *data);
STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						 u32 offset, u8 byte);
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval; /* raw 16-bit register image */
};
155
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		/* NOTE(review): field is named "flockdn" but the original
		 * annotation marks bits 15:10 as Reserved -- name kept as-is
		 * for compatibility; verify against the ICH datasheet.
		 */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval; /* raw 16-bit register image */
};
168
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): regval is only 16 bits while the bit-field view
	 * above spans 32 bits, so regval aliases just the low half
	 * (grra/grwa on little-endian) -- confirm intended before changing.
	 */
	u16 regval;
};
179
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 *
 *  Returns true if the PHY responds with a plausible ID, false otherwise.
 **/
STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	/* Try twice: 0xFFFF from a locked ID-register read means the PHY
	 * did not respond on the MDIO bus.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	/* If an ID is already cached, only accept a matching read;
	 * otherwise cache whatever valid ID was just read.
	 */
	if (hw->phy.id) {
		if  (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 * (Temporarily drops and re-acquires the PHY semaphore.)
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if (hw->mac.type == e1000_pch_lpt) {
		/* Unforce SMBus mode in PHY */
		hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
		hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

		/* Unforce SMBus mode in MAC */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
	}

	return true;
}
250
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 *
 *  The write/flush/delay sequence below is order-sensitive; do not
 *  reorder the register accesses.
 **/
STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit: take override, drive the value low,
	 * hold briefly, then release the override.
	 */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	usec_delay(10);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		/* Older parts: fixed settle time. */
		msec_delay(50);
	} else {
		/* LPT and newer: poll LPCD for up to 20 * 5 ms, then allow
		 * an extra settle period.
		 */
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}
294
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 *
 *  Returns E1000_SUCCESS (0) or a negative E1000 error code if the PHY
 *  cannot be made accessible or reset.
 **/
STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

#ifdef ULP_SUPPORT
	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;

#endif /* ULP_SUPPORT */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 *
	 * The switch below deliberately falls through: newer MAC types
	 * perform their own checks first and then cascade into the steps
	 * shared with the older generations.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		 msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		/* On original PCH with valid ME firmware, the PHY is
		 * managed -- nothing further to do.
		 */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 *  the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}
428
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 *
 *  Returns E1000_SUCCESS (0) or -E1000_ERR_PHY if the PHY type cannot
 *  be determined or initialized.
 **/
STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.set_page	= e1000_set_page_igp;
	phy->ops.read_reg	= e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg	= e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* Unusual switch layout: "default" comes first and deliberately
	 * falls through into the pch2lan/pch_lpt cases when the normal
	 * PHY ID read yields an invalid ID (0 or all-revision-bits).
	 */
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	/* Install PHY-type-specific operations. */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
675
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @addr: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: value to be read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_read_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
857
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until such time otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
940         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
941         if (ret_val)
942                 goto release;
943
944         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
945 release:
946         hw->phy.ops.release(hw);
947
948         return ret_val;
949 }
950
951 /**
952  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
953  *  @hw:   pointer to the HW structure
954  *  @link: link up bool flag
955  *
956  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
957  *  preventing further DMA write requests.  Workaround the issue by disabling
958  *  the de-assertion of the clock request when in 1Gpbs mode.
959  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
960  *  speeds in order to avoid Tx hangs.
961  **/
962 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
963 {
964         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
965         u32 status = E1000_READ_REG(hw, E1000_STATUS);
966         s32 ret_val = E1000_SUCCESS;
967         u16 reg;
968
969         if (link && (status & E1000_STATUS_SPEED_1000)) {
970                 ret_val = hw->phy.ops.acquire(hw);
971                 if (ret_val)
972                         return ret_val;
973
974                 ret_val =
975                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
976                                                &reg);
977                 if (ret_val)
978                         goto release;
979
980                 ret_val =
981                     e1000_write_kmrn_reg_locked(hw,
982                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
983                                                 reg &
984                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
985                 if (ret_val)
986                         goto release;
987
988                 usec_delay(10);
989
990                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
991                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
992
993                 ret_val =
994                     e1000_write_kmrn_reg_locked(hw,
995                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
996                                                 reg);
997 release:
998                 hw->phy.ops.release(hw);
999         } else {
1000                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1001                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1002
1003                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1004                               (status & E1000_STATUS_FD)))
1005                         goto update_fextnvm6;
1006
1007                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1008                 if (ret_val)
1009                         return ret_val;
1010
1011                 /* Clear link status transmit timeout */
1012                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1013
1014                 if (status & E1000_STATUS_SPEED_100) {
1015                         /* Set inband Tx timeout to 5x10us for 100Half */
1016                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1017
1018                         /* Do not extend the K1 entry latency for 100Half */
1019                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1020                 } else {
1021                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1022                         reg |= 50 <<
1023                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1024
1025                         /* Extend the K1 entry latency for 10 Mbps */
1026                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1027                 }
1028
1029                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1030                 if (ret_val)
1031                         return ret_val;
1032
1033 update_fextnvm6:
1034                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1035         }
1036
1037         return ret_val;
1038 }
1039
1040 #ifdef ULP_SUPPORT
1041 /**
1042  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1043  *  @hw: pointer to the HW structure
1044  *  @to_sx: boolean indicating a system power state transition to Sx
1045  *
1046  *  When link is down, configure ULP mode to significantly reduce the power
1047  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1048  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1049  *  system, configure the ULP mode by software.
1050  */
1051 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1052 {
1053         u32 mac_reg;
1054         s32 ret_val = E1000_SUCCESS;
1055         u16 phy_reg;
1056
1057         if ((hw->mac.type < e1000_pch_lpt) ||
1058             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1059             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1060             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1061             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1062             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1063                 return 0;
1064
1065         if (!to_sx) {
1066                 int i = 0;
1067                 /* Poll up to 5 seconds for Cable Disconnected indication */
1068                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1069                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1070                         /* Bail if link is re-acquired */
1071                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1072                                 return -E1000_ERR_PHY;
1073                         if (i++ == 100)
1074                                 break;
1075
1076                         msec_delay(50);
1077                 }
1078                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1079                           (E1000_READ_REG(hw, E1000_FEXT) &
1080                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1081                           i * 50);
1082         }
1083
1084         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1085                 /* Request ME configure ULP mode in the PHY */
1086                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1087                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1088                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1089
1090                 goto out;
1091         }
1092
1093         ret_val = hw->phy.ops.acquire(hw);
1094         if (ret_val)
1095                 goto out;
1096
1097         /* During S0 Idle keep the phy in PCI-E mode */
1098         if (hw->dev_spec.ich8lan.smbus_disable)
1099                 goto skip_smbus;
1100
1101         /* Force SMBus mode in PHY */
1102         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1103         if (ret_val)
1104                 goto release;
1105         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1106         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1107
1108         /* Force SMBus mode in MAC */
1109         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1110         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1111         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1112
1113 skip_smbus:
1114         if (!to_sx) {
1115                 /* Change the 'Link Status Change' interrupt to trigger
1116                  * on 'Cable Status Change'
1117                  */
1118                 ret_val = e1000_read_kmrn_reg_locked(hw,
1119                                                      E1000_KMRNCTRLSTA_OP_MODES,
1120                                                      &phy_reg);
1121                 if (ret_val)
1122                         goto release;
1123                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1124                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1125                                             phy_reg);
1126         }
1127
1128         /* Set Inband ULP Exit, Reset to SMBus mode and
1129          * Disable SMBus Release on PERST# in PHY
1130          */
1131         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1132         if (ret_val)
1133                 goto release;
1134         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1135                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1136         if (to_sx) {
1137                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1138                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1139
1140                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1141         } else {
1142                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1143         }
1144         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1145
1146         /* Set Disable SMBus Release on PERST# in MAC */
1147         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1148         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1149         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1150
1151         /* Commit ULP changes in PHY by starting auto ULP configuration */
1152         phy_reg |= I218_ULP_CONFIG1_START;
1153         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1154
1155         if (!to_sx) {
1156                 /* Disable Tx so that the MAC doesn't send any (buffered)
1157                  * packets to the PHY.
1158                  */
1159                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1160                 mac_reg &= ~E1000_TCTL_EN;
1161                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1162         }
1163 release:
1164         hw->phy.ops.release(hw);
1165 out:
1166         if (ret_val)
1167                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1168         else
1169                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1170
1171         return ret_val;
1172 }
1173
1174 /**
1175  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1176  *  @hw: pointer to the HW structure
1177  *  @force: boolean indicating whether or not to force disabling ULP
1178  *
1179  *  Un-configure ULP mode when link is up, the system is transitioned from
1180  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1181  *  system, poll for an indication from ME that ULP has been un-configured.
1182  *  If not on an ME enabled system, un-configure the ULP mode by software.
1183  *
1184  *  During nominal operation, this function is called when link is acquired
1185  *  to disable ULP mode (force=false); otherwise, for example when unloading
1186  *  the driver or during Sx->S0 transitions, this is called with force=true
1187  *  to forcibly disable ULP.
1188
1189  *  When the cable is plugged in while the device is in D0, a Cable Status
1190  *  Change interrupt is generated which causes this function to be called
1191  *  to partially disable ULP mode and restart autonegotiation.  This function
1192  *  is then called again due to the resulting Link Status Change interrupt
1193  *  to finish cleaning up after the ULP flow.
1194  */
1195 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1196 {
1197         s32 ret_val = E1000_SUCCESS;
1198         u32 mac_reg;
1199         u16 phy_reg;
1200         int i = 0;
1201
1202         if ((hw->mac.type < e1000_pch_lpt) ||
1203             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1204             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1205             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1206             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1207             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1208                 return 0;
1209
1210         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1211                 if (force) {
1212                         /* Request ME un-configure ULP mode in the PHY */
1213                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1214                         mac_reg &= ~E1000_H2ME_ULP;
1215                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1216                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1217                 }
1218
1219                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1220                 while (E1000_READ_REG(hw, E1000_FWSM) &
1221                        E1000_FWSM_ULP_CFG_DONE) {
1222                         if (i++ == 10) {
1223                                 ret_val = -E1000_ERR_PHY;
1224                                 goto out;
1225                         }
1226
1227                         msec_delay(10);
1228                 }
1229                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1230
1231                 if (force) {
1232                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1233                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1234                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1235                 } else {
1236                         /* Clear H2ME.ULP after ME ULP configuration */
1237                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1238                         mac_reg &= ~E1000_H2ME_ULP;
1239                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1240
1241                         /* Restore link speed advertisements and restart
1242                          * Auto-negotiation
1243                          */
1244                         ret_val = e1000_phy_setup_autoneg(hw);
1245                         if (ret_val)
1246                                 goto out;
1247
1248                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1249                 }
1250
1251                 goto out;
1252         }
1253
1254         ret_val = hw->phy.ops.acquire(hw);
1255         if (ret_val)
1256                 goto out;
1257
1258         /* Revert the change to the 'Link Status Change'
1259          * interrupt to trigger on 'Cable Status Change'
1260          */
1261         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1262                                              &phy_reg);
1263         if (ret_val)
1264                 goto release;
1265         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1266         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1267
1268         if (force)
1269                 /* Toggle LANPHYPC Value bit */
1270                 e1000_toggle_lanphypc_pch_lpt(hw);
1271
1272         /* Unforce SMBus mode in PHY */
1273         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1274         if (ret_val) {
1275                 /* The MAC might be in PCIe mode, so temporarily force to
1276                  * SMBus mode in order to access the PHY.
1277                  */
1278                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1279                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1280                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1281
1282                 msec_delay(50);
1283
1284                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1285                                                        &phy_reg);
1286                 if (ret_val)
1287                         goto release;
1288         }
1289         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1290         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1291
1292         /* Unforce SMBus mode in MAC */
1293         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1294         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1295         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1296
1297         /* When ULP mode was previously entered, K1 was disabled by the
1298          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1299          */
1300         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1301         if (ret_val)
1302                 goto release;
1303         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1304         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1305
1306         /* Clear ULP enabled configuration */
1307         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1308         if (ret_val)
1309                 goto release;
1310         /* CSC interrupt received due to ULP Indication */
1311         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1312                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1313                              I218_ULP_CONFIG1_STICKY_ULP |
1314                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1315                              I218_ULP_CONFIG1_WOL_HOST |
1316                              I218_ULP_CONFIG1_INBAND_EXIT |
1317                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1318                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1319
1320                 /* Commit ULP changes by starting auto ULP configuration */
1321                 phy_reg |= I218_ULP_CONFIG1_START;
1322                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1323
1324                 /* Clear Disable SMBus Release on PERST# in MAC */
1325                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1326                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1327                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1328
1329                 if (!force) {
1330                         hw->phy.ops.release(hw);
1331
1332                         if (hw->mac.autoneg)
1333                                 e1000_phy_setup_autoneg(hw);
1334
1335                         e1000_sw_lcd_config_ich8lan(hw);
1336
1337                         e1000_oem_bits_config_ich8lan(hw, true);
1338
1339                         /* Set ULP state to unknown and return non-zero to
1340                          * indicate no link (yet) and re-enter on the next LSC
1341                          * to finish disabling ULP flow.
1342                          */
1343                         hw->dev_spec.ich8lan.ulp_state =
1344                             e1000_ulp_state_unknown;
1345
1346                         return 1;
1347                 }
1348         }
1349
1350         /* Re-enable Tx */
1351         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1352         mac_reg |= E1000_TCTL_EN;
1353         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1354
1355 release:
1356         hw->phy.ops.release(hw);
1357         if (force) {
1358                 hw->phy.ops.reset(hw);
1359                 msec_delay(50);
1360         }
1361 out:
1362         if (ret_val)
1363                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1364         else
1365                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1366
1367         return ret_val;
1368 }
1369
1370 #endif /* ULP_SUPPORT */
1371 /**
1372  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1373  *  @hw: pointer to the HW structure
1374  *
1375  *  Checks to see of the link status of the hardware has changed.  If a
1376  *  change in link status has been detected, then we read the PHY registers
1377  *  to get the current speed/duplex if link exists.
1378  **/
1379 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1380 {
1381         struct e1000_mac_info *mac = &hw->mac;
1382         s32 ret_val;
1383         bool link = false;
1384         u16 phy_reg;
1385
1386         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1387
1388         /* We only want to go out to the PHY registers to see if Auto-Neg
1389          * has completed and/or if our link status has changed.  The
1390          * get_link_status flag is set upon receiving a Link Status
1391          * Change or Rx Sequence Error interrupt.
1392          */
1393         if (!mac->get_link_status)
1394                 return E1000_SUCCESS;
1395
1396         if ((hw->mac.type < e1000_pch_lpt) ||
1397             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1398             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1399                 /* First we want to see if the MII Status Register reports
1400                  * link.  If so, then we want to get the current speed/duplex
1401                  * of the PHY.
1402                  */
1403                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1404                 if (ret_val)
1405                         return ret_val;
1406         } else {
1407                 /* Check the MAC's STATUS register to determine link state
1408                  * since the PHY could be inaccessible while in ULP mode.
1409                  */
1410                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1411                 if (link)
1412                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1413                 else
1414                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1415
1416                 if (ret_val)
1417                         return ret_val;
1418         }
1419
1420         if (hw->mac.type == e1000_pchlan) {
1421                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1422                 if (ret_val)
1423                         return ret_val;
1424         }
1425
1426         /* When connected at 10Mbps half-duplex, some parts are excessively
1427          * aggressive resulting in many collisions. To avoid this, increase
1428          * the IPG and reduce Rx latency in the PHY.
1429          */
1430         if (((hw->mac.type == e1000_pch2lan) ||
1431              (hw->mac.type == e1000_pch_lpt)) && link) {
1432                 u32 reg;
1433                 reg = E1000_READ_REG(hw, E1000_STATUS);
1434                 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1435                         u16 emi_addr;
1436
1437                         reg = E1000_READ_REG(hw, E1000_TIPG);
1438                         reg &= ~E1000_TIPG_IPGT_MASK;
1439                         reg |= 0xFF;
1440                         E1000_WRITE_REG(hw, E1000_TIPG, reg);
1441
1442                         /* Reduce Rx latency in analog PHY */
1443                         ret_val = hw->phy.ops.acquire(hw);
1444                         if (ret_val)
1445                                 return ret_val;
1446
1447                         if (hw->mac.type == e1000_pch2lan)
1448                                 emi_addr = I82579_RX_CONFIG;
1449                         else
1450                                 emi_addr = I217_RX_CONFIG;
1451                         ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1452
1453                         hw->phy.ops.release(hw);
1454
1455                         if (ret_val)
1456                                 return ret_val;
1457                 }
1458         }
1459
1460         /* Work-around I218 hang issue */
1461         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1462             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1463             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1464             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1465                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1466                 if (ret_val)
1467                         return ret_val;
1468         }
1469
1470         /* Clear link partner's EEE ability */
1471         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1472
1473         if (!link)
1474                 return E1000_SUCCESS; /* No link detected */
1475
1476         mac->get_link_status = false;
1477
1478         switch (hw->mac.type) {
1479         case e1000_pch2lan:
1480                 ret_val = e1000_k1_workaround_lv(hw);
1481                 if (ret_val)
1482                         return ret_val;
1483                 /* fall-thru */
1484         case e1000_pchlan:
1485                 if (hw->phy.type == e1000_phy_82578) {
1486                         ret_val = e1000_link_stall_workaround_hv(hw);
1487                         if (ret_val)
1488                                 return ret_val;
1489                 }
1490
1491                 /* Workaround for PCHx parts in half-duplex:
1492                  * Set the number of preambles removed from the packet
1493                  * when it is passed from the PHY to the MAC to prevent
1494                  * the MAC from misinterpreting the packet type.
1495                  */
1496                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1497                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1498
1499                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1500                     E1000_STATUS_FD)
1501                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1502
1503                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1504                 break;
1505         default:
1506                 break;
1507         }
1508
1509         /* Check if there was DownShift, must be checked
1510          * immediately after link-up
1511          */
1512         e1000_check_downshift_generic(hw);
1513
1514         /* Enable/Disable EEE after link up */
1515         if (hw->phy.type > e1000_phy_82579) {
1516                 ret_val = e1000_set_eee_pchlan(hw);
1517                 if (ret_val)
1518                         return ret_val;
1519         }
1520
1521         /* If we are forcing speed/duplex, then we simply return since
1522          * we have already determined whether we have link or not.
1523          */
1524         if (!mac->autoneg)
1525                 return -E1000_ERR_CONFIG;
1526
1527         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1528          * of MAC speed/duplex configuration.  So we only need to
1529          * configure Collision Distance in the MAC.
1530          */
1531         mac->ops.config_collision_dist(hw);
1532
1533         /* Configure Flow Control now that Auto-Neg has completed.
1534          * First, we need to restore the desired flow control
1535          * settings because we may have had to re-autoneg with a
1536          * different link partner.
1537          */
1538         ret_val = e1000_config_fc_after_link_up_generic(hw);
1539         if (ret_val)
1540                 DEBUGOUT("Error configuring flow control\n");
1541
1542         return ret_val;
1543 }
1544
1545 /**
1546  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1547  *  @hw: pointer to the HW structure
1548  *
1549  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1550  **/
1551 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1552 {
1553         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1554
1555         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1556         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1557         switch (hw->mac.type) {
1558         case e1000_ich8lan:
1559         case e1000_ich9lan:
1560         case e1000_ich10lan:
1561                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1562                 break;
1563         case e1000_pchlan:
1564         case e1000_pch2lan:
1565         case e1000_pch_lpt:
1566                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1567                 break;
1568         default:
1569                 break;
1570         }
1571 }
1572
1573 /**
1574  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1575  *  @hw: pointer to the HW structure
1576  *
1577  *  Acquires the mutex for performing NVM operations.
1578  **/
1579 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1580 {
1581         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1582
1583         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1584
1585         return E1000_SUCCESS;
1586 }
1587
1588 /**
1589  *  e1000_release_nvm_ich8lan - Release NVM mutex
1590  *  @hw: pointer to the HW structure
1591  *
1592  *  Releases the mutex used while performing NVM operations.
1593  **/
1594 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1595 {
1596         DEBUGFUNC("e1000_release_nvm_ich8lan");
1597
1598         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1599
1600         return;
1601 }
1602
1603 /**
1604  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1605  *  @hw: pointer to the HW structure
1606  *
1607  *  Acquires the software control flag for performing PHY and select
1608  *  MAC CSR accesses.
1609  **/
1610 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1611 {
1612         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1613         s32 ret_val = E1000_SUCCESS;
1614
1615         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1616
1617         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1618
1619         while (timeout) {
1620                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1621                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1622                         break;
1623
1624                 msec_delay_irq(1);
1625                 timeout--;
1626         }
1627
1628         if (!timeout) {
1629                 DEBUGOUT("SW has already locked the resource.\n");
1630                 ret_val = -E1000_ERR_CONFIG;
1631                 goto out;
1632         }
1633
1634         timeout = SW_FLAG_TIMEOUT;
1635
1636         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1637         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1638
1639         while (timeout) {
1640                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1641                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1642                         break;
1643
1644                 msec_delay_irq(1);
1645                 timeout--;
1646         }
1647
1648         if (!timeout) {
1649                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1650                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1651                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1652                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1653                 ret_val = -E1000_ERR_CONFIG;
1654                 goto out;
1655         }
1656
1657 out:
1658         if (ret_val)
1659                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1660
1661         return ret_val;
1662 }
1663
1664 /**
1665  *  e1000_release_swflag_ich8lan - Release software control flag
1666  *  @hw: pointer to the HW structure
1667  *
1668  *  Releases the software control flag for performing PHY and select
1669  *  MAC CSR accesses.
1670  **/
1671 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1672 {
1673         u32 extcnf_ctrl;
1674
1675         DEBUGFUNC("e1000_release_swflag_ich8lan");
1676
1677         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1678
1679         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1680                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1681                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1682         } else {
1683                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1684         }
1685
1686         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1687
1688         return;
1689 }
1690
1691 /**
1692  *  e1000_check_mng_mode_ich8lan - Checks management mode
1693  *  @hw: pointer to the HW structure
1694  *
1695  *  This checks if the adapter has any manageability enabled.
1696  *  This is a function pointer entry point only called by read/write
1697  *  routines for the PHY and NVM parts.
1698  **/
1699 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1700 {
1701         u32 fwsm;
1702
1703         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1704
1705         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1706
1707         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1708                ((fwsm & E1000_FWSM_MODE_MASK) ==
1709                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1710 }
1711
1712 /**
1713  *  e1000_check_mng_mode_pchlan - Checks management mode
1714  *  @hw: pointer to the HW structure
1715  *
1716  *  This checks if the adapter has iAMT enabled.
1717  *  This is a function pointer entry point only called by read/write
1718  *  routines for the PHY and NVM parts.
1719  **/
1720 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1721 {
1722         u32 fwsm;
1723
1724         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1725
1726         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1727
1728         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1729                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1730 }
1731
/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes, network byte order)
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 *
 *  On failure only a debug message is emitted; there is no error return.
 **/
STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] is host-owned: write low dword first, then the high dword
	 * (which carries the AV "valid" bit), flushing after each write.
	 */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count)) {
		s32 ret_val;

		/* SHRA writes require the SW control flag. */
		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates; ME may silently have locked
		 * the entry, in which case the read-back won't match.
		 */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return;

		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}
1799
/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes, network byte order)
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr. For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address. SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 *
 *  On failure only a debug message is emitted; there is no error return.
 **/
STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] is host-owned: low dword first, then the high dword
	 * carrying the AV "valid" bit, flushing after each write.
	 */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		/* FWSM WLOCK_MAC encodes how many SHRAR entries ME has
		 * locked: 0 = none, 1 = all, N>1 = entries below N usable.
		 */
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			/* SHRA writes require the SW control flag. */
			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
					rar_low);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
					rar_high);
			E1000_WRITE_FLUSH(hw);

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
				return;
		}
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
}
1877
1878 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
/**
 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 *
 *  Failures after the MAC-side update are silently ignored; the MAC MTA
 *  is programmed even if the PHY mirror cannot be written.
 **/
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count)
{
	u16 phy_reg = 0;
	int i;
	s32 ret_val;

	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

	/* Program the MAC's MTA first; this also refreshes mta_shadow,
	 * which is mirrored into the PHY below.
	 */
	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	/* PHY wakeup registers need special access enabled first. */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy each 32-bit MTA shadow entry into the PHY's BM_MTA register
	 * pair: low 16 bits first, then the high 16 bits.
	 */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(hw->mac.mta_shadow[i] &
						 0xFFFF));
		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
					   (u16)((hw->mac.mta_shadow[i] >> 16) &
						 0xFFFF));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
1922
1923 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1924 /**
1925  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1926  *  @hw: pointer to the HW structure
1927  *
1928  *  Checks if firmware is blocking the reset of the PHY.
1929  *  This is a function pointer entry point only called by
1930  *  reset routines.
1931  **/
1932 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1933 {
1934         u32 fwsm;
1935         bool blocked = false;
1936         int i = 0;
1937
1938         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1939
1940         do {
1941                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1942                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1943                         blocked = true;
1944                         msec_delay(10);
1945                         continue;
1946                 }
1947                 blocked = false;
1948         } while (blocked && (i++ < 10));
1949         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1950 }
1951
/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 *  @hw: pointer to the HW structure
 *
 *  Copies the SMBus address (and, for I217, the SMBus frequency) from the
 *  STRAP register into the PHY's HV_SMB_ADDR register and marks it valid.
 *
 *  Assumes semaphore already acquired.
 *
 **/
STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	/* Frequency field extracted before strap is masked below. */
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
		E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Install the strapped SMBus address, enable packet error checking
	 * (PEC) and mark the address valid.
	 */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		/* NOTE(review): 'freq--' both rejects the value 0 as
		 * unsupported and shifts the strap encoding down by one
		 * before its two low bits are split across the PHY's
		 * low/high frequency bits - presumably the strap stores
		 * frequency+1; confirm against the I217 datasheet.
		 */
		if (freq--) {
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
				HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			DEBUGOUT("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}
1992
/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw:   pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 *
 *  Returns E1000_SUCCESS when no SW configuration is required or when the
 *  configuration completes; otherwise propagates the first NVM/PHY error.
 **/
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		/* Only these two ICH8 SKUs use the non-mobile SW config bit;
		 * all other ICH8 parts fall through to the ICH8M mask.
		 */
		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Nothing to do unless the NVM asked for SW configuration. */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
			goto release;

	/* Extended config region length, in (reg, data) word pairs. */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		/* Mirror the MAC's LED config into the PHY. */
		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry is two NVM words: the PHY data word followed by the
	 * PHY register address word.
	 */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto release;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		/* Combine the most recent page with the register offset. */
		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2114
/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
 *  If link is down, the function will restore the default K1 setting located
 *  in the NVM.
 *
 *  Only applies to the original PCH (e1000_pchlan) MAC; a no-op otherwise.
 **/
STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default to the K1 setting the NVM requested. */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		/* The resolved speed lives in different PHY status registers
		 * depending on the PHY type (82578 vs 82577).
		 */
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2196
/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k1_ich8lan");

	/* Flip only the K1 enable bit in the KMRN K1 config register. */
	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	usec_delay(20);

	/* Save CTRL and CTRL_EXT so they can be restored afterwards. */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

	/* Temporarily force the MAC speed (with speed-select cleared and
	 * speed-bypass set) while the K1 change takes effect, then restore
	 * the original register values.  The 20us delays bracket each step.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);

	return E1000_SUCCESS;
}
2250
/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw:       pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
 *
 *  Returns E1000_SUCCESS or a negative error from the PHY/semaphore ops.
 **/
STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	/* OEM bits only exist on PCH and newer MAC types */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On the original PCH, skip SW configuration when the hardware is
	 * set up to write the OEM bits itself.
	 */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* Nothing to do unless the NVM's SW-config bit is set */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Mirror the MAC's PHY_CTRL GbE-disable/LPLU settings into the PHY
	 * OEM bits; the D0 and non-D0 cases read different PHY_CTRL bits.
	 */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
		    E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2321
2322
2323 /**
2324  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2325  *  @hw:   pointer to the HW structure
2326  **/
2327 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2328 {
2329         s32 ret_val;
2330         u16 data;
2331
2332         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2333
2334         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2335         if (ret_val)
2336                 return ret_val;
2337
2338         data |= HV_KMRN_MDIO_SLOW;
2339
2340         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2341
2342         return ret_val;
2343 }
2344
/**
 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
 **/
STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_data;

	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");

	/* Workarounds apply only to the original PCH (82577/82578 PHYs) */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	/* Early-revision 82577 (rev 1/2) and 82578 (rev 1) preamble fixes */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
						0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/* Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000_phy_sw_reset_generic(hw);
			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
							0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* NOTE(review): the PHY address is forced to 1 before the raw MDIC
	 * page-select write -- presumably the PCH PHY answers at address 1;
	 * confirm against the PHY init code.
	 */
	hw->phy.addr = 1;
	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/* Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, true);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	/* Keep only the low byte of PORT_GEN_CFG (clears the upper bits) */
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
					       phy_data & 0x00FF);
	if (ret_val)
		goto release;

	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2429
/**
 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @hw:   pointer to the HW structure
 *
 *  Copies every MAC receive-address register pair (RAL/RAH) into the PHY's
 *  BM_RAR registers through the BM wakeup-register access mechanism.
 *  Errors are silently ignored (void return).
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* Open the BM PHY wakeup-register access window; phy_reg carries
	 * saved state that the matching disable call below consumes.
	 */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		/* Only the Address Valid bit of RAH goes into BM_RAR_CTRL */
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
2470
2471 #ifndef CRC32_OS_SUPPORT
2472 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2473 {
2474         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2475         u32 i, j, mask, crc;
2476
2477         DEBUGFUNC("e1000_calc_rx_da_crc");
2478
2479         crc = 0xffffffff;
2480         for (i = 0; i < 6; i++) {
2481                 crc = crc ^ mac[i];
2482                 for (j = 8; j > 0; j--) {
2483                         mask = (crc & 1) * (-1);
2484                         crc = (crc >> 1) ^ (poly & mask);
2485                 }
2486         }
2487         return ~crc;
2488 }
2489
2490 #endif /* CRC32_OS_SUPPORT */
/**
 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @hw: pointer to the HW structure
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");

	/* Only pch2lan (82579) and newer need this workaround */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	/* disable Rx path while enabling/disabling workaround */
	/* NOTE(review): the read_reg return value is ignored here, so a
	 * failed read would propagate a stale phy_reg -- confirm intended.
	 */
	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
					phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ADDR_LEN] = {0};
			u32 addr_high, addr_low;

			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
			/* Skip entries whose Address Valid bit is clear */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
			/* Unpack little-endian RAL/RAH into a byte array */
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

#ifndef CRC32_OS_SUPPORT
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					e1000_calc_rx_da_crc(mac_addr));
#else /* CRC32_OS_SUPPORT */
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					E1000_CRC32(ETH_ADDR_LEN, mac_addr));
#endif /* CRC32_OS_SUPPORT */
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Enable CRC stripping (SECRC) */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		/* Replace bits 11:8 of the KMRN half-duplex control */
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x1A << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
						(1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Disable CRC stripping (SECRC) */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
						~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
				     ~(1 << 14));
}
2670
2671 /**
2672  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2673  *  done after every PHY reset.
2674  **/
2675 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2676 {
2677         s32 ret_val = E1000_SUCCESS;
2678
2679         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2680
2681         if (hw->mac.type != e1000_pch2lan)
2682                 return E1000_SUCCESS;
2683
2684         /* Set MDIO slow mode before any other MDIO access */
2685         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2686         if (ret_val)
2687                 return ret_val;
2688
2689         ret_val = hw->phy.ops.acquire(hw);
2690         if (ret_val)
2691                 return ret_val;
2692         /* set MSE higher to enable link to stay up when noise is high */
2693         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2694         if (ret_val)
2695                 goto release;
2696         /* drop link after 5 times MSE threshold was reached */
2697         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2698 release:
2699         hw->phy.ops.release(hw);
2700
2701         return ret_val;
2702 }
2703
/**
 *  e1000_k1_workaround_lv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 *  Disable K1 for 1000 and 100 speeds
 **/
STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;

	DEBUGFUNC("e1000_k1_workaround_lv");

	/* 82579 (pch2lan) only */
	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Only act when link is up AND auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
						       &pm_phy_reg);
			if (ret_val)
				return ret_val;
			/* Disable K1 at 1000/100 Mbps link speeds */
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
							pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			/* 10 Mbps: set the K1 beacon duration to 16 usec */
			u32 mac_reg;
			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}
2753
2754 /**
2755  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2756  *  @hw:   pointer to the HW structure
2757  *  @gate: boolean set to true to gate, false to ungate
2758  *
2759  *  Gate/ungate the automatic PHY configuration via hardware; perform
2760  *  the configuration via software instead.
2761  **/
2762 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2763 {
2764         u32 extcnf_ctrl;
2765
2766         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2767
2768         if (hw->mac.type < e1000_pch2lan)
2769                 return;
2770
2771         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2772
2773         if (gate)
2774                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2775         else
2776                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2777
2778         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2779 }
2780
/**
 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
 *  @hw: pointer to the HW structure
 *
 *  Check the appropriate indication the MAC has finished configuring the
 *  PHY after a software reset.
 **/
STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
{
	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;

	DEBUGFUNC("e1000_lan_init_done_ich8lan");

	/* Wait for basic configuration completes before proceeding */
	/* Polls STATUS.LAN_INIT_DONE every 100 usec, at most `loop` times */
	do {
		data = E1000_READ_REG(hw, E1000_STATUS);
		data &= E1000_STATUS_LAN_INIT_DONE;
		usec_delay(100);
	} while ((!data) && --loop);

	/* If basic configuration is incomplete before the above loop
	 * count reaches 0, loading the configuration from NVM will
	 * leave the PHY in a bad state possibly resulting in no link.
	 */
	if (loop == 0)
		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");

	/* Clear the Init Done bit for the next init event */
	data = E1000_READ_REG(hw, E1000_STATUS);
	data &= ~E1000_STATUS_LAN_INIT_DONE;
	E1000_WRITE_REG(hw, E1000_STATUS, data);
}
2813
/**
 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 *  @hw: pointer to the HW structure
 **/
STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	DEBUGFUNC("e1000_post_phy_reset_ich8lan");

	/* Nothing to do while PHY access is blocked */
	if (hw->phy.ops.check_reset_block(hw))
		return E1000_SUCCESS;

	/* Allow time for h/w to get to quiescent state after reset */
	msec_delay(10);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, true);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			msec_delay(10);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}

		/* Set EEE LPI Update Timer to 200usec */
		/* NOTE(review): on pch2lan, an error returned by
		 * e1000_oem_bits_config_ich8lan above is overwritten by the
		 * acquire/EMI-write result -- confirm this is intentional.
		 */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_emi_reg_locked(hw,
						     I82579_LPI_UPDATE_TIMER,
						     0x1387);
		hw->phy.ops.release(hw);
	}

	return ret_val;
}
2882
2883 /**
2884  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2885  *  @hw: pointer to the HW structure
2886  *
2887  *  Resets the PHY
2888  *  This is a function pointer entry point called by drivers
2889  *  or other shared routines.
2890  **/
2891 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2892 {
2893         s32 ret_val = E1000_SUCCESS;
2894
2895         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2896
2897         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2898         if ((hw->mac.type == e1000_pch2lan) &&
2899             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2900                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2901
2902         ret_val = e1000_phy_hw_reset_generic(hw);
2903         if (ret_val)
2904                 return ret_val;
2905
2906         return e1000_post_phy_reset_ich8lan(hw);
2907 }
2908
2909 /**
2910  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2911  *  @hw: pointer to the HW structure
2912  *  @active: true to enable LPLU, false to disable
2913  *
2914  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2915  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2916  *  the phy speed. This function will manually set the LPLU bit and restart
2917  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2918  *  since it configures the same bit.
2919  **/
2920 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2921 {
2922         s32 ret_val;
2923         u16 oem_reg;
2924
2925         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2926
2927         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2928         if (ret_val)
2929                 return ret_val;
2930
2931         if (active)
2932                 oem_reg |= HV_OEM_BITS_LPLU;
2933         else
2934                 oem_reg &= ~HV_OEM_BITS_LPLU;
2935
2936         if (!hw->phy.ops.check_reset_block(hw))
2937                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2938
2939         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2940 }
2941
/**
 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");

	/* Nothing to do for the IFE PHY */
	if (phy->type == e1000_phy_ife)
		return E1000_SUCCESS;

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

	if (active) {
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* Only the IGP3 PHY needs the SmartSpeed adjustment below */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw,
					    IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		if (ret_val)
			return ret_val;
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw,
					     IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			return ret_val;
	} else {
		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		}
		/* e1000_smart_speed_default: leave SmartSpeed untouched */
	}

	return E1000_SUCCESS;
}
3037
3038 /**
3039  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3040  *  @hw: pointer to the HW structure
3041  *  @active: true to enable LPLU, false to disable
3042  *
3043  *  Sets the LPLU D3 state according to the active flag.  When
3044  *  activating LPLU this function also disables smart speed
3045  *  and vice versa.  LPLU will not be activated unless the
3046  *  device autonegotiation advertisement meets standards of
3047  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3048  *  This is a function pointer entry point only called by
3049  *  PHY setup routines.
3050  **/
STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");

	/* Read-modify-write of the MAC's PHY power management register. */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

	if (!active) {
		/* Deactivate LPLU for the non-D0a (Dx) power states. */
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* The SmartSpeed adjustment below only applies to IGP3 PHYs. */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		}
		/* smart_speed == default: leave PORT_CONFIG untouched. */
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Only activate LPLU when autoneg advertises all duplexes
		 * at 10, 10/100, or 10/100/1000 (see function header);
		 * otherwise LPLU is left disabled and we return success.
		 */
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw,
					    IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw,
					     IGP01E1000_PHY_PORT_CONFIG,
					     data);
	}

	return ret_val;
}
3131
3132 /**
3133  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3134  *  @hw: pointer to the HW structure
3135  *  @bank:  pointer to the variable that returns the active bank
3136  *
3137  *  Reads signature byte from the NVM using the flash access registers.
3138  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3139  **/
STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Byte distance from the start of bank 0 to the start of bank 1. */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* Byte offset of the high byte of the signature word (0x13),
	 * which holds the valid-signature bits 15:14.
	 */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* On ICH8/9 the EEC register can report the valid bank
		 * directly, but only when the SEC1VAL-valid bits are set.
		 */
		eecd = E1000_READ_REG(hw, E1000_EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return E1000_SUCCESS;
		}
		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		/* Neither bank carried a valid signature. */
		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3197
3198 /**
3199  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3200  *  @hw: pointer to the HW structure
3201  *  @offset: The offset (in bytes) of the word(s) to read.
3202  *  @words: Size of data to read in words
3203  *  @data: Pointer to the word(s) to read at offset.
3204  *
3205  *  Reads a word(s) from the NVM using the flash access registers.
3206  **/
STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u16 i, word;

	DEBUGFUNC("e1000_read_nvm_ich8lan");

	/* Reject zero-length reads and any range running past word_size. */
	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	/* Bank detection failure is not fatal; fall back to bank 0. */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	/* act_offset is a word offset; the flash-word helper converts
	 * to bytes internally.
	 */
	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	ret_val = E1000_SUCCESS;
	for (i = 0; i < words; i++) {
		/* Pending (uncommitted) shadow-RAM writes take precedence
		 * over the flash contents.
		 */
		if (dev_spec->shadow_ram[offset+i].modified) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		DEBUGOUT1("NVM read error: %d\n", ret_val);

	return ret_val;
}
3259
3260 /**
3261  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3262  *  @hw: pointer to the HW structure
3263  *
3264  *  This function does initial flash setup so that a new read/write/erase cycle
3265  *  can be started.
3266  **/
STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = E1000_SUCCESS;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(1);
		}
		if (ret_val == E1000_SUCCESS) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
						hsfsts.regval);
		} else {
			/* Controller still busy after the full poll window;
			 * caller must not start a cycle.
			 */
			DEBUGOUT("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3332
3333 /**
3334  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3335  *  @hw: pointer to the HW structure
3336  *  @timeout: maximum time to wait for completion
3337  *
3338  *  This function starts a flash cycle and waits for its completion.
3339  **/
STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	DEBUGFUNC("e1000_flash_cycle_ich8lan");

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;

	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1; the do/while guarantees at
	 * least one status read even when timeout is 0
	 */
	do {
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		usec_delay(1);
	} while (i++ < timeout);

	/* Success requires completion (FDONE set) with no cycle error
	 * (FCERR clear); a timeout leaves FDONE clear and falls through
	 * to the error return.
	 */
	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return E1000_SUCCESS;

	return -E1000_ERR_NVM;
}
3367
3368 /**
3369  *  e1000_read_flash_word_ich8lan - Read word from flash
3370  *  @hw: pointer to the HW structure
3371  *  @offset: offset to data location
3372  *  @data: pointer to the location for storing the data
3373  *
3374  *  Reads the flash word at offset into data.  Offset is converted
3375  *  to bytes before read.
3376  **/
3377 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3378                                          u16 *data)
3379 {
3380         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3381
3382         if (!data)
3383                 return -E1000_ERR_NVM;
3384
3385         /* Must convert offset into bytes. */
3386         offset <<= 1;
3387
3388         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3389 }
3390
3391 /**
3392  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3393  *  @hw: pointer to the HW structure
3394  *  @offset: The offset of the byte to read.
3395  *  @data: Pointer to a byte to store the value read.
3396  *
3397  *  Reads a single byte from the NVM using the flash access registers.
3398  **/
3399 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3400                                          u8 *data)
3401 {
3402         s32 ret_val;
3403         u16 word = 0;
3404
3405         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3406
3407         if (ret_val)
3408                 return ret_val;
3409
3410         *data = (u8)word;
3411
3412         return E1000_SUCCESS;
3413 }
3414
3415 /**
3416  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3417  *  @hw: pointer to the HW structure
3418  *  @offset: The offset (in bytes) of the byte or word to read.
3419  *  @size: Size of data to read, 1=byte 2=word
3420  *  @data: Pointer to the word to store the value read.
3421  *
3422  *  Reads a byte or word from the NVM using the flash access registers.
3423  **/
STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	DEBUGFUNC("e1000_read_flash_data_ich8lan");

	/* Only 1- or 2-byte reads are supported, and the offset must fit
	 * in the FADDR linear address field.
	 */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	/* Retry the whole init/program/go sequence on cycle errors,
	 * up to ICH_FLASH_CYCLE_REPEAT_COUNT attempts.
	 */
	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == E1000_SUCCESS) {
			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3492
3493 /**
3494  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3495  *  @hw: pointer to the HW structure
3496  *  @offset: The offset (in bytes) of the word(s) to write.
3497  *  @words: Size of data to write in words
3498  *  @data: Pointer to the word(s) to write at offset.
3499  *
3500  *  Writes a byte or word to the NVM using the flash access registers.
3501  **/
3502 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3503                                    u16 *data)
3504 {
3505         struct e1000_nvm_info *nvm = &hw->nvm;
3506         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3507         u16 i;
3508
3509         DEBUGFUNC("e1000_write_nvm_ich8lan");
3510
3511         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3512             (words == 0)) {
3513                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3514                 return -E1000_ERR_NVM;
3515         }
3516
3517         nvm->ops.acquire(hw);
3518
3519         for (i = 0; i < words; i++) {
3520                 dev_spec->shadow_ram[offset+i].modified = true;
3521                 dev_spec->shadow_ram[offset+i].value = data[i];
3522         }
3523
3524         nvm->ops.release(hw);
3525
3526         return E1000_SUCCESS;
3527 }
3528
3529 /**
3530  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3531  *  @hw: pointer to the HW structure
3532  *
3533  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3534  *  which writes the checksum to the shadow ram.  The changes in the shadow
3535  *  ram are then committed to the EEPROM by processing each bank at a time
3536  *  checking for the modified bit and writing only the pending changes.
3537  *  After a successful commit, the shadow ram is cleared and is ready for
3538  *  future writes.
3539  **/
STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");

	/* Recompute the checksum into the shadow RAM first. */
	ret_val = e1000_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* Only software-managed flash needs the commit sequence below. */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		/* Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usec_delay(100);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		/* High byte goes to the next byte address (little endian). */
		usec_delay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							  act_offset + 1,
							  (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		DEBUGOUT("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	/* Clear bit 14 only (0xBFFF mask); only the high byte of the
	 * signature word needs to be rewritten, hence act_offset * 2 + 1.
	 */
	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's. We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		msec_delay(10);
	}

out:
	if (ret_val)
		DEBUGOUT1("NVM update error: %d\n", ret_val);

	return ret_val;
}
3685
3686 /**
3687  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3688  *  @hw: pointer to the HW structure
3689  *
3690  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3691  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
3692  *  calculated, in which case we need to calculate the checksum and set bit 6.
3693  **/
3694 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3695 {
3696         s32 ret_val;
3697         u16 data;
3698         u16 word;
3699         u16 valid_csum_mask;
3700
3701         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3702
3703         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3704          * the checksum needs to be fixed.  This bit is an indication that
3705          * the NVM was prepared by OEM software and did not calculate
3706          * the checksum...a likely scenario.
3707          */
3708         switch (hw->mac.type) {
3709         case e1000_pch_lpt:
3710                 word = NVM_COMPAT;
3711                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3712                 break;
3713         default:
3714                 word = NVM_FUTURE_INIT_WORD1;
3715                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3716                 break;
3717         }
3718
3719         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3720         if (ret_val)
3721                 return ret_val;
3722
3723         if (!(data & valid_csum_mask)) {
3724                 data |= valid_csum_mask;
3725                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3726                 if (ret_val)
3727                         return ret_val;
3728                 ret_val = hw->nvm.ops.update(hw);
3729                 if (ret_val)
3730                         return ret_val;
3731         }
3732
3733         return e1000_validate_nvm_checksum_generic(hw);
3734 }
3735
3736 /**
3737  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3738  *  @hw: pointer to the HW structure
3739  *  @offset: The offset (in bytes) of the byte/word to read.
3740  *  @size: Size of data to read, 1=byte 2=word
3741  *  @data: The byte(s) to write to the NVM.
3742  *
3743  *  Writes one/two bytes to the NVM using the flash access registers.
3744  **/
STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	DEBUGFUNC("e1000_write_ich8_data");

	/* Only 1- or 2-byte writes are supported, and the offset must fit
	 * in the FADDR linear address field.
	 */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	/* Retry the whole init/program/go sequence on cycle errors,
	 * up to ICH_FLASH_CYCLE_REPEAT_COUNT attempts.
	 */
	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		/* Load the data into FDATA0 before starting the cycle. */
		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3811
3812 /**
3813  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3814  *  @hw: pointer to the HW structure
3815  *  @offset: The index of the byte to read.
3816  *  @data: The byte to write to the NVM.
3817  *
3818  *  Writes a single byte to the NVM using the flash access registers.
3819  **/
3820 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3821                                           u8 data)
3822 {
3823         u16 word = (u16)data;
3824
3825         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3826
3827         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3828 }
3829
3830 /**
3831  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3832  *  @hw: pointer to the HW structure
3833  *  @offset: The offset of the byte to write.
3834  *  @byte: The byte to write to the NVM.
3835  *
3836  *  Writes a single byte to the NVM using the flash access registers.
3837  *  Goes through a retry algorithm before giving up.
3838  **/
3839 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3840                                                 u32 offset, u8 byte)
3841 {
3842         s32 ret_val;
3843         u16 program_retries;
3844
3845         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3846
3847         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3848         if (!ret_val)
3849                 return ret_val;
3850
3851         for (program_retries = 0; program_retries < 100; program_retries++) {
3852                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3853                 usec_delay(100);
3854                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3855                 if (ret_val == E1000_SUCCESS)
3856                         break;
3857         }
3858         if (program_retries == 100)
3859                 return -E1000_ERR_NVM;
3860
3861         return E1000_SUCCESS;
3862 }
3863
3864 /**
3865  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3866  *  @hw: pointer to the HW structure
3867  *  @bank: 0 for first bank, 1 for second bank, etc.
3868  *
3869  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3870  *  bank N is 4096 * N + flash_reg_addr.
3871  **/
3872 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3873 {
3874         struct e1000_nvm_info *nvm = &hw->nvm;
3875         union ich8_hws_flash_status hsfsts;
3876         union ich8_hws_flash_ctrl hsflctl;
3877         u32 flash_linear_addr;
3878         /* bank size is in 16bit words - adjust to bytes */
3879         u32 flash_bank_size = nvm->flash_bank_size * 2;
3880         s32 ret_val;
3881         s32 count = 0;
3882         s32 j, iteration, sector_size;
3883
3884         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3885
3886         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3887
3888         /* Determine HW Sector size: Read BERASE bits of hw flash status
3889          * register
3890          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3891          *     consecutive sectors.  The start index for the nth Hw sector
3892          *     can be calculated as = bank * 4096 + n * 256
3893          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3894          *     The start index for the nth Hw sector can be calculated
3895          *     as = bank * 4096
3896          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3897          *     (ich9 only, otherwise error condition)
3898          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3899          */
3900         switch (hsfsts.hsf_status.berasesz) {
3901         case 0:
3902                 /* Hw sector size 256 */
3903                 sector_size = ICH_FLASH_SEG_SIZE_256;
3904                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3905                 break;
3906         case 1:
3907                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3908                 iteration = 1;
3909                 break;
3910         case 2:
3911                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3912                 iteration = 1;
3913                 break;
3914         case 3:
3915                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3916                 iteration = 1;
3917                 break;
3918         default:
3919                 return -E1000_ERR_NVM;
3920         }
3921
3922         /* Start with the base address, then add the sector offset. */
3923         flash_linear_addr = hw->nvm.flash_base_addr;
3924         flash_linear_addr += (bank) ? flash_bank_size : 0;
3925
3926         for (j = 0; j < iteration; j++) {
3927                 do {
3928                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3929
3930                         /* Steps */
3931                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3932                         if (ret_val)
3933                                 return ret_val;
3934
3935                         /* Write a value 11 (block Erase) in Flash
3936                          * Cycle field in hw flash control
3937                          */
3938                         hsflctl.regval =
3939                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3940
3941                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3942                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3943                                                 hsflctl.regval);
3944
3945                         /* Write the last 24 bits of an index within the
3946                          * block into Flash Linear address field in Flash
3947                          * Address.
3948                          */
3949                         flash_linear_addr += (j * sector_size);
3950                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3951                                               flash_linear_addr);
3952
3953                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3954                         if (ret_val == E1000_SUCCESS)
3955                                 break;
3956
3957                         /* Check if FCERR is set to 1.  If 1,
3958                          * clear it and try the whole sequence
3959                          * a few more times else Done
3960                          */
3961                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3962                                                       ICH_FLASH_HSFSTS);
3963                         if (hsfsts.hsf_status.flcerr)
3964                                 /* repeat for some time before giving up */
3965                                 continue;
3966                         else if (!hsfsts.hsf_status.flcdone)
3967                                 return ret_val;
3968                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3969         }
3970
3971         return E1000_SUCCESS;
3972 }
3973
3974 /**
3975  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3976  *  @hw: pointer to the HW structure
3977  *  @data: Pointer to the LED settings
3978  *
3979  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3980  *  settings is all 0's or F's, set the LED default to a valid LED default
3981  *  setting.
3982  **/
3983 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3984 {
3985         s32 ret_val;
3986
3987         DEBUGFUNC("e1000_valid_led_default_ich8lan");
3988
3989         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3990         if (ret_val) {
3991                 DEBUGOUT("NVM Read Error\n");
3992                 return ret_val;
3993         }
3994
3995         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3996                 *data = ID_LED_DEFAULT_ICH8LAN;
3997
3998         return E1000_SUCCESS;
3999 }
4000
4001 /**
4002  *  e1000_id_led_init_pchlan - store LED configurations
4003  *  @hw: pointer to the HW structure
4004  *
4005  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4006  *  the PHY LED configuration register.
4007  *
4008  *  PCH also does not have an "always on" or "always off" mode which
4009  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4010  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4011  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4012  *  link based on logic in e1000_led_[on|off]_pchlan().
4013  **/
4014 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4015 {
4016         struct e1000_mac_info *mac = &hw->mac;
4017         s32 ret_val;
4018         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4019         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4020         u16 data, i, temp, shift;
4021
4022         DEBUGFUNC("e1000_id_led_init_pchlan");
4023
4024         /* Get default ID LED modes */
4025         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4026         if (ret_val)
4027                 return ret_val;
4028
4029         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4030         mac->ledctl_mode1 = mac->ledctl_default;
4031         mac->ledctl_mode2 = mac->ledctl_default;
4032
4033         for (i = 0; i < 4; i++) {
4034                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4035                 shift = (i * 5);
4036                 switch (temp) {
4037                 case ID_LED_ON1_DEF2:
4038                 case ID_LED_ON1_ON2:
4039                 case ID_LED_ON1_OFF2:
4040                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4041                         mac->ledctl_mode1 |= (ledctl_on << shift);
4042                         break;
4043                 case ID_LED_OFF1_DEF2:
4044                 case ID_LED_OFF1_ON2:
4045                 case ID_LED_OFF1_OFF2:
4046                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4047                         mac->ledctl_mode1 |= (ledctl_off << shift);
4048                         break;
4049                 default:
4050                         /* Do nothing */
4051                         break;
4052                 }
4053                 switch (temp) {
4054                 case ID_LED_DEF1_ON2:
4055                 case ID_LED_ON1_ON2:
4056                 case ID_LED_OFF1_ON2:
4057                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4058                         mac->ledctl_mode2 |= (ledctl_on << shift);
4059                         break;
4060                 case ID_LED_DEF1_OFF2:
4061                 case ID_LED_ON1_OFF2:
4062                 case ID_LED_OFF1_OFF2:
4063                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4064                         mac->ledctl_mode2 |= (ledctl_off << shift);
4065                         break;
4066                 default:
4067                         /* Do nothing */
4068                         break;
4069                 }
4070         }
4071
4072         return E1000_SUCCESS;
4073 }
4074
4075 /**
4076  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4077  *  @hw: pointer to the HW structure
4078  *
4079  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4080  *  register, so the the bus width is hard coded.
4081  **/
4082 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4083 {
4084         struct e1000_bus_info *bus = &hw->bus;
4085         s32 ret_val;
4086
4087         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4088
4089         ret_val = e1000_get_bus_info_pcie_generic(hw);
4090
4091         /* ICH devices are "PCI Express"-ish.  They have
4092          * a configuration space, but do not contain
4093          * PCI Express Capability registers, so bus width
4094          * must be hardcoded.
4095          */
4096         if (bus->width == e1000_bus_width_unknown)
4097                 bus->width = e1000_bus_width_pcie_x1;
4098
4099         return ret_val;
4100 }
4101
/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.  The sequence (quiesce DMA -> mask interrupts -> stop Tx/Rx ->
 *  global reset -> post-PHY-reset fixups) is order-sensitive.
 **/
STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 * A failure here is logged but deliberately not fatal.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Cache the NVM K1 setting in dev_spec so it is still
		 * available after the PHY is reset below.
		 */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	/* The reset is issued even if the software flag could not be
	 * acquired; ret_val is only used below to decide whether the
	 * mutex taken by e1000_acquire_swflag_ich8lan() must be released.
	 */
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	DEBUGOUT("Issuing a global reset to ich8lan\n");
	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msec_delay(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
	}

	/* Release the swflag mutex only if it was successfully taken. */
	if (!ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	/* If the PHY was reset together with the MAC, wait for the PHY
	 * configuration to complete and run the post-reset workarounds.
	 */
	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);

	/* Mask interrupts again and drain any that were latched during
	 * the reset (ICR is clear-on-read).
	 */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	reg = E1000_READ_REG(hw, E1000_KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);

	return E1000_SUCCESS;
}
4217
/**
 *  e1000_init_hw_ich8lan - Initialize the hardware
 *  @hw: pointer to the HW structure
 *
 *  Prepares the hardware for transmit and receive by doing the following:
 *   - initialize hardware bits
 *   - initialize LED identification
 *   - setup receive address registers
 *   - setup flow control
 *   - setup transmit descriptors
 *   - clear statistics
 *
 *  Returns the status of mac->ops.setup_link() (errors from the earlier
 *  LED init are logged but intentionally not propagated).
 **/
STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_init_hw_ich8lan");

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		DEBUGOUT("Error initializing identification LED\n");

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 * NOTE(review): the read_reg return value is ignored here, so a
	 * failed read clears/writes a stale value of i.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control.  Note: this ret_val is what the
	 * function ultimately returns; the steps below run regardless.
	 */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);

	/* ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000_set_pcie_no_snoop_generic(hw, snoop);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}
4307
4308 /**
4309  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4310  *  @hw: pointer to the HW structure
4311  *
4312  *  Sets/Clears required hardware bits necessary for correctly setting up the
4313  *  hardware for transmit and receive.
4314  **/
4315 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4316 {
4317         u32 reg;
4318
4319         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4320
4321         /* Extended Device Control */
4322         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4323         reg |= (1 << 22);
4324         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4325         if (hw->mac.type >= e1000_pchlan)
4326                 reg |= E1000_CTRL_EXT_PHYPDEN;
4327         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4328
4329         /* Transmit Descriptor Control 0 */
4330         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4331         reg |= (1 << 22);
4332         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4333
4334         /* Transmit Descriptor Control 1 */
4335         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4336         reg |= (1 << 22);
4337         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4338
4339         /* Transmit Arbitration Control 0 */
4340         reg = E1000_READ_REG(hw, E1000_TARC(0));
4341         if (hw->mac.type == e1000_ich8lan)
4342                 reg |= (1 << 28) | (1 << 29);
4343         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4344         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4345
4346         /* Transmit Arbitration Control 1 */
4347         reg = E1000_READ_REG(hw, E1000_TARC(1));
4348         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4349                 reg &= ~(1 << 28);
4350         else
4351                 reg |= (1 << 28);
4352         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4353         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4354
4355         /* Device Status */
4356         if (hw->mac.type == e1000_ich8lan) {
4357                 reg = E1000_READ_REG(hw, E1000_STATUS);
4358                 reg &= ~(1 << 31);
4359                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4360         }
4361
4362         /* work-around descriptor data corruption issue during nfs v2 udp
4363          * traffic, just disable the nfs filtering capability
4364          */
4365         reg = E1000_READ_REG(hw, E1000_RFCTL);
4366         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4367
4368         /* Disable IPv6 extension header parsing because some malformed
4369          * IPv6 headers can hang the Rx.
4370          */
4371         if (hw->mac.type == e1000_ich8lan)
4372                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4373         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4374
4375         /* Enable ECC on Lynxpoint */
4376         if (hw->mac.type == e1000_pch_lpt) {
4377                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4378                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4379                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4380
4381                 reg = E1000_READ_REG(hw, E1000_CTRL);
4382                 reg |= E1000_CTRL_MEHE;
4383                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4384         }
4385
4386         return;
4387 }
4388
4389 /**
4390  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4391  *  @hw: pointer to the HW structure
4392  *
4393  *  Determines which flow control settings to use, then configures flow
4394  *  control.  Calls the appropriate media-specific link configuration
4395  *  function.  Assuming the adapter has a valid link partner, a valid link
4396  *  should be established.  Assumes the hardware has previously been reset
4397  *  and the transmitter and receiver are not enabled.
4398  **/
4399 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4400 {
4401         s32 ret_val;
4402
4403         DEBUGFUNC("e1000_setup_link_ich8lan");
4404
4405         if (hw->phy.ops.check_reset_block(hw))
4406                 return E1000_SUCCESS;
4407
4408         /* ICH parts do not have a word in the NVM to determine
4409          * the default flow control setting, so we explicitly
4410          * set it to full.
4411          */
4412         if (hw->fc.requested_mode == e1000_fc_default)
4413                 hw->fc.requested_mode = e1000_fc_full;
4414
4415         /* Save off the requested flow control mode for use later.  Depending
4416          * on the link partner's capabilities, we may or may not use this mode.
4417          */
4418         hw->fc.current_mode = hw->fc.requested_mode;
4419
4420         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4421                 hw->fc.current_mode);
4422
4423         /* Continue to configure the copper link. */
4424         ret_val = hw->mac.ops.setup_physical_interface(hw);
4425         if (ret_val)
4426                 return ret_val;
4427
4428         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4429         if ((hw->phy.type == e1000_phy_82578) ||
4430             (hw->phy.type == e1000_phy_82579) ||
4431             (hw->phy.type == e1000_phy_i217) ||
4432             (hw->phy.type == e1000_phy_82577)) {
4433                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4434
4435                 ret_val = hw->phy.ops.write_reg(hw,
4436                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4437                                              hw->fc.pause_time);
4438                 if (ret_val)
4439                         return ret_val;
4440         }
4441
4442         return e1000_set_fc_watermarks_generic(hw);
4443 }
4444
/**
 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Configures the kumeran interface to the PHY to wait the appropriate time
 *  when polling the PHY, then call the generic setup_copper_link to finish
 *  configuring the copper link.
 **/
STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	/* Force link up (SLU) but clear forced speed/duplex so they come
	 * from auto-negotiation.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 * (0xFFFF = maximum value for both timeout fields.)
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	/* Read-modify-write the Kumeran inband parameter: the low 6 bits
	 * are all set here.  NOTE(review): exact field meaning comes from
	 * the Kumeran register spec — not visible in this file.
	 */
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	/* PHY-family-specific copper setup before the generic link setup. */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		/* Map hw->phy.mdix onto the IFE MDI/MDI-X control bits:
		 * 1 = force MDI (auto and force bits cleared),
		 * 2 = force MDI-X, 0/other = auto crossover.
		 */
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}
4535
4536 /**
4537  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4538  *  @hw: pointer to the HW structure
4539  *
4540  *  Calls the PHY specific link setup function and then calls the
4541  *  generic setup_copper_link to finish configuring the link for
4542  *  Lynxpoint PCH devices
4543  **/
4544 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4545 {
4546         u32 ctrl;
4547         s32 ret_val;
4548
4549         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4550
4551         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4552         ctrl |= E1000_CTRL_SLU;
4553         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4554         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4555
4556         ret_val = e1000_copper_link_setup_82577(hw);
4557         if (ret_val)
4558                 return ret_val;
4559
4560         return e1000_setup_copper_link_generic(hw);
4561 }
4562
4563 /**
4564  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4565  *  @hw: pointer to the HW structure
4566  *  @speed: pointer to store current link speed
4567  *  @duplex: pointer to store the current link duplex
4568  *
4569  *  Calls the generic get_speed_and_duplex to retrieve the current link
4570  *  information and then calls the Kumeran lock loss workaround for links at
4571  *  gigabit speeds.
4572  **/
4573 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4574                                           u16 *duplex)
4575 {
4576         s32 ret_val;
4577
4578         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4579
4580         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4581         if (ret_val)
4582                 return ret_val;
4583
4584         if ((hw->mac.type == e1000_ich8lan) &&
4585             (hw->phy.type == e1000_phy_igp_3) &&
4586             (*speed == SPEED_1000)) {
4587                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4588         }
4589
4590         return ret_val;
4591 }
4592
4593 /**
4594  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4595  *  @hw: pointer to the HW structure
4596  *
4597  *  Work-around for 82566 Kumeran PCS lock loss:
4598  *  On link status change (i.e. PCI reset, speed change) and link is up and
4599  *  speed is gigabit-
4600  *    0) if workaround is optionally disabled do nothing
4601  *    1) wait 1ms for Kumeran link to come up
4602  *    2) check Kumeran Diagnostic register PCS lock loss bit
4603  *    3) if not set the link is locked (all is good), otherwise...
4604  *    4) reset the PHY
4605  *    5) repeat up to 10 times
4606  *  Note: this is only called for IGP3 copper when speed is 1gb.
4607  **/
STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	/* Workaround can be disabled via
	 * e1000_set_kmrn_lock_loss_workaround_ich8lan().
	 */
	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return E1000_SUCCESS;

	/* Make sure link is up before proceeding.  If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	/* NOTE(review): ret_val from the link check is intentionally not
	 * propagated; a failed read leaves 'link' false and we bail out
	 * with success below — confirm this is the intended policy.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return E1000_SUCCESS;

	/* Up to 10 attempts: check for PCS lock, resetting the PHY and
	 * waiting 5 ms between tries.
	 */
	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return E1000_SUCCESS;

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* All retries exhausted: give up on gigabit entirely.
	 * Disable GigE link negotiation
	 */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}
4661
4662 /**
4663  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4664  *  @hw: pointer to the HW structure
4665  *  @state: boolean value used to set the current Kumeran workaround state
4666  *
4667  *  If ICH8, set the current Kumeran workaround state (enabled - true
4668  *  /disabled - false).
4669  **/
4670 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4671                                                  bool state)
4672 {
4673         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4674
4675         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4676
4677         if (hw->mac.type != e1000_ich8lan) {
4678                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4679                 return;
4680         }
4681
4682         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4683
4684         return;
4685 }
4686
4687 /**
4688  *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4689  *  @hw: pointer to the HW structure
4690  *
4691  *  Workaround for 82566 power-down on D3 entry:
4692  *    1) disable gigabit link
4693  *    2) write VR power-down enable
4694  *    3) read it back
4695  *  Continue if successful, else issue LCD reset and repeat
4696  **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8  retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powdown_workaround_ich8lan");

	/* Workaround is specific to the IGP3 PHY (82566). */
	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		/* NOTE(review): PHY read/write return codes are ignored in
		 * this sequence; a failed read would leave 'data' stale.
		 */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Done if shutdown mode stuck, or if this was the retry. */
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
4740
4741 /**
4742  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4743  *  @hw: pointer to the HW structure
4744  *
4745  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4746  *  LPLU, Gig disable, MDIC PHY reset):
4747  *    1) Set Kumeran Near-end loopback
4748  *    2) Clear Kumeran Near-end loopback
4749  *  Should only be called for ICH8[m] devices with any 1G Phy.
4750  **/
4751 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4752 {
4753         s32 ret_val;
4754         u16 reg_data;
4755
4756         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4757
4758         if ((hw->mac.type != e1000_ich8lan) ||
4759             (hw->phy.type == e1000_phy_ife))
4760                 return;
4761
4762         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4763                                               &reg_data);
4764         if (ret_val)
4765                 return;
4766         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4767         ret_val = e1000_write_kmrn_reg_generic(hw,
4768                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4769                                                reg_data);
4770         if (ret_val)
4771                 return;
4772         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4773         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4774                                      reg_data);
4775 }
4776
4777 /**
4778  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4779  *  @hw: pointer to the HW structure
4780  *
4781  *  During S0 to Sx transition, it is possible the link remains at gig
4782  *  instead of negotiating to a lower speed.  Before going to Sx, set
4783  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4784  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4785  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4786  *  needs to be written.
4787  *  Parts that support (and are linked to a partner which support) EEE in
4788  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4789  *  than 10Mbps w/o EEE.
4790  **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	/* Accumulate the desired PHY_CTRL value here; it is written back
	 * unconditionally at the 'out' label below.
	 */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		/* On I218 parts, clear the PLL-clock request bit before
		 * entering Sx.
		 */
		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support (undone on the resume path in
		 * e1000_resume_workarounds_pchlan()).
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		/* Program the OEM bits (LED/GbE-disable/LPLU) for Sx. */
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
4913
4914 /**
4915  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4916  *  @hw: pointer to the HW structure
4917  *
4918  *  During Sx to S0 transitions on non-managed devices or managed devices
4919  *  on which PHY resets are not blocked, if the PHY registers cannot be
4920  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
4921  *  the PHY.
4922  *  On i217, setup Intel Rapid Start Technology.
4923  **/
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");

	/* Only PCH2 (82579) and newer run the resume-time PHY init flow. */
	if (hw->mac.type < e1000_pch2lan)
		return;

	/* Re-establish PHY access after Sx (may power cycle the PHY via
	 * LANPHYPC, per the function header above).
	 */
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Clear the MTA-reset enable bit that the suspend path set
		 * in e1000_suspend_workarounds_ich8lan().
		 */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
	}
}
4986
4987 /**
4988  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4989  *  @hw: pointer to the HW structure
4990  *
4991  *  Return the LED back to the default configuration.
4992  **/
4993 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4994 {
4995         DEBUGFUNC("e1000_cleanup_led_ich8lan");
4996
4997         if (hw->phy.type == e1000_phy_ife)
4998                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4999                                              0);
5000
5001         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5002         return E1000_SUCCESS;
5003 }
5004
5005 /**
5006  *  e1000_led_on_ich8lan - Turn LEDs on
5007  *  @hw: pointer to the HW structure
5008  *
5009  *  Turn on the LEDs.
5010  **/
5011 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5012 {
5013         DEBUGFUNC("e1000_led_on_ich8lan");
5014
5015         if (hw->phy.type == e1000_phy_ife)
5016                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5017                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5018
5019         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5020         return E1000_SUCCESS;
5021 }
5022
5023 /**
5024  *  e1000_led_off_ich8lan - Turn LEDs off
5025  *  @hw: pointer to the HW structure
5026  *
5027  *  Turn off the LEDs.
5028  **/
5029 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5030 {
5031         DEBUGFUNC("e1000_led_off_ich8lan");
5032
5033         if (hw->phy.type == e1000_phy_ife)
5034                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5035                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5036
5037         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5038         return E1000_SUCCESS;
5039 }
5040
5041 /**
5042  *  e1000_setup_led_pchlan - Configures SW controllable LED
5043  *  @hw: pointer to the HW structure
5044  *
5045  *  This prepares the SW controllable LED for use.
5046  **/
5047 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5048 {
5049         DEBUGFUNC("e1000_setup_led_pchlan");
5050
5051         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5052                                      (u16)hw->mac.ledctl_mode1);
5053 }
5054
5055 /**
5056  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5057  *  @hw: pointer to the HW structure
5058  *
5059  *  Return the LED back to the default configuration.
5060  **/
5061 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5062 {
5063         DEBUGFUNC("e1000_cleanup_led_pchlan");
5064
5065         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5066                                      (u16)hw->mac.ledctl_default);
5067 }
5068
5069 /**
5070  *  e1000_led_on_pchlan - Turn LEDs on
5071  *  @hw: pointer to the HW structure
5072  *
5073  *  Turn on the LEDs.
5074  **/
5075 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5076 {
5077         u16 data = (u16)hw->mac.ledctl_mode2;
5078         u32 i, led;
5079
5080         DEBUGFUNC("e1000_led_on_pchlan");
5081
5082         /* If no link, then turn LED on by setting the invert bit
5083          * for each LED that's mode is "link_up" in ledctl_mode2.
5084          */
5085         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5086                 for (i = 0; i < 3; i++) {
5087                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5088                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5089                             E1000_LEDCTL_MODE_LINK_UP)
5090                                 continue;
5091                         if (led & E1000_PHY_LED0_IVRT)
5092                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5093                         else
5094                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5095                 }
5096         }
5097
5098         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5099 }
5100
5101 /**
5102  *  e1000_led_off_pchlan - Turn LEDs off
5103  *  @hw: pointer to the HW structure
5104  *
5105  *  Turn off the LEDs.
5106  **/
5107 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5108 {
5109         u16 data = (u16)hw->mac.ledctl_mode1;
5110         u32 i, led;
5111
5112         DEBUGFUNC("e1000_led_off_pchlan");
5113
5114         /* If no link, then turn LED off by clearing the invert bit
5115          * for each LED that's mode is "link_up" in ledctl_mode1.
5116          */
5117         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5118                 for (i = 0; i < 3; i++) {
5119                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5120                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5121                             E1000_LEDCTL_MODE_LINK_UP)
5122                                 continue;
5123                         if (led & E1000_PHY_LED0_IVRT)
5124                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5125                         else
5126                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5127                 }
5128         }
5129
5130         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5131 }
5132
5133 /**
5134  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5135  *  @hw: pointer to the HW structure
5136  *
5137  *  Read appropriate register for the config done bit for completion status
5138  *  and configure the PHY through s/w for EEPROM-less parts.
5139  *
5140  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5141  *  config done bit, so only an error is logged and continues.  If we were
5142  *  to return with error, EEPROM-less silicon would not be able to be reset
5143  *  or change link.
5144  **/
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		/* ICH10 and newer: a valid NVM bank stands in for the
		 * EECD "present" check used on older parts.
		 */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}
5193
5194 /**
5195  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5196  * @hw: pointer to the HW structure
5197  *
5198  * In the case of a PHY power down to save power, or to turn off link during a
5199  * driver unload, or wake on lan is not enabled, remove the link.
5200  **/
5201 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5202 {
5203         /* If the management interface is not enabled, then power down */
5204         if (!(hw->mac.ops.check_mng_mode(hw) ||
5205               hw->phy.ops.check_reset_block(hw)))
5206                 e1000_power_down_phy_copper(hw);
5207
5208         return;
5209 }
5210
5211 /**
5212  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5213  *  @hw: pointer to the HW structure
5214  *
5215  *  Clears hardware counters specific to the silicon family and calls
5216  *  clear_hw_cntrs_generic to clear all general purpose counters.
5217  **/
STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	/* The statistics registers below are read-to-clear; the values
	 * themselves are discarded.
	 */
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		/* Select the HV statistics page, then read each counter
		 * pair (upper/lower halves) to clear it; reads into
		 * phy_data are intentionally discarded.
		 */
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}
5271