dpdk.git: drivers/net/e1000/base/e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 0:7 GbE region Read Access */
173                 u32 grwa:8; /* 8:15 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
179
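/* Illustrative sketch (not part of the upstream flow): the unions above let
 * the flash helpers move between the raw 16-bit register value and its bit
 * fields.  FLCDONE and FLCERR are write-1-to-clear, so, assuming hsfsts_raw
 * (a hypothetical local) already holds the HSFSTS value read from the flash
 * BAR, clearing the sticky status bits before a new cycle looks roughly
 * like:
 *
 *      union ich8_hws_flash_status hsfsts;
 *
 *      hsfsts.regval = hsfsts_raw;
 *      if (!hsfsts.hsf_status.flcinprog) {
 *              hsfsts.hsf_status.flcdone = 1;
 *              hsfsts.hsf_status.flcerr = 1;
 *      }
 *
 * and hsfsts.regval is then written back to the HSFSTS flash register.
 */
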
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214                 if  (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Unforce SMBus mode in PHY */
238                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
239                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
240                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
241
242                 /* Unforce SMBus mode in MAC */
243                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
244                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
245                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
246         }
247
248         return true;
249 }
250
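/* Illustrative sketch (not part of the upstream flow): the function above
 * assumes the sw/fw/hw semaphore is already held, so every caller brackets
 * it with the acquire/release ops, roughly (accessible is a hypothetical
 * local):
 *
 *      bool accessible;
 *
 *      if (hw->phy.ops.acquire(hw))
 *              return -E1000_ERR_PHY;
 *      accessible = e1000_phy_is_accessible_pchlan(hw);
 *      hw->phy.ops.release(hw);
 *
 * which is the pattern e1000_init_phy_workarounds_pchlan() below follows.
 */
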
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
258 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
259 {
260         u32 mac_reg;
261
262         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
263
264         /* Set Phy Config Counter to 50msec */
265         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
266         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
267         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
268         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
269
270         /* Toggle LANPHYPC Value bit */
271         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
272         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
273         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
274         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275         E1000_WRITE_FLUSH(hw);
276         usec_delay(10);
277         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280
281         if (hw->mac.type < e1000_pch_lpt) {
282                 msec_delay(50);
283         } else {
284                 u16 count = 20;
285
286                 do {
287                         msec_delay(5);
288                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
289                            E1000_CTRL_EXT_LPCD) && count--);
290
291                 msec_delay(30);
292         }
293 }
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
302 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
303 {
304         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
305         s32 ret_val;
306
307         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
308
309         /* Gate automatic PHY configuration by hardware on managed and
310          * non-managed 82579 and newer adapters.
311          */
312         e1000_gate_hw_phy_config_ich8lan(hw, true);
313
314 #ifdef ULP_SUPPORT
315         /* It is not possible to be certain of the current state of ULP
316          * so forcibly disable it.
317          */
318         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
319
320 #endif /* ULP_SUPPORT */
321         ret_val = hw->phy.ops.acquire(hw);
322         if (ret_val) {
323                 DEBUGOUT("Failed to initialize PHY flow\n");
324                 goto out;
325         }
326
327         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
328          * inaccessible and resetting the PHY is not blocked, toggle the
329          * LANPHYPC Value bit to force the interconnect to PCIe mode.
330          */
331         switch (hw->mac.type) {
332         case e1000_pch_lpt:
333                 if (e1000_phy_is_accessible_pchlan(hw))
334                         break;
335
336                 /* Before toggling LANPHYPC, see if PHY is accessible by
337                  * forcing MAC to SMBus mode first.
338                  */
339                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
340                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
341                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
342
343                 /* Wait 50 milliseconds for the MAC to finish any retries
344                  * it might be performing from previous attempts to
345                  * acknowledge any PHY read requests.
346                  */
347                 msec_delay(50);
348
349                 /* fall-through */
350         case e1000_pch2lan:
351                 if (e1000_phy_is_accessible_pchlan(hw))
352                         break;
353
354                 /* fall-through */
355         case e1000_pchlan:
356                 if ((hw->mac.type == e1000_pchlan) &&
357                     (fwsm & E1000_ICH_FWSM_FW_VALID))
358                         break;
359
360                 if (hw->phy.ops.check_reset_block(hw)) {
361                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
362                         ret_val = -E1000_ERR_PHY;
363                         break;
364                 }
365
366                 /* Toggle LANPHYPC Value bit */
367                 e1000_toggle_lanphypc_pch_lpt(hw);
368                 if (hw->mac.type >= e1000_pch_lpt) {
369                         if (e1000_phy_is_accessible_pchlan(hw))
370                                 break;
371
372                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
373                          * so ensure that the MAC is also out of SMBus mode
374                          */
375                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
376                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
377                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
378
379                         if (e1000_phy_is_accessible_pchlan(hw))
380                                 break;
381
382                         ret_val = -E1000_ERR_PHY;
383                 }
384                 break;
385         default:
386                 break;
387         }
388
389         hw->phy.ops.release(hw);
390         if (!ret_val) {
391
392                 /* Check to see if able to reset PHY.  Print error if not */
393                 if (hw->phy.ops.check_reset_block(hw)) {
394                         ERROR_REPORT("Reset blocked by ME\n");
395                         goto out;
396                 }
397
398                 /* Reset the PHY before any access to it.  Doing so, ensures
399                  * that the PHY is in a known good state before we read/write
400                  * PHY registers.  The generic reset is sufficient here,
401                  * because we haven't determined the PHY type yet.
402                  */
403                 ret_val = e1000_phy_hw_reset_generic(hw);
404                 if (ret_val)
405                         goto out;
406
407                 /* On a successful reset, possibly need to wait for the PHY
408                  * to quiesce to an accessible state before returning control
409                  * to the calling function.  If the PHY does not quiesce, then
410                  * return E1000_BLK_PHY_RESET, as this is the condition that
411                  * the PHY is in.
412                  */
413                 ret_val = hw->phy.ops.check_reset_block(hw);
414                 if (ret_val)
415                         ERROR_REPORT("ME blocked access to PHY after reset\n");
416         }
417
418 out:
419         /* Ungate automatic PHY configuration on non-managed 82579 */
420         if ((hw->mac.type == e1000_pch2lan) &&
421             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
422                 msec_delay(10);
423                 e1000_gate_hw_phy_config_ich8lan(hw, false);
424         }
425
426         return ret_val;
427 }
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
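/* Illustrative sketch (not part of the upstream flow): once the ops table
 * above is populated, generic code reaches the PCH-specific helpers only
 * through the function pointers, e.g. (data/ret_val are hypothetical
 * locals):
 *
 *      u16 data;
 *      s32 ret_val;
 *
 *      ret_val = hw->phy.ops.read_reg(hw, PHY_ID1, &data);
 *
 * which resolves to e1000_read_phy_reg_hv() and acquires the swflag
 * semaphore internally; the *_locked variants are used instead wherever the
 * caller already holds that semaphore, as in the workaround flow above.
 */
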
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
675
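/* Worked example (hypothetical GFPREG value, not taken from hardware):
 * with gfpreg = 0x000F0000 and FLASH_SECTOR_ADDR_SHIFT assumed to be 12
 * (4096-byte sectors, as described above), the code above computes:
 *
 *      sector_base_addr = 0x000F0000 & FLASH_GFPREG_BASE_MASK  = 0x000
 *      sector_end_addr  = ((0x000F0000 >> 16) & mask) + 1      = 0x010
 *      flash_base_addr  = 0x000 << 12                          = 0
 *      flash_bank_size  = (0x010 - 0x000) << 12 = 65536 bytes total
 *                       = 32768 bytes per bank (two NVM banks)
 *                       = 16384 words per bank (sizeof(u16) == 2)
 */
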
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
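/* Illustrative sketch (not part of the upstream flow): after this init the
 * receive-address registers are programmed through the family-specific hook
 * selected above, e.g. (addr and index are hypothetical):
 *
 *      u8 addr[6] = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
 *      u32 index = 1;
 *
 *      if (index < hw->mac.rar_entry_count)
 *              hw->mac.ops.rar_set(hw, addr, index);
 *
 * which dispatches to e1000_rar_set_pch2lan() or e1000_rar_set_pch_lpt() on
 * PCH2/LPT parts; other families keep the default rar_set implementation.
 */
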
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @address: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: pointer to the value read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_write_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
857
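/* Illustrative sketch (not part of the upstream flow): both EMI helpers
 * assume the SW/FW/HW semaphore is held, so a read-modify-write of an EMI
 * register follows the same pattern e1000_set_eee_pchlan() uses below
 * (data/ret_val are hypothetical locals):
 *
 *      u16 data;
 *      s32 ret_val;
 *
 *      ret_val = hw->phy.ops.acquire(hw);
 *      if (ret_val)
 *              return ret_val;
 *      ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data);
 *      if (!ret_val) {
 *              data &= ~I82579_LPI_100_PLL_SHUT;
 *              ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 *                                                   data);
 *      }
 *      hw->phy.ops.release(hw);
 */
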
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled before then, otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         if (hw->phy.type == e1000_phy_82579) {
940                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
941                                                     &data);
942                 if (ret_val)
943                         goto release;
944
945                 data &= ~I82579_LPI_100_PLL_SHUT;
946                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
947                                                      data);
948         }
949
950         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
951         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
952         if (ret_val)
953                 goto release;
954
955         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
956 release:
957         hw->phy.ops.release(hw);
958
959         return ret_val;
960 }
961
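/* Illustrative sketch (not part of the upstream flow): whether EEE is
 * offered at all is controlled by the dev_spec flag checked above, so a
 * caller that wants EEE forced off would do something like:
 *
 *      hw->dev_spec.ich8lan.eee_disable = true;
 *      ret_val = e1000_set_eee_pchlan(hw);
 *
 * With the flag set, the function still clears the speed-enable bits in
 * I82579_LPI_CTRL but skips the advertisement/link-partner ability checks.
 */
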
962 /**
963  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
964  *  @hw:   pointer to the HW structure
965  *  @link: link up bool flag
966  *
967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
968  *  preventing further DMA write requests.  Work around the issue by disabling
969  *  the de-assertion of the clock request when in 1Gbps mode.
970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
971  *  speeds in order to avoid Tx hangs.
972  **/
973 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
974 {
975         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
976         u32 status = E1000_READ_REG(hw, E1000_STATUS);
977         s32 ret_val = E1000_SUCCESS;
978         u16 reg;
979
980         if (link && (status & E1000_STATUS_SPEED_1000)) {
981                 ret_val = hw->phy.ops.acquire(hw);
982                 if (ret_val)
983                         return ret_val;
984
985                 ret_val =
986                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
987                                                &reg);
988                 if (ret_val)
989                         goto release;
990
991                 ret_val =
992                     e1000_write_kmrn_reg_locked(hw,
993                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
994                                                 reg &
995                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
996                 if (ret_val)
997                         goto release;
998
999                 usec_delay(10);
1000
1001                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1002                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1003
1004                 ret_val =
1005                     e1000_write_kmrn_reg_locked(hw,
1006                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1007                                                 reg);
1008 release:
1009                 hw->phy.ops.release(hw);
1010         } else {
1011                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1012                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1013
1014                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1015                               (status & E1000_STATUS_FD)))
1016                         goto update_fextnvm6;
1017
1018                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1019                 if (ret_val)
1020                         return ret_val;
1021
1022                 /* Clear link status transmit timeout */
1023                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1024
1025                 if (status & E1000_STATUS_SPEED_100) {
1026                         /* Set inband Tx timeout to 5x10us for 100Half */
1027                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1028
1029                         /* Do not extend the K1 entry latency for 100Half */
1030                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1031                 } else {
1032                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1033                         reg |= 50 <<
1034                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1035
1036                         /* Extend the K1 entry latency for 10 Mbps */
1037                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1038                 }
1039
1040                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1041                 if (ret_val)
1042                         return ret_val;
1043
1044 update_fextnvm6:
1045                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1046         }
1047
1048         return ret_val;
1049 }
1050
1051 #ifdef ULP_SUPPORT
1052 /**
1053  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1054  *  @hw: pointer to the HW structure
1055  *  @to_sx: boolean indicating a system power state transition to Sx
1056  *
1057  *  When link is down, configure ULP mode to significantly reduce the power
1058  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1059  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1060  *  system, configure the ULP mode by software.
1061  */
1062 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1063 {
1064         u32 mac_reg;
1065         s32 ret_val = E1000_SUCCESS;
1066         u16 phy_reg;
1067
1068         if ((hw->mac.type < e1000_pch_lpt) ||
1069             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1070             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1071             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1072             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1073             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1074                 return 0;
1075
1076         if (!to_sx) {
1077                 int i = 0;
1078                 /* Poll up to 5 seconds for Cable Disconnected indication */
1079                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1080                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1081                         /* Bail if link is re-acquired */
1082                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1083                                 return -E1000_ERR_PHY;
1084                         if (i++ == 100)
1085                                 break;
1086
1087                         msec_delay(50);
1088                 }
1089                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1090                           (E1000_READ_REG(hw, E1000_FEXT) &
1091                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1092                           i * 50);
1093         }
1094
1095         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1096                 /* Request ME configure ULP mode in the PHY */
1097                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1098                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1099                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1100
1101                 goto out;
1102         }
1103
1104         ret_val = hw->phy.ops.acquire(hw);
1105         if (ret_val)
1106                 goto out;
1107
1108         /* During S0 Idle keep the phy in PCI-E mode */
1109         if (hw->dev_spec.ich8lan.smbus_disable)
1110                 goto skip_smbus;
1111
1112         /* Force SMBus mode in PHY */
1113         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1114         if (ret_val)
1115                 goto release;
1116         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1117         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1118
1119         /* Force SMBus mode in MAC */
1120         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1121         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1122         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1123
1124 skip_smbus:
1125         if (!to_sx) {
1126                 /* Change the 'Link Status Change' interrupt to trigger
1127                  * on 'Cable Status Change'
1128                  */
1129                 ret_val = e1000_read_kmrn_reg_locked(hw,
1130                                                      E1000_KMRNCTRLSTA_OP_MODES,
1131                                                      &phy_reg);
1132                 if (ret_val)
1133                         goto release;
1134                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1135                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1136                                             phy_reg);
1137         }
1138
1139         /* Set Inband ULP Exit, Reset to SMBus mode and
1140          * Disable SMBus Release on PERST# in PHY
1141          */
1142         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1143         if (ret_val)
1144                 goto release;
1145         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1146                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1147         if (to_sx) {
1148                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1149                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1150
1151                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1152         } else {
1153                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1154         }
1155         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1156
1157         /* Set Disable SMBus Release on PERST# in MAC */
1158         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1159         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1160         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1161
1162         /* Commit ULP changes in PHY by starting auto ULP configuration */
1163         phy_reg |= I218_ULP_CONFIG1_START;
1164         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1165
1166         if (!to_sx) {
1167                 /* Disable Tx so that the MAC doesn't send any (buffered)
1168                  * packets to the PHY.
1169                  */
1170                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1171                 mac_reg &= ~E1000_TCTL_EN;
1172                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1173         }
1174 release:
1175         hw->phy.ops.release(hw);
1176 out:
1177         if (ret_val)
1178                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1179         else
1180                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1181
1182         return ret_val;
1183 }
1184
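/* Illustrative sketch (not part of the upstream flow): a driver built with
 * ULP_SUPPORT would typically pair the two ULP entry points with the link
 * state, roughly:
 *
 *      if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
 *              ret_val = e1000_disable_ulp_lpt_lp(hw, false);
 *      else
 *              ret_val = e1000_enable_ulp_lpt_lp(hw, false);
 *
 * with to_sx set to true on a transition to Sx and force set to true on
 * driver unload or Sx->S0 resume, as described in the function comments.
 */
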
1185 /**
1186  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1187  *  @hw: pointer to the HW structure
1188  *  @force: boolean indicating whether or not to force disabling ULP
1189  *
1190  *  Un-configure ULP mode when link is up, the system is transitioned from
1191  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1192  *  system, poll for an indication from ME that ULP has been un-configured.
1193  *  If not on an ME enabled system, un-configure the ULP mode by software.
1194  *
1195  *  During nominal operation, this function is called when link is acquired
1196  *  to disable ULP mode (force=false); otherwise, for example when unloading
1197  *  the driver or during Sx->S0 transitions, this is called with force=true
1198  *  to forcibly disable ULP.
1199  *
1200  *  When the cable is plugged in while the device is in D0, a Cable Status
1201  *  Change interrupt is generated which causes this function to be called
1202  *  to partially disable ULP mode and restart autonegotiation.  This function
1203  *  is then called again due to the resulting Link Status Change interrupt
1204  *  to finish cleaning up after the ULP flow.
1205  */
1206 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1207 {
1208         s32 ret_val = E1000_SUCCESS;
1209         u32 mac_reg;
1210         u16 phy_reg;
1211         int i = 0;
1212
1213         if ((hw->mac.type < e1000_pch_lpt) ||
1214             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1215             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1216             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1217             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1218             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1219                 return 0;
1220
1221         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1222                 if (force) {
1223                         /* Request ME un-configure ULP mode in the PHY */
1224                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1225                         mac_reg &= ~E1000_H2ME_ULP;
1226                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1227                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1228                 }
1229
1230                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1231                 while (E1000_READ_REG(hw, E1000_FWSM) &
1232                        E1000_FWSM_ULP_CFG_DONE) {
1233                         if (i++ == 10) {
1234                                 ret_val = -E1000_ERR_PHY;
1235                                 goto out;
1236                         }
1237
1238                         msec_delay(10);
1239                 }
1240                 DEBUGOUT1("ULP_CFG_DONE cleared after %dmsec\n", i * 10);
1241
1242                 if (force) {
1243                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1244                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1245                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1246                 } else {
1247                         /* Clear H2ME.ULP after ME ULP configuration */
1248                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1249                         mac_reg &= ~E1000_H2ME_ULP;
1250                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1251
1252                         /* Restore link speed advertisements and restart
1253                          * Auto-negotiation
1254                          */
1255                         ret_val = e1000_phy_setup_autoneg(hw);
1256                         if (ret_val)
1257                                 goto out;
1258
1259                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1260                 }
1261
1262                 goto out;
1263         }
1264
1265         ret_val = hw->phy.ops.acquire(hw);
1266         if (ret_val)
1267                 goto out;
1268
1269         /* Revert the change to the 'Link Status Change'
1270          * interrupt to trigger on 'Cable Status Change'
1271          */
1272         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1273                                              &phy_reg);
1274         if (ret_val)
1275                 goto release;
1276         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1277         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1278
1279         if (force)
1280                 /* Toggle LANPHYPC Value bit */
1281                 e1000_toggle_lanphypc_pch_lpt(hw);
1282
1283         /* Unforce SMBus mode in PHY */
1284         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1285         if (ret_val) {
1286                 /* The MAC might be in PCIe mode, so temporarily force to
1287                  * SMBus mode in order to access the PHY.
1288                  */
1289                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1290                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1291                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1292
1293                 msec_delay(50);
1294
1295                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1296                                                        &phy_reg);
1297                 if (ret_val)
1298                         goto release;
1299         }
1300         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1301         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1302
1303         /* Unforce SMBus mode in MAC */
1304         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1305         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1306         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1307
1308         /* When ULP mode was previously entered, K1 was disabled by the
1309          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1310          */
1311         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1312         if (ret_val)
1313                 goto release;
1314         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1315         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1316
1317         /* Clear ULP enabled configuration */
1318         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1319         if (ret_val)
1320                 goto release;
1321         /* CSC interrupt received due to ULP Indication */
1322         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1323                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1324                              I218_ULP_CONFIG1_STICKY_ULP |
1325                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1326                              I218_ULP_CONFIG1_WOL_HOST |
1327                              I218_ULP_CONFIG1_INBAND_EXIT |
1328                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1329                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1330
1331                 /* Commit ULP changes by starting auto ULP configuration */
1332                 phy_reg |= I218_ULP_CONFIG1_START;
1333                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1334
1335                 /* Clear Disable SMBus Release on PERST# in MAC */
1336                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1337                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1338                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1339
1340                 if (!force) {
1341                         hw->phy.ops.release(hw);
1342
1343                         if (hw->mac.autoneg)
1344                                 e1000_phy_setup_autoneg(hw);
1345
1346                         e1000_sw_lcd_config_ich8lan(hw);
1347
1348                         e1000_oem_bits_config_ich8lan(hw, true);
1349
1350                         /* Set ULP state to unknown and return non-zero to
1351                          * indicate no link (yet) and re-enter on the next LSC
1352                          * to finish disabling ULP flow.
1353                          */
1354                         hw->dev_spec.ich8lan.ulp_state =
1355                             e1000_ulp_state_unknown;
1356
1357                         return 1;
1358                 }
1359         }
1360
1361         /* Re-enable Tx */
1362         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1363         mac_reg |= E1000_TCTL_EN;
1364         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1365
1366 release:
1367         hw->phy.ops.release(hw);
1368         if (force) {
1369                 hw->phy.ops.reset(hw);
1370                 msec_delay(50);
1371         }
1372 out:
1373         if (ret_val)
1374                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1375         else
1376                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1377
1378         return ret_val;
1379 }
1380
1381 #endif /* ULP_SUPPORT */
1382 /**
1383  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1384  *  @hw: pointer to the HW structure
1385  *
1386  *  Checks to see if the link status of the hardware has changed.  If a
1387  *  change in link status has been detected, then we read the PHY registers
1388  *  to get the current speed/duplex if link exists.
1389  **/
1390 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1391 {
1392         struct e1000_mac_info *mac = &hw->mac;
1393         s32 ret_val, tipg_reg = 0;
1394         u16 emi_addr, emi_val = 0;
1395         bool link = false;
1396         u16 phy_reg;
1397
1398         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1399
1400         /* We only want to go out to the PHY registers to see if Auto-Neg
1401          * has completed and/or if our link status has changed.  The
1402          * get_link_status flag is set upon receiving a Link Status
1403          * Change or Rx Sequence Error interrupt.
1404          */
1405         if (!mac->get_link_status)
1406                 return E1000_SUCCESS;
1407
1408         if ((hw->mac.type < e1000_pch_lpt) ||
1409             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1410             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1411                 /* First we want to see if the MII Status Register reports
1412                  * link.  If so, then we want to get the current speed/duplex
1413                  * of the PHY.
1414                  */
1415                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1416                 if (ret_val)
1417                         return ret_val;
1418         } else {
1419                 /* Check the MAC's STATUS register to determine link state
1420                  * since the PHY could be inaccessible while in ULP mode.
1421                  */
1422                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1423                 if (link)
1424                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1425                 else
1426                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1427
1428                 if (ret_val)
1429                         return ret_val;
1430         }
1431
1432         if (hw->mac.type == e1000_pchlan) {
1433                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1434                 if (ret_val)
1435                         return ret_val;
1436         }
1437
1438         /* When connected at 10Mbps half-duplex, some parts are excessively
1439          * aggressive, resulting in many collisions. To avoid this, increase
1440          * the IPG and reduce Rx latency in the PHY.
1441          */
1442         if (((hw->mac.type == e1000_pch2lan) ||
1443              (hw->mac.type == e1000_pch_lpt)) && link) {
1444                 u16 speed, duplex;
1445
1446                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1447                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1448                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1449
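                     /* A large IPGT value (0xFF) spaces transmissions out at
                      * 10Mbps half-duplex; 0x08 restores the normal default
                      * in the else branch.
                      */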
1450                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1451                         tipg_reg |= 0xFF;
1452                         /* Reduce Rx latency in analog PHY */
1453                         emi_val = 0;
1454                 } else {
1455                         /* Roll back the default values */
1456                         tipg_reg |= 0x08;
1457                         emi_val = 1;
1458                 }
1459
1460                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1461
1462                 ret_val = hw->phy.ops.acquire(hw);
1463                 if (ret_val)
1464                         return ret_val;
1465
1466                 if (hw->mac.type == e1000_pch2lan)
1467                         emi_addr = I82579_RX_CONFIG;
1468                 else
1469                         emi_addr = I217_RX_CONFIG;
1470                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1471
1472                 hw->phy.ops.release(hw);
1473
1474                 if (ret_val)
1475                         return ret_val;
1476         }
1477
1478         /* Work-around I218 hang issue */
1479         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1480             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1481             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1482             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1483                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1484                 if (ret_val)
1485                         return ret_val;
1486         }
1487
1488         /* Clear link partner's EEE ability */
1489         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1490
1491         if (!link)
1492                 return E1000_SUCCESS; /* No link detected */
1493
1494         mac->get_link_status = false;
1495
1496         switch (hw->mac.type) {
1497         case e1000_pch2lan:
1498                 ret_val = e1000_k1_workaround_lv(hw);
1499                 if (ret_val)
1500                         return ret_val;
1501                 /* fall-thru */
1502         case e1000_pchlan:
1503                 if (hw->phy.type == e1000_phy_82578) {
1504                         ret_val = e1000_link_stall_workaround_hv(hw);
1505                         if (ret_val)
1506                                 return ret_val;
1507                 }
1508
1509                 /* Workaround for PCHx parts in half-duplex:
1510                  * Set the number of preambles removed from the packet
1511                  * when it is passed from the PHY to the MAC to prevent
1512                  * the MAC from misinterpreting the packet type.
1513                  */
1514                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1515                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1516
1517                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1518                     E1000_STATUS_FD)
1519                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1520
1521                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1522                 break;
1523         default:
1524                 break;
1525         }
1526
1527         /* Check if there was DownShift; this must be checked
1528          * immediately after link-up
1529          */
1530         e1000_check_downshift_generic(hw);
1531
1532         /* Enable/Disable EEE after link up */
1533         if (hw->phy.type > e1000_phy_82579) {
1534                 ret_val = e1000_set_eee_pchlan(hw);
1535                 if (ret_val)
1536                         return ret_val;
1537         }
1538
1539         /* If we are forcing speed/duplex, then we simply return since
1540          * we have already determined whether we have link or not.
1541          */
1542         if (!mac->autoneg)
1543                 return -E1000_ERR_CONFIG;
1544
1545         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1546          * of MAC speed/duplex configuration.  So we only need to
1547          * configure Collision Distance in the MAC.
1548          */
1549         mac->ops.config_collision_dist(hw);
1550
1551         /* Configure Flow Control now that Auto-Neg has completed.
1552          * First, we need to restore the desired flow control
1553          * settings because we may have had to re-autoneg with a
1554          * different link partner.
1555          */
1556         ret_val = e1000_config_fc_after_link_up_generic(hw);
1557         if (ret_val)
1558                 DEBUGOUT("Error configuring flow control\n");
1559
1560         return ret_val;
1561 }
1562
1563 /**
1564  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1565  *  @hw: pointer to the HW structure
1566  *
1567  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1568  **/
1569 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1570 {
1571         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1572
1573         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1574         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1575         switch (hw->mac.type) {
1576         case e1000_ich8lan:
1577         case e1000_ich9lan:
1578         case e1000_ich10lan:
1579                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1580                 break;
1581         case e1000_pchlan:
1582         case e1000_pch2lan:
1583         case e1000_pch_lpt:
1584                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1585                 break;
1586         default:
1587                 break;
1588         }
1589 }
1590
1591 /**
1592  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1593  *  @hw: pointer to the HW structure
1594  *
1595  *  Acquires the mutex for performing NVM operations.
1596  **/
1597 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1598 {
1599         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1600
1601         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1602
1603         return E1000_SUCCESS;
1604 }
1605
1606 /**
1607  *  e1000_release_nvm_ich8lan - Release NVM mutex
1608  *  @hw: pointer to the HW structure
1609  *
1610  *  Releases the mutex used while performing NVM operations.
1611  **/
1612 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1613 {
1614         DEBUGFUNC("e1000_release_nvm_ich8lan");
1615
1616         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1617
1618         return;
1619 }
1620
1621 /**
1622  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1623  *  @hw: pointer to the HW structure
1624  *
1625  *  Acquires the software control flag for performing PHY and select
1626  *  MAC CSR accesses.
1627  **/
1628 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1629 {
1630         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1631         s32 ret_val = E1000_SUCCESS;
1632
1633         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1634
1635         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1636
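             /* Wait for the current owner (FW, HW or other SW) to release
              * the flag, then request it by setting the bit and verify below
              * that ownership was actually granted (the bit reads back set).
              */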
1637         while (timeout) {
1638                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1639                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1640                         break;
1641
1642                 msec_delay_irq(1);
1643                 timeout--;
1644         }
1645
1646         if (!timeout) {
1647                 DEBUGOUT("SW has already locked the resource.\n");
1648                 ret_val = -E1000_ERR_CONFIG;
1649                 goto out;
1650         }
1651
1652         timeout = SW_FLAG_TIMEOUT;
1653
1654         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1655         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1656
1657         while (timeout) {
1658                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1659                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1660                         break;
1661
1662                 msec_delay_irq(1);
1663                 timeout--;
1664         }
1665
1666         if (!timeout) {
1667                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1668                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1669                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1670                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1671                 ret_val = -E1000_ERR_CONFIG;
1672                 goto out;
1673         }
1674
1675 out:
1676         if (ret_val)
1677                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1678
1679         return ret_val;
1680 }
1681
1682 /**
1683  *  e1000_release_swflag_ich8lan - Release software control flag
1684  *  @hw: pointer to the HW structure
1685  *
1686  *  Releases the software control flag for performing PHY and select
1687  *  MAC CSR accesses.
1688  **/
1689 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1690 {
1691         u32 extcnf_ctrl;
1692
1693         DEBUGFUNC("e1000_release_swflag_ich8lan");
1694
1695         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1696
1697         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1698                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1699                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1700         } else {
1701                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1702         }
1703
1704         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1705
1706         return;
1707 }
1708
1709 /**
1710  *  e1000_check_mng_mode_ich8lan - Checks management mode
1711  *  @hw: pointer to the HW structure
1712  *
1713  *  This checks if the adapter has any manageability enabled.
1714  *  This is a function pointer entry point only called by read/write
1715  *  routines for the PHY and NVM parts.
1716  **/
1717 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1718 {
1719         u32 fwsm;
1720
1721         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1722
1723         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1724
1725         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1726                ((fwsm & E1000_FWSM_MODE_MASK) ==
1727                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1728 }
1729
1730 /**
1731  *  e1000_check_mng_mode_pchlan - Checks management mode
1732  *  @hw: pointer to the HW structure
1733  *
1734  *  This checks if the adapter has iAMT enabled.
1735  *  This is a function pointer entry point only called by read/write
1736  *  routines for the PHY and NVM parts.
1737  **/
1738 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1739 {
1740         u32 fwsm;
1741
1742         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1743
1744         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1745
1746         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1747                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1748 }
1749
1750 /**
1751  *  e1000_rar_set_pch2lan - Set receive address register
1752  *  @hw: pointer to the HW structure
1753  *  @addr: pointer to the receive address
1754  *  @index: receive address array register
1755  *
1756  *  Sets the receive address array register at index to the address passed
1757  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1758  *  contain the MAC address, but RAR[1-6] are reserved for manageability (ME).
1759  *  Use SHRA[0-3] in place of those reserved for ME.
1760  **/
1761 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1762 {
1763         u32 rar_low, rar_high;
1764
1765         DEBUGFUNC("e1000_rar_set_pch2lan");
1766
1767         /* HW expects these in little endian so we reverse the byte order
1768          * from network order (big endian) to little endian
1769          */
1770         rar_low = ((u32) addr[0] |
1771                    ((u32) addr[1] << 8) |
1772                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1773
1774         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1775
1776         /* If MAC address zero, no need to set the AV bit */
1777         if (rar_low || rar_high)
1778                 rar_high |= E1000_RAH_AV;
1779
1780         if (index == 0) {
1781                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1782                 E1000_WRITE_FLUSH(hw);
1783                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1784                 E1000_WRITE_FLUSH(hw);
1785                 return E1000_SUCCESS;
1786         }
1787
1788         /* RAR[1-6] are owned by manageability.  Skip those and program the
1789          * next address into the SHRA register array.
1790          */
1791         if (index < (u32) (hw->mac.rar_entry_count)) {
1792                 s32 ret_val;
1793
1794                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1795                 if (ret_val)
1796                         goto out;
1797
1798                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1799                 E1000_WRITE_FLUSH(hw);
1800                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1801                 E1000_WRITE_FLUSH(hw);
1802
1803                 e1000_release_swflag_ich8lan(hw);
1804
1805                 /* verify the register updates */
1806                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1807                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1808                         return E1000_SUCCESS;
1809
1810                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1811                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1812         }
1813
1814 out:
1815         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1816         return -E1000_ERR_CONFIG;
1817 }
1818
1819 /**
1820  *  e1000_rar_set_pch_lpt - Set receive address registers
1821  *  @hw: pointer to the HW structure
1822  *  @addr: pointer to the receive address
1823  *  @index: receive address array register
1824  *
1825  *  Sets the receive address register array at index to the address passed
1826  *  in by addr. For LPT, RAR[0] is the base address register that is to
1827  *  contain the MAC address. SHRA[0-10] are the shared receive address
1828  *  registers that are shared between the Host and manageability engine (ME).
1829  **/
1830 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1831 {
1832         u32 rar_low, rar_high;
1833         u32 wlock_mac;
1834
1835         DEBUGFUNC("e1000_rar_set_pch_lpt");
1836
1837         /* HW expects these in little endian so we reverse the byte order
1838          * from network order (big endian) to little endian
1839          */
1840         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1841                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1842
1843         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1844
1845         /* If MAC address zero, no need to set the AV bit */
1846         if (rar_low || rar_high)
1847                 rar_high |= E1000_RAH_AV;
1848
1849         if (index == 0) {
1850                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1851                 E1000_WRITE_FLUSH(hw);
1852                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1853                 E1000_WRITE_FLUSH(hw);
1854                 return E1000_SUCCESS;
1855         }
1856
1857         /* The manageability engine (ME) can lock certain SHRAR registers that
1858          * it is using - those registers are unavailable for use.
1859          */
1860         if (index < hw->mac.rar_entry_count) {
1861                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1862                             E1000_FWSM_WLOCK_MAC_MASK;
1863                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1864
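                     /* As used here, wlock_mac == 0 means no SHRAR entries
                      * are locked by ME, wlock_mac == 1 means all of them
                      * are, and any other value allows writes only up to
                      * that index.
                      */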
1865                 /* Check if all SHRAR registers are locked */
1866                 if (wlock_mac == 1)
1867                         goto out;
1868
1869                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1870                         s32 ret_val;
1871
1872                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1873
1874                         if (ret_val)
1875                                 goto out;
1876
1877                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1878                                         rar_low);
1879                         E1000_WRITE_FLUSH(hw);
1880                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1881                                         rar_high);
1882                         E1000_WRITE_FLUSH(hw);
1883
1884                         e1000_release_swflag_ich8lan(hw);
1885
1886                         /* verify the register updates */
1887                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1888                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1889                                 return E1000_SUCCESS;
1890                 }
1891         }
1892
1893 out:
1894         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1895         return -E1000_ERR_CONFIG;
1896 }
1897
1898 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1899 /**
1900  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1901  *  @hw: pointer to the HW structure
1902  *  @mc_addr_list: array of multicast addresses to program
1903  *  @mc_addr_count: number of multicast addresses to program
1904  *
1905  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1906  *  The caller must have a packed mc_addr_list of multicast addresses.
1907  **/
1908 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1909                                               u8 *mc_addr_list,
1910                                               u32 mc_addr_count)
1911 {
1912         u16 phy_reg = 0;
1913         int i;
1914         s32 ret_val;
1915
1916         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1917
1918         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1919
1920         ret_val = hw->phy.ops.acquire(hw);
1921         if (ret_val)
1922                 return;
1923
1924         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1925         if (ret_val)
1926                 goto release;
1927
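             /* Mirror each 32-bit MTA shadow entry into the corresponding
              * pair of 16-bit BM_MTA wakeup registers in the PHY, low word
              * first.
              */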
1928         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1929                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1930                                            (u16)(hw->mac.mta_shadow[i] &
1931                                                  0xFFFF));
1932                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1933                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1934                                                  0xFFFF));
1935         }
1936
1937         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1938
1939 release:
1940         hw->phy.ops.release(hw);
1941 }
1942
1943 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1944 /**
1945  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1946  *  @hw: pointer to the HW structure
1947  *
1948  *  Checks if firmware is blocking the reset of the PHY.
1949  *  This is a function pointer entry point only called by
1950  *  reset routines.
1951  **/
1952 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1953 {
1954         u32 fwsm;
1955         bool blocked = false;
1956         int i = 0;
1957
1958         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1959
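             /* Retry for up to roughly 100 ms (10 x 10 ms) in case firmware
              * is only temporarily blocking the PHY reset.
              */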
1960         do {
1961                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1962                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1963                         blocked = true;
1964                         msec_delay(10);
1965                         continue;
1966                 }
1967                 blocked = false;
1968         } while (blocked && (i++ < 10));
1969         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1970 }
1971
1972 /**
1973  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1974  *  @hw: pointer to the HW structure
1975  *
1976  *  Assumes semaphore already acquired.
1977  *
1978  **/
1979 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1980 {
1981         u16 phy_data;
1982         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1983         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1984                 E1000_STRAP_SMT_FREQ_SHIFT;
1985         s32 ret_val;
1986
1987         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1988
1989         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1990         if (ret_val)
1991                 return ret_val;
1992
1993         phy_data &= ~HV_SMB_ADDR_MASK;
1994         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1995         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1996
1997         if (hw->phy.type == e1000_phy_i217) {
1998                 /* Restore SMBus frequency */
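                     /* freq holds the strapped SMBus frequency selector and
                      * zero is treated as unsupported; the two low bits of
                      * (freq - 1) are placed into the low/high frequency
                      * bits of HV_SMB_ADDR.
                      */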
1999                 if (freq--) {
2000                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2001                         phy_data |= (freq & (1 << 0)) <<
2002                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2003                         phy_data |= (freq & (1 << 1)) <<
2004                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2005                 } else {
2006                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2007                 }
2008         }
2009
2010         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2011 }
2012
2013 /**
2014  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2015  *  @hw:   pointer to the HW structure
2016  *
2017  *  SW should configure the LCD from the NVM extended configuration region
2018  *  as a workaround for certain parts.
2019  **/
2020 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2021 {
2022         struct e1000_phy_info *phy = &hw->phy;
2023         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2024         s32 ret_val = E1000_SUCCESS;
2025         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2026
2027         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2028
2029         /* Initialize the PHY from the NVM on ICH platforms.  This
2030          * is needed due to an issue where the NVM configuration is
2031          * not properly autoloaded after power transitions.
2032          * Therefore, after each PHY reset, we will load the
2033          * configuration data out of the NVM manually.
2034          */
2035         switch (hw->mac.type) {
2036         case e1000_ich8lan:
2037                 if (phy->type != e1000_phy_igp_3)
2038                         return ret_val;
2039
2040                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2041                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2042                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2043                         break;
2044                 }
2045                 /* Fall-thru */
2046         case e1000_pchlan:
2047         case e1000_pch2lan:
2048         case e1000_pch_lpt:
2049                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2050                 break;
2051         default:
2052                 return ret_val;
2053         }
2054
2055         ret_val = hw->phy.ops.acquire(hw);
2056         if (ret_val)
2057                 return ret_val;
2058
2059         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2060         if (!(data & sw_cfg_mask))
2061                 goto release;
2062
2063         /* Make sure HW does not configure LCD from PHY
2064          * extended configuration before SW configuration
2065          */
2066         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2067         if ((hw->mac.type < e1000_pch2lan) &&
2068             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2069                 goto release;
2070
2071         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2072         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2073         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2074         if (!cnf_size)
2075                 goto release;
2076
2077         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2078         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2079
2080         if (((hw->mac.type == e1000_pchlan) &&
2081              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2082             (hw->mac.type > e1000_pchlan)) {
2083                 /* HW configures the SMBus address and LEDs when the
2084                  * OEM and LCD Write Enable bits are set in the NVM.
2085                  * When both NVM bits are cleared, SW will configure
2086                  * them instead.
2087                  */
2088                 ret_val = e1000_write_smbus_addr(hw);
2089                 if (ret_val)
2090                         goto release;
2091
2092                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2093                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2094                                                         (u16)data);
2095                 if (ret_val)
2096                         goto release;
2097         }
2098
2099         /* Configure LCD from extended configuration region. */
2100
2101         /* cnf_base_addr is in DWORD */
2102         word_addr = (u16)(cnf_base_addr << 1);
2103
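             /* Each extended-configuration entry is a pair of NVM words:
              * register data followed by register address.  Page-select
              * values are cached in phy_page and folded into later
              * addresses rather than written out directly.
              */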
2104         for (i = 0; i < cnf_size; i++) {
2105                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2106                                            &reg_data);
2107                 if (ret_val)
2108                         goto release;
2109
2110                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2111                                            1, &reg_addr);
2112                 if (ret_val)
2113                         goto release;
2114
2115                 /* Save off the PHY page for future writes. */
2116                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2117                         phy_page = reg_data;
2118                         continue;
2119                 }
2120
2121                 reg_addr &= PHY_REG_MASK;
2122                 reg_addr |= phy_page;
2123
2124                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2125                                                     reg_data);
2126                 if (ret_val)
2127                         goto release;
2128         }
2129
2130 release:
2131         hw->phy.ops.release(hw);
2132         return ret_val;
2133 }
2134
2135 /**
2136  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2137  *  @hw:   pointer to the HW structure
2138  *  @link: link up bool flag
2139  *
2140  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2141  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2142  *  If link is down, the function will restore the default K1 setting located
2143  *  in the NVM.
2144  **/
2145 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2146 {
2147         s32 ret_val = E1000_SUCCESS;
2148         u16 status_reg = 0;
2149         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2150
2151         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2152
2153         if (hw->mac.type != e1000_pchlan)
2154                 return E1000_SUCCESS;
2155
2156         /* Wrap the whole flow with the sw flag */
2157         ret_val = hw->phy.ops.acquire(hw);
2158         if (ret_val)
2159                 return ret_val;
2160
2161         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2162         if (link) {
2163                 if (hw->phy.type == e1000_phy_82578) {
2164                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2165                                                               &status_reg);
2166                         if (ret_val)
2167                                 goto release;
2168
2169                         status_reg &= (BM_CS_STATUS_LINK_UP |
2170                                        BM_CS_STATUS_RESOLVED |
2171                                        BM_CS_STATUS_SPEED_MASK);
2172
2173                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2174                                            BM_CS_STATUS_RESOLVED |
2175                                            BM_CS_STATUS_SPEED_1000))
2176                                 k1_enable = false;
2177                 }
2178
2179                 if (hw->phy.type == e1000_phy_82577) {
2180                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2181                                                               &status_reg);
2182                         if (ret_val)
2183                                 goto release;
2184
2185                         status_reg &= (HV_M_STATUS_LINK_UP |
2186                                        HV_M_STATUS_AUTONEG_COMPLETE |
2187                                        HV_M_STATUS_SPEED_MASK);
2188
2189                         if (status_reg == (HV_M_STATUS_LINK_UP |
2190                                            HV_M_STATUS_AUTONEG_COMPLETE |
2191                                            HV_M_STATUS_SPEED_1000))
2192                                 k1_enable = false;
2193                 }
2194
2195                 /* Link stall fix for link up */
2196                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2197                                                        0x0100);
2198                 if (ret_val)
2199                         goto release;
2200
2201         } else {
2202                 /* Link stall fix for link down */
2203                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2204                                                        0x4100);
2205                 if (ret_val)
2206                         goto release;
2207         }
2208
2209         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2210
2211 release:
2212         hw->phy.ops.release(hw);
2213
2214         return ret_val;
2215 }
2216
2217 /**
2218  *  e1000_configure_k1_ich8lan - Configure K1 power state
2219  *  @hw: pointer to the HW structure
2220  *  @k1_enable: K1 state to configure
2221  *
2222  *  Configure the K1 power state based on the provided parameter.
2223  *  Assumes semaphore already acquired.
2224  *
2225  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2226  **/
2227 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2228 {
2229         s32 ret_val;
2230         u32 ctrl_reg = 0;
2231         u32 ctrl_ext = 0;
2232         u32 reg = 0;
2233         u16 kmrn_reg = 0;
2234
2235         DEBUGFUNC("e1000_configure_k1_ich8lan");
2236
2237         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2238                                              &kmrn_reg);
2239         if (ret_val)
2240                 return ret_val;
2241
2242         if (k1_enable)
2243                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2244         else
2245                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2246
2247         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2248                                               kmrn_reg);
2249         if (ret_val)
2250                 return ret_val;
2251
2252         usec_delay(20);
2253         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2254         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2255
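             /* Briefly force the MAC speed configuration (force-speed with
              * the 1000/100 bits cleared and speed bypass set), then restore
              * the original CTRL/CTRL_EXT values; this short toggle is
              * presumably what lets the new K1 setting take effect.
              */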
2256         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2257         reg |= E1000_CTRL_FRCSPD;
2258         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2259
2260         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2261         E1000_WRITE_FLUSH(hw);
2262         usec_delay(20);
2263         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2264         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2265         E1000_WRITE_FLUSH(hw);
2266         usec_delay(20);
2267
2268         return E1000_SUCCESS;
2269 }
2270
2271 /**
2272  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2273  *  @hw:       pointer to the HW structure
2274  *  @d0_state: boolean if entering d0 or d3 device state
2275  *
2276  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2277  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2278  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2279  **/
2280 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2281 {
2282         s32 ret_val = 0;
2283         u32 mac_reg;
2284         u16 oem_reg;
2285
2286         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2287
2288         if (hw->mac.type < e1000_pchlan)
2289                 return ret_val;
2290
2291         ret_val = hw->phy.ops.acquire(hw);
2292         if (ret_val)
2293                 return ret_val;
2294
2295         if (hw->mac.type == e1000_pchlan) {
2296                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2297                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2298                         goto release;
2299         }
2300
2301         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2302         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2303                 goto release;
2304
2305         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2306
2307         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2308         if (ret_val)
2309                 goto release;
2310
2311         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2312
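             /* Mirror the MAC's PHY_CTRL power-management bits into the
              * PHY's OEM bits: the D0 LPLU/Gbe-disable variants when
              * entering D0, and either the D0 or non-D0a variants otherwise.
              */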
2313         if (d0_state) {
2314                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2315                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2316
2317                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2318                         oem_reg |= HV_OEM_BITS_LPLU;
2319         } else {
2320                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2321                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2322                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2323
2324                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2325                     E1000_PHY_CTRL_NOND0A_LPLU))
2326                         oem_reg |= HV_OEM_BITS_LPLU;
2327         }
2328
2329         /* Set Restart auto-neg to activate the bits */
2330         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2331             !hw->phy.ops.check_reset_block(hw))
2332                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2333
2334         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2335
2336 release:
2337         hw->phy.ops.release(hw);
2338
2339         return ret_val;
2340 }
2341
2342
2343 /**
2344  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2345  *  @hw:   pointer to the HW structure
2346  **/
2347 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2348 {
2349         s32 ret_val;
2350         u16 data;
2351
2352         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2353
2354         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2355         if (ret_val)
2356                 return ret_val;
2357
2358         data |= HV_KMRN_MDIO_SLOW;
2359
2360         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2361
2362         return ret_val;
2363 }
2364
2365 /**
2366  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2367  *  done after every PHY reset.
2368  **/
2369 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2370 {
2371         s32 ret_val = E1000_SUCCESS;
2372         u16 phy_data;
2373
2374         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2375
2376         if (hw->mac.type != e1000_pchlan)
2377                 return E1000_SUCCESS;
2378
2379         /* Set MDIO slow mode before any other MDIO access */
2380         if (hw->phy.type == e1000_phy_82577) {
2381                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2382                 if (ret_val)
2383                         return ret_val;
2384         }
2385
2386         if (((hw->phy.type == e1000_phy_82577) &&
2387              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2388             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2389                 /* Disable generation of early preamble */
2390                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2391                 if (ret_val)
2392                         return ret_val;
2393
2394                 /* Preamble tuning for SSC */
2395                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2396                                                 0xA204);
2397                 if (ret_val)
2398                         return ret_val;
2399         }
2400
2401         if (hw->phy.type == e1000_phy_82578) {
2402                 /* Return registers to default by doing a soft reset then
2403                  * writing 0x3140 to the control register.
2404                  */
2405                 if (hw->phy.revision < 2) {
2406                         e1000_phy_sw_reset_generic(hw);
2407                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2408                                                         0x3140);
2409                 }
2410         }
2411
2412         /* Select page 0 */
2413         ret_val = hw->phy.ops.acquire(hw);
2414         if (ret_val)
2415                 return ret_val;
2416
2417         hw->phy.addr = 1;
2418         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2419         hw->phy.ops.release(hw);
2420         if (ret_val)
2421                 return ret_val;
2422
2423         /* Configure the K1 Si workaround during phy reset assuming there is
2424          * link so that it disables K1 if link is in 1Gbps.
2425          */
2426         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2427         if (ret_val)
2428                 return ret_val;
2429
2430         /* Workaround for link disconnects on a busy hub in half duplex */
2431         ret_val = hw->phy.ops.acquire(hw);
2432         if (ret_val)
2433                 return ret_val;
2434         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2435         if (ret_val)
2436                 goto release;
2437         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2438                                                phy_data & 0x00FF);
2439         if (ret_val)
2440                 goto release;
2441
2442         /* set MSE higher to enable link to stay up when noise is high */
2443         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2444 release:
2445         hw->phy.ops.release(hw);
2446
2447         return ret_val;
2448 }
2449
2450 /**
2451  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2452  *  @hw:   pointer to the HW structure
2453  **/
2454 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2455 {
2456         u32 mac_reg;
2457         u16 i, phy_reg = 0;
2458         s32 ret_val;
2459
2460         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2461
2462         ret_val = hw->phy.ops.acquire(hw);
2463         if (ret_val)
2464                 return;
2465         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2466         if (ret_val)
2467                 goto release;
2468
2469         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2470         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2471                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2472                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2473                                            (u16)(mac_reg & 0xFFFF));
2474                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2475                                            (u16)((mac_reg >> 16) & 0xFFFF));
2476
2477                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2478                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2479                                            (u16)(mac_reg & 0xFFFF));
2480                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2481                                            (u16)((mac_reg & E1000_RAH_AV)
2482                                                  >> 16));
2483         }
2484
2485         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2486
2487 release:
2488         hw->phy.ops.release(hw);
2489 }
2490
2491 #ifndef CRC32_OS_SUPPORT
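     /**
      *  e1000_calc_rx_da_crc - CRC-32 of a receive (destination) MAC address
      *  @mac: six byte MAC address
      *
      *  Software fallback used when no OS CRC32 helper is available.  Computes
      *  the IEEE 802.3 CRC (reflected polynomial 0xEDB88320) over the six
      *  address bytes; the result seeds the PCH_RAICC registers in the jumbo
      *  frame workaround below.
      **/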
2492 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2493 {
2494         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2495         u32 i, j, mask, crc;
2496
2497         DEBUGFUNC("e1000_calc_rx_da_crc");
2498
2499         crc = 0xffffffff;
2500         for (i = 0; i < 6; i++) {
2501                 crc = crc ^ mac[i];
2502                 for (j = 8; j > 0; j--) {
2503                         mask = (crc & 1) * (-1);
2504                         crc = (crc >> 1) ^ (poly & mask);
2505                 }
2506         }
2507         return ~crc;
2508 }
2509
2510 #endif /* CRC32_OS_SUPPORT */
2511 /**
2512  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2513  *  with 82579 PHY
2514  *  @hw: pointer to the HW structure
2515  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2516  **/
2517 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2518 {
2519         s32 ret_val = E1000_SUCCESS;
2520         u16 phy_reg, data;
2521         u32 mac_reg;
2522         u16 i;
2523
2524         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2525
2526         if (hw->mac.type < e1000_pch2lan)
2527                 return E1000_SUCCESS;
2528
2529         /* disable Rx path while enabling/disabling workaround */
2530         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2531         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2532                                         phy_reg | (1 << 14));
2533         if (ret_val)
2534                 return ret_val;
2535
2536         if (enable) {
2537                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2538                  * SHRAL/H) and initial CRC values to the MAC
2539                  */
2540                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2541                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2542                         u32 addr_high, addr_low;
2543
2544                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2545                         if (!(addr_high & E1000_RAH_AV))
2546                                 continue;
2547                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2548                         mac_addr[0] = (addr_low & 0xFF);
2549                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2550                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2551                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2552                         mac_addr[4] = (addr_high & 0xFF);
2553                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2554
2555 #ifndef CRC32_OS_SUPPORT
2556                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2557                                         e1000_calc_rx_da_crc(mac_addr));
2558 #else /* CRC32_OS_SUPPORT */
2559                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2560                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2561 #endif /* CRC32_OS_SUPPORT */
2562                 }
2563
2564                 /* Write Rx addresses to the PHY */
2565                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2566
2567                 /* Enable jumbo frame workaround in the MAC */
2568                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2569                 mac_reg &= ~(1 << 14);
2570                 mac_reg |= (7 << 15);
2571                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2572
2573                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2574                 mac_reg |= E1000_RCTL_SECRC;
2575                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2576
2577                 ret_val = e1000_read_kmrn_reg_generic(hw,
2578                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2579                                                 &data);
2580                 if (ret_val)
2581                         return ret_val;
2582                 ret_val = e1000_write_kmrn_reg_generic(hw,
2583                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2584                                                 data | (1 << 0));
2585                 if (ret_val)
2586                         return ret_val;
2587                 ret_val = e1000_read_kmrn_reg_generic(hw,
2588                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2589                                                 &data);
2590                 if (ret_val)
2591                         return ret_val;
2592                 data &= ~(0xF << 8);
2593                 data |= (0xB << 8);
2594                 ret_val = e1000_write_kmrn_reg_generic(hw,
2595                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2596                                                 data);
2597                 if (ret_val)
2598                         return ret_val;
2599
2600                 /* Enable jumbo frame workaround in the PHY */
2601                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2602                 data &= ~(0x7F << 5);
2603                 data |= (0x37 << 5);
2604                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2605                 if (ret_val)
2606                         return ret_val;
2607                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2608                 data &= ~(1 << 13);
2609                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2610                 if (ret_val)
2611                         return ret_val;
2612                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2613                 data &= ~(0x3FF << 2);
2614                 data |= (E1000_TX_PTR_GAP << 2);
2615                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2616                 if (ret_val)
2617                         return ret_val;
2618                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2619                 if (ret_val)
2620                         return ret_val;
2621                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2622                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2623                                                 (1 << 10));
2624                 if (ret_val)
2625                         return ret_val;
2626         } else {
2627                 /* Write MAC register values back to h/w defaults */
2628                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2629                 mac_reg &= ~(0xF << 14);
2630                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2631
2632                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2633                 mac_reg &= ~E1000_RCTL_SECRC;
2634                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2635
2636                 ret_val = e1000_read_kmrn_reg_generic(hw,
2637                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2638                                                 &data);
2639                 if (ret_val)
2640                         return ret_val;
2641                 ret_val = e1000_write_kmrn_reg_generic(hw,
2642                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2643                                                 data & ~(1 << 0));
2644                 if (ret_val)
2645                         return ret_val;
2646                 ret_val = e1000_read_kmrn_reg_generic(hw,
2647                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2648                                                 &data);
2649                 if (ret_val)
2650                         return ret_val;
2651                 data &= ~(0xF << 8);
2652                 data |= (0xB << 8);
2653                 ret_val = e1000_write_kmrn_reg_generic(hw,
2654                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2655                                                 data);
2656                 if (ret_val)
2657                         return ret_val;
2658
2659                 /* Write PHY register values back to h/w defaults */
2660                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2661                 data &= ~(0x7F << 5);
2662                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2663                 if (ret_val)
2664                         return ret_val;
2665                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2666                 data |= (1 << 13);
2667                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2668                 if (ret_val)
2669                         return ret_val;
2670                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2671                 data &= ~(0x3FF << 2);
2672                 data |= (0x8 << 2);
2673                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2674                 if (ret_val)
2675                         return ret_val;
2676                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2677                 if (ret_val)
2678                         return ret_val;
2679                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2680                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2681                                                 ~(1 << 10));
2682                 if (ret_val)
2683                         return ret_val;
2684         }
2685
2686         /* re-enable Rx path after enabling/disabling workaround */
2687         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2688                                      ~(1 << 14));
2689 }
2690
2691 /**
2692  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2693  *  done after every PHY reset.
      *  @hw: pointer to the HW structure
2694  **/
2695 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2696 {
2697         s32 ret_val = E1000_SUCCESS;
2698
2699         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2700
2701         if (hw->mac.type != e1000_pch2lan)
2702                 return E1000_SUCCESS;
2703
2704         /* Set MDIO slow mode before any other MDIO access */
2705         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2706         if (ret_val)
2707                 return ret_val;
2708
2709         ret_val = hw->phy.ops.acquire(hw);
2710         if (ret_val)
2711                 return ret_val;
2712         /* set MSE higher to enable link to stay up when noise is high */
2713         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2714         if (ret_val)
2715                 goto release;
2716         /* drop link after 5 times MSE threshold was reached */
2717         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2718 release:
2719         hw->phy.ops.release(hw);
2720
2721         return ret_val;
2722 }
2723
2724 /**
2725  *  e1000_k1_workaround_lv - K1 Si workaround
2726  *  @hw:   pointer to the HW structure
2727  *
2728  *  Workaround to set the K1 beacon duration for 82579 parts linked at 10Mbps.
2729  *  Disable K1 for 1000 and 100 speeds.
2730  **/
2731 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2732 {
2733         s32 ret_val = E1000_SUCCESS;
2734         u16 status_reg = 0;
2735
2736         DEBUGFUNC("e1000_k1_workaround_lv");
2737
2738         if (hw->mac.type != e1000_pch2lan)
2739                 return E1000_SUCCESS;
2740
2741         /* Set K1 beacon duration based on 10Mbps speed */
2742         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2743         if (ret_val)
2744                 return ret_val;
2745
2746         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2747             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2748                 if (status_reg &
2749                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2750                         u16 pm_phy_reg;
2751
2752                         /* LV 1G/100 packet drop issue workaround */
2753                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2754                                                        &pm_phy_reg);
2755                         if (ret_val)
2756                                 return ret_val;
2757                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2758                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2759                                                         pm_phy_reg);
2760                         if (ret_val)
2761                                 return ret_val;
2762                 } else {
2763                         u32 mac_reg;
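                             /* Link is up at 10Mbps here: set the K1 beacon
                              * duration to 16 usec.
                              */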
2764                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2765                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2766                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2767                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2768                 }
2769         }
2770
2771         return ret_val;
2772 }
2773
2774 /**
2775  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2776  *  @hw:   pointer to the HW structure
2777  *  @gate: boolean set to true to gate, false to ungate
2778  *
2779  *  Gate/ungate the automatic PHY configuration via hardware; perform
2780  *  the configuration via software instead.
2781  **/
2782 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2783 {
2784         u32 extcnf_ctrl;
2785
2786         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2787
2788         if (hw->mac.type < e1000_pch2lan)
2789                 return;
2790
2791         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2792
2793         if (gate)
2794                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2795         else
2796                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2797
2798         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2799 }
2800
2801 /**
2802  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2803  *  @hw: pointer to the HW structure
2804  *
2805  *  Check the appropriate indication that the MAC has finished configuring
2806  *  the PHY after a software reset.
2807  **/
2808 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2809 {
2810         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2811
2812         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2813
2814         /* Wait for basic configuration to complete before proceeding */
2815         do {
2816                 data = E1000_READ_REG(hw, E1000_STATUS);
2817                 data &= E1000_STATUS_LAN_INIT_DONE;
2818                 usec_delay(100);
2819         } while ((!data) && --loop);
2820
2821         /* If basic configuration is incomplete before the above loop
2822          * count reaches 0, loading the configuration from NVM will
2823          * leave the PHY in a bad state possibly resulting in no link.
2824          */
2825         if (loop == 0)
2826                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2827
2828         /* Clear the Init Done bit for the next init event */
2829         data = E1000_READ_REG(hw, E1000_STATUS);
2830         data &= ~E1000_STATUS_LAN_INIT_DONE;
2831         E1000_WRITE_REG(hw, E1000_STATUS, data);
2832 }
2833
2834 /**
2835  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2836  *  @hw: pointer to the HW structure
2837  **/
2838 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2839 {
2840         s32 ret_val = E1000_SUCCESS;
2841         u16 reg;
2842
2843         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2844
2845         if (hw->phy.ops.check_reset_block(hw))
2846                 return E1000_SUCCESS;
2847
2848         /* Allow time for h/w to get to quiescent state after reset */
2849         msec_delay(10);
2850
2851         /* Perform any necessary post-reset workarounds */
2852         switch (hw->mac.type) {
2853         case e1000_pchlan:
2854                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2855                 if (ret_val)
2856                         return ret_val;
2857                 break;
2858         case e1000_pch2lan:
2859                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2860                 if (ret_val)
2861                         return ret_val;
2862                 break;
2863         default:
2864                 break;
2865         }
2866
2867         /* Clear the host wakeup bit after lcd reset */
2868         if (hw->mac.type >= e1000_pchlan) {
2869                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2870                 reg &= ~BM_WUC_HOST_WU_BIT;
2871                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2872         }
2873
2874         /* Configure the LCD with the extended configuration region in NVM */
2875         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2876         if (ret_val)
2877                 return ret_val;
2878
2879         /* Configure the LCD with the OEM bits in NVM */
2880         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2881
2882         if (hw->mac.type == e1000_pch2lan) {
2883                 /* Ungate automatic PHY configuration on non-managed 82579 */
2884                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2885                     E1000_ICH_FWSM_FW_VALID)) {
2886                         msec_delay(10);
2887                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2888                 }
2889
2890                 /* Set EEE LPI Update Timer to 200usec */
2891                 ret_val = hw->phy.ops.acquire(hw);
2892                 if (ret_val)
2893                         return ret_val;
2894                 ret_val = e1000_write_emi_reg_locked(hw,
2895                                                      I82579_LPI_UPDATE_TIMER,
2896                                                      0x1387);
2897                 hw->phy.ops.release(hw);
2898         }
2899
2900         return ret_val;
2901 }
2902
2903 /**
2904  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2905  *  @hw: pointer to the HW structure
2906  *
2907  *  Resets the PHY
2908  *  This is a function pointer entry point called by drivers
2909  *  or other shared routines.
2910  **/
2911 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2912 {
2913         s32 ret_val = E1000_SUCCESS;
2914
2915         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2916
2917         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2918         if ((hw->mac.type == e1000_pch2lan) &&
2919             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2920                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2921
2922         ret_val = e1000_phy_hw_reset_generic(hw);
2923         if (ret_val)
2924                 return ret_val;
2925
2926         return e1000_post_phy_reset_ich8lan(hw);
2927 }
2928
2929 /**
2930  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2931  *  @hw: pointer to the HW structure
2932  *  @active: true to enable LPLU, false to disable
2933  *
2934  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
2935  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
2936  *  not set the PHY speed. This function manually sets the LPLU bit and
2937  *  restarts auto-neg as hw would do. D3 and D0 LPLU will call the same
2938  *  function since it configures the same bit.
2939  **/
2940 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2941 {
2942         s32 ret_val;
2943         u16 oem_reg;
2944
2945         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2946
2947         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2948         if (ret_val)
2949                 return ret_val;
2950
2951         if (active)
2952                 oem_reg |= HV_OEM_BITS_LPLU;
2953         else
2954                 oem_reg &= ~HV_OEM_BITS_LPLU;
2955
2956         if (!hw->phy.ops.check_reset_block(hw))
2957                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2958
2959         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2960 }
2961
2962 /**
2963  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2964  *  @hw: pointer to the HW structure
2965  *  @active: true to enable LPLU, false to disable
2966  *
2967  *  Sets the LPLU D0 state according to the active flag.  When
2968  *  activating LPLU this function also disables smart speed
2969  *  and vice versa.  LPLU will not be activated unless the
2970  *  device autonegotiation advertisement is limited to 10,
2971  *  10/100, or 10/100/1000 Mb/s at all duplexes.
2972  *  This is a function pointer entry point only called by
2973  *  PHY setup routines.
2974  **/
2975 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2976 {
2977         struct e1000_phy_info *phy = &hw->phy;
2978         u32 phy_ctrl;
2979         s32 ret_val = E1000_SUCCESS;
2980         u16 data;
2981
2982         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2983
2984         if (phy->type == e1000_phy_ife)
2985                 return E1000_SUCCESS;
2986
2987         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2988
2989         if (active) {
2990                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2991                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2992
2993                 if (phy->type != e1000_phy_igp_3)
2994                         return E1000_SUCCESS;
2995
2996                 /* Call gig speed drop workaround on LPLU before accessing
2997                  * any PHY registers
2998                  */
2999                 if (hw->mac.type == e1000_ich8lan)
3000                         e1000_gig_downshift_workaround_ich8lan(hw);
3001
3002                 /* When LPLU is enabled, we should disable SmartSpeed */
3003                 ret_val = phy->ops.read_reg(hw,
3004                                             IGP01E1000_PHY_PORT_CONFIG,
3005                                             &data);
3006                 if (ret_val)
3007                         return ret_val;
3008                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3009                 ret_val = phy->ops.write_reg(hw,
3010                                              IGP01E1000_PHY_PORT_CONFIG,
3011                                              data);
3012                 if (ret_val)
3013                         return ret_val;
3014         } else {
3015                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3016                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3017
3018                 if (phy->type != e1000_phy_igp_3)
3019                         return E1000_SUCCESS;
3020
3021                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3022                  * during Dx states where the power conservation is most
3023                  * important.  During driver activity we should enable
3024                  * SmartSpeed, so performance is maintained.
3025                  */
3026                 if (phy->smart_speed == e1000_smart_speed_on) {
3027                         ret_val = phy->ops.read_reg(hw,
3028                                                     IGP01E1000_PHY_PORT_CONFIG,
3029                                                     &data);
3030                         if (ret_val)
3031                                 return ret_val;
3032
3033                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3034                         ret_val = phy->ops.write_reg(hw,
3035                                                      IGP01E1000_PHY_PORT_CONFIG,
3036                                                      data);
3037                         if (ret_val)
3038                                 return ret_val;
3039                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3040                         ret_val = phy->ops.read_reg(hw,
3041                                                     IGP01E1000_PHY_PORT_CONFIG,
3042                                                     &data);
3043                         if (ret_val)
3044                                 return ret_val;
3045
3046                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3047                         ret_val = phy->ops.write_reg(hw,
3048                                                      IGP01E1000_PHY_PORT_CONFIG,
3049                                                      data);
3050                         if (ret_val)
3051                                 return ret_val;
3052                 }
3053         }
3054
3055         return E1000_SUCCESS;
3056 }
3057
3058 /**
3059  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3060  *  @hw: pointer to the HW structure
3061  *  @active: true to enable LPLU, false to disable
3062  *
3063  *  Sets the LPLU D3 state according to the active flag.  When
3064  *  activating LPLU this function also disables smart speed
3065  *  and vice versa.  LPLU will not be activated unless the
3066  *  device autonegotiation advertisement is limited to 10,
3067  *  10/100, or 10/100/1000 Mb/s at all duplexes.
3068  *  This is a function pointer entry point only called by
3069  *  PHY setup routines.
3070  **/
3071 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3072 {
3073         struct e1000_phy_info *phy = &hw->phy;
3074         u32 phy_ctrl;
3075         s32 ret_val = E1000_SUCCESS;
3076         u16 data;
3077
3078         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3079
3080         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3081
3082         if (!active) {
3083                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3084                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3085
3086                 if (phy->type != e1000_phy_igp_3)
3087                         return E1000_SUCCESS;
3088
3089                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3090                  * during Dx states where the power conservation is most
3091                  * important.  During driver activity we should enable
3092                  * SmartSpeed, so performance is maintained.
3093                  */
3094                 if (phy->smart_speed == e1000_smart_speed_on) {
3095                         ret_val = phy->ops.read_reg(hw,
3096                                                     IGP01E1000_PHY_PORT_CONFIG,
3097                                                     &data);
3098                         if (ret_val)
3099                                 return ret_val;
3100
3101                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3102                         ret_val = phy->ops.write_reg(hw,
3103                                                      IGP01E1000_PHY_PORT_CONFIG,
3104                                                      data);
3105                         if (ret_val)
3106                                 return ret_val;
3107                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3108                         ret_val = phy->ops.read_reg(hw,
3109                                                     IGP01E1000_PHY_PORT_CONFIG,
3110                                                     &data);
3111                         if (ret_val)
3112                                 return ret_val;
3113
3114                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3115                         ret_val = phy->ops.write_reg(hw,
3116                                                      IGP01E1000_PHY_PORT_CONFIG,
3117                                                      data);
3118                         if (ret_val)
3119                                 return ret_val;
3120                 }
3121         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3122                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3123                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3124                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3125                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3126
3127                 if (phy->type != e1000_phy_igp_3)
3128                         return E1000_SUCCESS;
3129
3130                 /* Call gig speed drop workaround on LPLU before accessing
3131                  * any PHY registers
3132                  */
3133                 if (hw->mac.type == e1000_ich8lan)
3134                         e1000_gig_downshift_workaround_ich8lan(hw);
3135
3136                 /* When LPLU is enabled, we should disable SmartSpeed */
3137                 ret_val = phy->ops.read_reg(hw,
3138                                             IGP01E1000_PHY_PORT_CONFIG,
3139                                             &data);
3140                 if (ret_val)
3141                         return ret_val;
3142
3143                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3144                 ret_val = phy->ops.write_reg(hw,
3145                                              IGP01E1000_PHY_PORT_CONFIG,
3146                                              data);
3147         }
3148
3149         return ret_val;
3150 }
3151
3152 /**
3153  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3154  *  @hw: pointer to the HW structure
3155  *  @bank:  pointer to the variable that returns the active bank
3156  *
3157  *  Reads signature byte from the NVM using the flash access registers.
3158  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3159  **/
3160 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3161 {
3162         u32 eecd;
3163         struct e1000_nvm_info *nvm = &hw->nvm;
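             /* flash_bank_size is in words, so bank 1 starts flash_bank_size * 2
              * bytes into the flash.  The signature is the high byte of word
              * 0x13, hence the byte offset of word * 2 + 1.
              */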
3164         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3165         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3166         u8 sig_byte = 0;
3167         s32 ret_val;
3168
3169         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3170
3171         switch (hw->mac.type) {
3172         case e1000_ich8lan:
3173         case e1000_ich9lan:
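                     /* On ICH8/9 the SEC1VAL bit in EECD indicates the valid
                      * bank, provided the EECD indication is itself valid;
                      * otherwise fall back to reading the bank signatures
                      * from the flash below.
                      */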
3174                 eecd = E1000_READ_REG(hw, E1000_EECD);
3175                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3176                     E1000_EECD_SEC1VAL_VALID_MASK) {
3177                         if (eecd & E1000_EECD_SEC1VAL)
3178                                 *bank = 1;
3179                         else
3180                                 *bank = 0;
3181
3182                         return E1000_SUCCESS;
3183                 }
3184                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3185                 /* fall-thru */
3186         default:
3187                 /* set bank to 0 in case flash read fails */
3188                 *bank = 0;
3189
3190                 /* Check bank 0 */
3191                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3192                                                         &sig_byte);
3193                 if (ret_val)
3194                         return ret_val;
3195                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3196                     E1000_ICH_NVM_SIG_VALUE) {
3197                         *bank = 0;
3198                         return E1000_SUCCESS;
3199                 }
3200
3201                 /* Check bank 1 */
3202                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3203                                                         bank1_offset,
3204                                                         &sig_byte);
3205                 if (ret_val)
3206                         return ret_val;
3207                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3208                     E1000_ICH_NVM_SIG_VALUE) {
3209                         *bank = 1;
3210                         return E1000_SUCCESS;
3211                 }
3212
3213                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3214                 return -E1000_ERR_NVM;
3215         }
3216 }
3217
3218 /**
3219  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3220  *  @hw: pointer to the HW structure
3221  *  @offset: The offset (in words) of the word(s) to read.
3222  *  @words: Size of data to read in words
3223  *  @data: Pointer to the word(s) to read at offset.
3224  *
3225  *  Reads a word(s) from the NVM using the flash access registers.
3226  **/
3227 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3228                                   u16 *data)
3229 {
3230         struct e1000_nvm_info *nvm = &hw->nvm;
3231         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3232         u32 act_offset;
3233         s32 ret_val = E1000_SUCCESS;
3234         u32 bank = 0;
3235         u16 i, word;
3236
3237         DEBUGFUNC("e1000_read_nvm_ich8lan");
3238
3239         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3240             (words == 0)) {
3241                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3242                 ret_val = -E1000_ERR_NVM;
3243                 goto out;
3244         }
3245
3246         nvm->ops.acquire(hw);
3247
3248         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3249         if (ret_val != E1000_SUCCESS) {
3250                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3251                 bank = 0;
3252         }
3253
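             /* act_offset is a word offset into the active bank; the reads
              * below are converted to byte addresses by
              * e1000_read_flash_word_ich8lan().
              */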
3254         act_offset = (bank) ? nvm->flash_bank_size : 0;
3255         act_offset += offset;
3256
3257         ret_val = E1000_SUCCESS;
3258         for (i = 0; i < words; i++) {
3259                 if (dev_spec->shadow_ram[offset+i].modified) {
3260                         data[i] = dev_spec->shadow_ram[offset+i].value;
3261                 } else {
3262                         ret_val = e1000_read_flash_word_ich8lan(hw,
3263                                                                 act_offset + i,
3264                                                                 &word);
3265                         if (ret_val)
3266                                 break;
3267                         data[i] = word;
3268                 }
3269         }
3270
3271         nvm->ops.release(hw);
3272
3273 out:
3274         if (ret_val)
3275                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3276
3277         return ret_val;
3278 }
3279
3280 /**
3281  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3282  *  @hw: pointer to the HW structure
3283  *
3284  *  This function does initial flash setup so that a new read/write/erase cycle
3285  *  can be started.
3286  **/
3287 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3288 {
3289         union ich8_hws_flash_status hsfsts;
3290         s32 ret_val = -E1000_ERR_NVM;
3291
3292         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3293
3294         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3295
3296         /* Check if the flash descriptor is valid */
3297         if (!hsfsts.hsf_status.fldesvalid) {
3298                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3299                 return -E1000_ERR_NVM;
3300         }
3301
3302         /* Clear FCERR and DAEL in hw status by writing 1 */
3303         hsfsts.hsf_status.flcerr = 1;
3304         hsfsts.hsf_status.dael = 1;
3305         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3306
3307         /* Either we should have a hardware SPI cycle-in-progress
3308          * bit to check against in order to start a new cycle, or
3309          * the FDONE bit should be changed in the hardware so that
3310          * it is 1 after a hardware reset, which can then be used
3311          * as an indication of whether a cycle is in progress or
3312          * has been completed.
3313          */
3314
3315         if (!hsfsts.hsf_status.flcinprog) {
3316                 /* There is no cycle running at present,
3317                  * so we can start a cycle.
3318                  * Begin by setting Flash Cycle Done.
3319                  */
3320                 hsfsts.hsf_status.flcdone = 1;
3321                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3322                 ret_val = E1000_SUCCESS;
3323         } else {
3324                 s32 i;
3325
3326                 /* Otherwise poll for some time so the current
3327                  * cycle has a chance to end before giving up.
3328                  */
3329                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3330                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3331                                                               ICH_FLASH_HSFSTS);
3332                         if (!hsfsts.hsf_status.flcinprog) {
3333                                 ret_val = E1000_SUCCESS;
3334                                 break;
3335                         }
3336                         usec_delay(1);
3337                 }
3338                 if (ret_val == E1000_SUCCESS) {
3339                         /* Successfully waited for the previous cycle to
3340                          * end, now set the Flash Cycle Done.
3341                          */
3342                         hsfsts.hsf_status.flcdone = 1;
3343                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3344                                                 hsfsts.regval);
3345                 } else {
3346                         DEBUGOUT("Flash controller busy, cannot get access\n");
3347                 }
3348         }
3349
3350         return ret_val;
3351 }
3352
3353 /**
3354  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3355  *  @hw: pointer to the HW structure
3356  *  @timeout: maximum time to wait for completion
3357  *
3358  *  This function starts a flash cycle and waits for its completion.
3359  **/
3360 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3361 {
3362         union ich8_hws_flash_ctrl hsflctl;
3363         union ich8_hws_flash_status hsfsts;
3364         u32 i = 0;
3365
3366         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3367
3368         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3369         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3370         hsflctl.hsf_ctrl.flcgo = 1;
3371
3372         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3373
3374         /* wait till FDONE bit is set to 1 */
3375         do {
3376                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3377                 if (hsfsts.hsf_status.flcdone)
3378                         break;
3379                 usec_delay(1);
3380         } while (i++ < timeout);
3381
3382         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3383                 return E1000_SUCCESS;
3384
3385         return -E1000_ERR_NVM;
3386 }
3387
3388 /**
3389  *  e1000_read_flash_word_ich8lan - Read word from flash
3390  *  @hw: pointer to the HW structure
3391  *  @offset: offset to data location
3392  *  @data: pointer to the location for storing the data
3393  *
3394  *  Reads the flash word at offset into data.  Offset is converted
3395  *  to bytes before read.
3396  **/
3397 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3398                                          u16 *data)
3399 {
3400         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3401
3402         if (!data)
3403                 return -E1000_ERR_NVM;
3404
3405         /* Must convert offset into bytes. */
3406         offset <<= 1;
3407
3408         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3409 }
3410
3411 /**
3412  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3413  *  @hw: pointer to the HW structure
3414  *  @offset: The offset of the byte to read.
3415  *  @data: Pointer to a byte to store the value read.
3416  *
3417  *  Reads a single byte from the NVM using the flash access registers.
3418  **/
3419 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3420                                          u8 *data)
3421 {
3422         s32 ret_val;
3423         u16 word = 0;
3424
3425         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3426
3427         if (ret_val)
3428                 return ret_val;
3429
3430         *data = (u8)word;
3431
3432         return E1000_SUCCESS;
3433 }
3434
3435 /**
3436  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3437  *  @hw: pointer to the HW structure
3438  *  @offset: The offset (in bytes) of the byte or word to read.
3439  *  @size: Size of data to read, 1=byte 2=word
3440  *  @data: Pointer to the word to store the value read.
3441  *
3442  *  Reads a byte or word from the NVM using the flash access registers.
3443  **/
3444 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3445                                          u8 size, u16 *data)
3446 {
3447         union ich8_hws_flash_status hsfsts;
3448         union ich8_hws_flash_ctrl hsflctl;
3449         u32 flash_linear_addr;
3450         u32 flash_data = 0;
3451         s32 ret_val = -E1000_ERR_NVM;
3452         u8 count = 0;
3453
3454         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3455
3456         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3457                 return -E1000_ERR_NVM;
3458         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3459                              hw->nvm.flash_base_addr);
3460
3461         do {
3462                 usec_delay(1);
3463                 /* Steps */
3464                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3465                 if (ret_val != E1000_SUCCESS)
3466                         break;
3467                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3468
3469                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3470                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3471                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3472                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3473
3474                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3475
3476                 ret_val =
3477                     e1000_flash_cycle_ich8lan(hw,
3478                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3479
3480                 /* Check if FCERR is set to 1.  If so, clear it and
3481                  * try the whole sequence a few more times, else read
3482                  * in (shift in) the Flash Data0 register, least
3483                  * significant byte first.
3484                  */
3485                 if (ret_val == E1000_SUCCESS) {
3486                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3487                         if (size == 1)
3488                                 *data = (u8)(flash_data & 0x000000FF);
3489                         else if (size == 2)
3490                                 *data = (u16)(flash_data & 0x0000FFFF);
3491                         break;
3492                 } else {
3493                         /* If we've gotten here, then things are probably
3494                          * completely hosed, but if the error condition is
3495                          * detected, it won't hurt to give it another try...
3496                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3497                          */
3498                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3499                                                               ICH_FLASH_HSFSTS);
3500                         if (hsfsts.hsf_status.flcerr) {
3501                                 /* Repeat for some time before giving up. */
3502                                 continue;
3503                         } else if (!hsfsts.hsf_status.flcdone) {
3504                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3505                                 break;
3506                         }
3507                 }
3508         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3509
3510         return ret_val;
3511 }
3512
3513 /**
3514  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3515  *  @hw: pointer to the HW structure
3516  *  @offset: The offset (in words) of the word(s) to write.
3517  *  @words: Size of data to write in words
3518  *  @data: Pointer to the word(s) to write at offset.
3519  *
3520  *  Writes a byte or word to the NVM using the flash access registers.
3521  **/
3522 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3523                                    u16 *data)
3524 {
3525         struct e1000_nvm_info *nvm = &hw->nvm;
3526         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3527         u16 i;
3528
3529         DEBUGFUNC("e1000_write_nvm_ich8lan");
3530
3531         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3532             (words == 0)) {
3533                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3534                 return -E1000_ERR_NVM;
3535         }
3536
3537         nvm->ops.acquire(hw);
3538
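             /* Writes are only staged in the shadow RAM here; they are
              * committed to the flash by e1000_update_nvm_checksum_ich8lan().
              */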
3539         for (i = 0; i < words; i++) {
3540                 dev_spec->shadow_ram[offset+i].modified = true;
3541                 dev_spec->shadow_ram[offset+i].value = data[i];
3542         }
3543
3544         nvm->ops.release(hw);
3545
3546         return E1000_SUCCESS;
3547 }
3548
3549 /**
3550  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3551  *  @hw: pointer to the HW structure
3552  *
3553  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3554  *  which writes the checksum to the shadow ram.  The changes in the shadow
3555  *  ram are then committed to the EEPROM by processing each bank at a time
3556  *  checking for the modified bit and writing only the pending changes.
3557  *  After a successful commit, the shadow ram is cleared and is ready for
3558  *  future writes.
3559  **/
3560 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3561 {
3562         struct e1000_nvm_info *nvm = &hw->nvm;
3563         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3564         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3565         s32 ret_val;
3566         u16 data;
3567
3568         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3569
3570         ret_val = e1000_update_nvm_checksum_generic(hw);
3571         if (ret_val)
3572                 goto out;
3573
3574         if (nvm->type != e1000_nvm_flash_sw)
3575                 goto out;
3576
3577         nvm->ops.acquire(hw);
3578
3579         /* We're writing to the opposite bank so if we're on bank 1,
3580          * write to bank 0 etc.  We also need to erase the segment that
3581          * is going to be written
3582          */
3583         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3584         if (ret_val != E1000_SUCCESS) {
3585                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3586                 bank = 0;
3587         }
3588
3589         if (bank == 0) {
3590                 new_bank_offset = nvm->flash_bank_size;
3591                 old_bank_offset = 0;
3592                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3593                 if (ret_val)
3594                         goto release;
3595         } else {
3596                 old_bank_offset = nvm->flash_bank_size;
3597                 new_bank_offset = 0;
3598                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3599                 if (ret_val)
3600                         goto release;
3601         }
3602
3603         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3604                 /* Determine whether to write the value stored
3605                  * in the other NVM bank or a modified value stored
3606                  * in the shadow RAM
3607                  */
3608                 if (dev_spec->shadow_ram[i].modified) {
3609                         data = dev_spec->shadow_ram[i].value;
3610                 } else {
3611                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3612                                                                 old_bank_offset,
3613                                                                 &data);
3614                         if (ret_val)
3615                                 break;
3616                 }
3617
3618                 /* If the word is 0x13, then make sure the signature bits
3619                  * (15:14) are 11b until the commit has completed.
3620                  * This will allow us to write 10b which indicates the
3621                  * signature is valid.  We want to do this after the write
3622                  * has completed so that we don't mark the segment valid
3623                  * while the write is still in progress
3624                  */
3625                 if (i == E1000_ICH_NVM_SIG_WORD)
3626                         data |= E1000_ICH_NVM_SIG_MASK;
3627
3628                 /* Convert offset to bytes. */
3629                 act_offset = (i + new_bank_offset) << 1;
3630
3631                 usec_delay(100);
3632                 /* Write the bytes to the new bank. */
3633                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3634                                                                act_offset,
3635                                                                (u8)data);
3636                 if (ret_val)
3637                         break;
3638
3639                 usec_delay(100);
3640                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3641                                                           act_offset + 1,
3642                                                           (u8)(data >> 8));
3643                 if (ret_val)
3644                         break;
3645         }
3646
3647         /* Don't bother writing the segment valid bits if sector
3648          * programming failed.
3649          */
3650         if (ret_val) {
3651                 DEBUGOUT("Flash commit failed.\n");
3652                 goto release;
3653         }
3654
3655         /* Finally validate the new segment by setting bits 15:14
3656          * to 10b in word 0x13; this can be done without an
3657          * erase as well since these bits are 11 to start with
3658          * and we only need to change bit 14 to 0b.
3659          */
3660         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3661         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3662         if (ret_val)
3663                 goto release;
3664
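             /* Clear bit 14 so the signature bits 15:14 read 10b (valid). */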
3665         data &= 0xBFFF;
3666         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3667                                                        act_offset * 2 + 1,
3668                                                        (u8)(data >> 8));
3669         if (ret_val)
3670                 goto release;
3671
3672         /* And invalidate the previously valid segment by setting
3673          * its signature word (0x13) high_byte to 0b. This can be
3674          * done without an erase because flash erase sets all bits
3675          * to 1's. We can write 1's to 0's without an erase
3676          */
3677         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3678         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3679         if (ret_val)
3680                 goto release;
3681
3682         /* Great!  Everything worked, we can now clear the cached entries. */
3683         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3684                 dev_spec->shadow_ram[i].modified = false;
3685                 dev_spec->shadow_ram[i].value = 0xFFFF;
3686         }
3687
3688 release:
3689         nvm->ops.release(hw);
3690
3691         /* Reload the EEPROM, or else modifications will not appear
3692          * until after the next adapter reset.
3693          */
3694         if (!ret_val) {
3695                 nvm->ops.reload(hw);
3696                 msec_delay(10);
3697         }
3698
3699 out:
3700         if (ret_val)
3701                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3702
3703         return ret_val;
3704 }
3705
3706 /**
3707  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3708  *  @hw: pointer to the HW structure
3709  *
3710  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3711  *  If the bit is 0, the EEPROM had been modified but the checksum was not
3712  *  recalculated, in which case we need to calculate the checksum and set bit 6.
3713  **/
3714 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3715 {
3716         s32 ret_val;
3717         u16 data;
3718         u16 word;
3719         u16 valid_csum_mask;
3720
3721         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3722
3723         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3724          * the checksum needs to be fixed.  This bit is an indication that
3725          * the NVM was prepared by OEM software and did not calculate
3726          * the checksum...a likely scenario.
3727          */
3728         switch (hw->mac.type) {
3729         case e1000_pch_lpt:
3730                 word = NVM_COMPAT;
3731                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3732                 break;
3733         default:
3734                 word = NVM_FUTURE_INIT_WORD1;
3735                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3736                 break;
3737         }
3738
3739         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3740         if (ret_val)
3741                 return ret_val;
3742
3743         if (!(data & valid_csum_mask)) {
3744                 data |= valid_csum_mask;
3745                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3746                 if (ret_val)
3747                         return ret_val;
3748                 ret_val = hw->nvm.ops.update(hw);
3749                 if (ret_val)
3750                         return ret_val;
3751         }
3752
3753         return e1000_validate_nvm_checksum_generic(hw);
3754 }
3755
3756 /**
3757  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3758  *  @hw: pointer to the HW structure
3759  *  @offset: The offset (in bytes) of the byte/word to write.
3760  *  @size: Size of data to write, 1=byte 2=word
3761  *  @data: The byte(s) to write to the NVM.
3762  *
3763  *  Writes one/two bytes to the NVM using the flash access registers.
3764  **/
3765 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3766                                           u8 size, u16 data)
3767 {
3768         union ich8_hws_flash_status hsfsts;
3769         union ich8_hws_flash_ctrl hsflctl;
3770         u32 flash_linear_addr;
3771         u32 flash_data = 0;
3772         s32 ret_val;
3773         u8 count = 0;
3774
3775         DEBUGFUNC("e1000_write_ich8_data");
3776
3777         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3778                 return -E1000_ERR_NVM;
3779
3780         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3781                              hw->nvm.flash_base_addr);
3782
3783         do {
3784                 usec_delay(1);
3785                 /* Steps */
3786                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3787                 if (ret_val != E1000_SUCCESS)
3788                         break;
3789                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3790
3791                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3792                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3793                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3794                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3795
3796                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3797
3798                 if (size == 1)
3799                         flash_data = (u32)data & 0x00FF;
3800                 else
3801                         flash_data = (u32)data;
3802
3803                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3804
3805                 /* Check if FCERR is set to 1.  If so, clear it and try
3806                  * the whole sequence a few more times, else done.
3807                  */
3808                 ret_val =
3809                     e1000_flash_cycle_ich8lan(hw,
3810                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3811                 if (ret_val == E1000_SUCCESS)
3812                         break;
3813
3814                 /* If we're here, then things are most likely
3815                  * completely hosed, but if the error condition
3816                  * is detected, it won't hurt to give it another
3817                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3818                  */
3819                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3820                 if (hsfsts.hsf_status.flcerr)
3821                         /* Repeat for some time before giving up. */
3822                         continue;
3823                 if (!hsfsts.hsf_status.flcdone) {
3824                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3825                         break;
3826                 }
3827         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3828
3829         return ret_val;
3830 }
3831
3832 /**
3833  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3834  *  @hw: pointer to the HW structure
3835  *  @offset: The offset of the byte to write.
3836  *  @data: The byte to write to the NVM.
3837  *
3838  *  Writes a single byte to the NVM using the flash access registers.
3839  **/
3840 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3841                                           u8 data)
3842 {
3843         u16 word = (u16)data;
3844
3845         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3846
3847         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3848 }
3849
3850 /**
3851  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3852  *  @hw: pointer to the HW structure
3853  *  @offset: The offset of the byte to write.
3854  *  @byte: The byte to write to the NVM.
3855  *
3856  *  Writes a single byte to the NVM using the flash access registers.
3857  *  Goes through a retry algorithm before giving up.
3858  **/
3859 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3860                                                 u32 offset, u8 byte)
3861 {
3862         s32 ret_val;
3863         u16 program_retries;
3864
3865         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3866
3867         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3868         if (!ret_val)
3869                 return ret_val;
3870
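             /* Retry up to 100 times at 100 usec intervals (~10 ms total). */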
3871         for (program_retries = 0; program_retries < 100; program_retries++) {
3872                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3873                 usec_delay(100);
3874                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3875                 if (ret_val == E1000_SUCCESS)
3876                         break;
3877         }
3878         if (program_retries == 100)
3879                 return -E1000_ERR_NVM;
3880
3881         return E1000_SUCCESS;
3882 }
3883
3884 /**
3885  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3886  *  @hw: pointer to the HW structure
3887  *  @bank: 0 for first bank, 1 for second bank, etc.
3888  *
3889  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3890  *  Bank N starts at byte address 4096 * N + flash_reg_addr.
3891  **/
3892 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3893 {
3894         struct e1000_nvm_info *nvm = &hw->nvm;
3895         union ich8_hws_flash_status hsfsts;
3896         union ich8_hws_flash_ctrl hsflctl;
3897         u32 flash_linear_addr;
3898         /* bank size is in 16bit words - adjust to bytes */
3899         u32 flash_bank_size = nvm->flash_bank_size * 2;
3900         s32 ret_val;
3901         s32 count = 0;
3902         s32 j, iteration, sector_size;
3903
3904         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3905
3906         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3907
3908         /* Determine HW Sector size: Read BERASE bits of hw flash status
3909          * register
3910          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3911          *     consecutive sectors.  The start index for the nth Hw sector
3912          *     can be calculated as = bank * 4096 + n * 256
3913          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3914          *     The start index for the nth Hw sector can be calculated
3915          *     as = bank * 4096
3916          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3917          *     (ich9 only, otherwise error condition)
3918          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3919          */
3920         switch (hsfsts.hsf_status.berasesz) {
3921         case 0:
3922                 /* Hw sector size 256 */
3923                 sector_size = ICH_FLASH_SEG_SIZE_256;
3924                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3925                 break;
3926         case 1:
3927                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3928                 iteration = 1;
3929                 break;
3930         case 2:
3931                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3932                 iteration = 1;
3933                 break;
3934         case 3:
3935                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3936                 iteration = 1;
3937                 break;
3938         default:
3939                 return -E1000_ERR_NVM;
3940         }
3941
3942         /* Start with the base address, then add the bank offset. */
3943         flash_linear_addr = hw->nvm.flash_base_addr;
3944         flash_linear_addr += (bank) ? flash_bank_size : 0;
3945
3946         for (j = 0; j < iteration; j++) {
3947                 do {
3948                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3949
3950                         /* Steps */
3951                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3952                         if (ret_val)
3953                                 return ret_val;
3954
3955                         /* Write a value 11 (block Erase) in Flash
3956                          * Cycle field in hw flash control
3957                          */
3958                         hsflctl.regval =
3959                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3960
3961                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3962                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3963                                                 hsflctl.regval);
3964
3965                         /* Write the last 24 bits of an index within the
3966                          * block into Flash Linear address field in Flash
3967                          * Address.
3968                          */
3969                         flash_linear_addr += (j * sector_size);
3970                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3971                                               flash_linear_addr);
3972
3973                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3974                         if (ret_val == E1000_SUCCESS)
3975                                 break;
3976
3977                         /* Check if FCERR is set to 1.  If 1,
3978                          * clear it and try the whole sequence
3979                          * a few more times else Done
3980                          */
3981                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3982                                                       ICH_FLASH_HSFSTS);
3983                         if (hsfsts.hsf_status.flcerr)
3984                                 /* repeat for some time before giving up */
3985                                 continue;
3986                         else if (!hsfsts.hsf_status.flcdone)
3987                                 return ret_val;
3988                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3989         }
3990
3991         return E1000_SUCCESS;
3992 }
3993
3994 /**
3995  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3996  *  @hw: pointer to the HW structure
3997  *  @data: Pointer to the LED settings
3998  *
3999  *  Reads the LED default settings from the NVM into data.  If the NVM LED
4000  *  setting is all 0's or F's, set the LED default to a valid LED default
4001  *  setting.
4002  **/
4003 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4004 {
4005         s32 ret_val;
4006
4007         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4008
4009         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4010         if (ret_val) {
4011                 DEBUGOUT("NVM Read Error\n");
4012                 return ret_val;
4013         }
4014
4015         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4016                 *data = ID_LED_DEFAULT_ICH8LAN;
4017
4018         return E1000_SUCCESS;
4019 }
4020
4021 /**
4022  *  e1000_id_led_init_pchlan - store LED configurations
4023  *  @hw: pointer to the HW structure
4024  *
4025  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4026  *  the PHY LED configuration register.
4027  *
4028  *  PCH also does not have an "always on" or "always off" mode which
4029  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4030  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4031  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4032  *  link, based on logic in e1000_led_[on|off]_pchlan().
4033  **/
4034 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4035 {
4036         struct e1000_mac_info *mac = &hw->mac;
4037         s32 ret_val;
4038         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4039         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4040         u16 data, i, temp, shift;
4041
4042         DEBUGFUNC("e1000_id_led_init_pchlan");
4043
4044         /* Get default ID LED modes */
4045         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4046         if (ret_val)
4047                 return ret_val;
4048
4049         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4050         mac->ledctl_mode1 = mac->ledctl_default;
4051         mac->ledctl_mode2 = mac->ledctl_default;
4052
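             /* The NVM ID LED word holds four 4-bit mode fields, one per LED;
              * each field is mapped onto a 5-bit LED configuration field in
              * the PHY LED register, hence the (i << 2) and (i * 5) shifts
              * below.
              */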
4053         for (i = 0; i < 4; i++) {
4054                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4055                 shift = (i * 5);
4056                 switch (temp) {
4057                 case ID_LED_ON1_DEF2:
4058                 case ID_LED_ON1_ON2:
4059                 case ID_LED_ON1_OFF2:
4060                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4061                         mac->ledctl_mode1 |= (ledctl_on << shift);
4062                         break;
4063                 case ID_LED_OFF1_DEF2:
4064                 case ID_LED_OFF1_ON2:
4065                 case ID_LED_OFF1_OFF2:
4066                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4067                         mac->ledctl_mode1 |= (ledctl_off << shift);
4068                         break;
4069                 default:
4070                         /* Do nothing */
4071                         break;
4072                 }
4073                 switch (temp) {
4074                 case ID_LED_DEF1_ON2:
4075                 case ID_LED_ON1_ON2:
4076                 case ID_LED_OFF1_ON2:
4077                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4078                         mac->ledctl_mode2 |= (ledctl_on << shift);
4079                         break;
4080                 case ID_LED_DEF1_OFF2:
4081                 case ID_LED_ON1_OFF2:
4082                 case ID_LED_OFF1_OFF2:
4083                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4084                         mac->ledctl_mode2 |= (ledctl_off << shift);
4085                         break;
4086                 default:
4087                         /* Do nothing */
4088                         break;
4089                 }
4090         }
4091
4092         return E1000_SUCCESS;
4093 }
4094
4095 /**
4096  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4097  *  @hw: pointer to the HW structure
4098  *
4099  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4100  *  register, so the bus width is hardcoded.
4101  **/
4102 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4103 {
4104         struct e1000_bus_info *bus = &hw->bus;
4105         s32 ret_val;
4106
4107         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4108
4109         ret_val = e1000_get_bus_info_pcie_generic(hw);
4110
4111         /* ICH devices are "PCI Express"-ish.  They have
4112          * a configuration space, but do not contain
4113          * PCI Express Capability registers, so bus width
4114          * must be hardcoded.
4115          */
4116         if (bus->width == e1000_bus_width_unknown)
4117                 bus->width = e1000_bus_width_pcie_x1;
4118
4119         return ret_val;
4120 }
4121
4122 /**
4123  *  e1000_reset_hw_ich8lan - Reset the hardware
4124  *  @hw: pointer to the HW structure
4125  *
4126  *  Does a full reset of the hardware which includes a reset of the PHY and
4127  *  MAC.
4128  **/
4129 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4130 {
4131         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4132         u16 kum_cfg;
4133         u32 ctrl, reg;
4134         s32 ret_val;
4135
4136         DEBUGFUNC("e1000_reset_hw_ich8lan");
4137
4138         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4139          * on the last TLP read/write transaction when MAC is reset.
4140          */
4141         ret_val = e1000_disable_pcie_master_generic(hw);
4142         if (ret_val)
4143                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4144
4145         DEBUGOUT("Masking off all interrupts\n");
4146         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4147
4148         /* Disable the Transmit and Receive units.  Then delay to allow
4149          * any pending transactions to complete before we hit the MAC
4150          * with the global reset.
4151          */
4152         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4153         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4154         E1000_WRITE_FLUSH(hw);
4155
4156         msec_delay(10);
4157
4158         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4159         if (hw->mac.type == e1000_ich8lan) {
4160                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4161                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4162                 /* Set Packet Buffer Size to 16k. */
4163                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4164         }
4165
4166         if (hw->mac.type == e1000_pchlan) {
4167                 /* Save the NVM K1 bit setting */
4168                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4169                 if (ret_val)
4170                         return ret_val;
4171
4172                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4173                         dev_spec->nvm_k1_enabled = true;
4174                 else
4175                         dev_spec->nvm_k1_enabled = false;
4176         }
4177
4178         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4179
4180         if (!hw->phy.ops.check_reset_block(hw)) {
4181                 /* Full-chip reset requires MAC and PHY reset at the same
4182                  * time to make sure the interface between MAC and the
4183                  * external PHY is reset.
4184                  */
4185                 ctrl |= E1000_CTRL_PHY_RST;
4186
4187                 /* Gate automatic PHY configuration by hardware on
4188                  * non-managed 82579
4189                  */
4190                 if ((hw->mac.type == e1000_pch2lan) &&
4191                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4192                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4193         }
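             /* Acquire the software flag before issuing the global reset so
              * firmware does not access the PHY/NVM while the reset is in
              * progress; the mutex is released further below only if the
              * acquire succeeded.
              */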
4194         ret_val = e1000_acquire_swflag_ich8lan(hw);
4195         DEBUGOUT("Issuing a global reset to ich8lan\n");
4196         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4197         /* cannot issue a flush here because it hangs the hardware */
4198         msec_delay(20);
4199
4200         /* Set Phy Config Counter to 50msec */
4201         if (hw->mac.type == e1000_pch2lan) {
4202                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4203                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4204                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4205                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4206         }
4207
4208         if (!ret_val)
4209                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4210
4211         if (ctrl & E1000_CTRL_PHY_RST) {
4212                 ret_val = hw->phy.ops.get_cfg_done(hw);
4213                 if (ret_val)
4214                         return ret_val;
4215
4216                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4217                 if (ret_val)
4218                         return ret_val;
4219         }
4220
4221         /* For PCH, this write will make sure that any noise
4222          * will be detected as a CRC error and be dropped rather than show up
4223          * as a bad packet to the DMA engine.
4224          */
4225         if (hw->mac.type == e1000_pchlan)
4226                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4227
4228         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4229         E1000_READ_REG(hw, E1000_ICR);
4230
4231         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4232         reg |= E1000_KABGTXD_BGSQLBIAS;
4233         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4234
4235         return E1000_SUCCESS;
4236 }
4237
4238 /**
4239  *  e1000_init_hw_ich8lan - Initialize the hardware
4240  *  @hw: pointer to the HW structure
4241  *
4242  *  Prepares the hardware for transmit and receive by doing the following:
4243  *   - initialize hardware bits
4244  *   - initialize LED identification
4245  *   - setup receive address registers
4246  *   - setup flow control
4247  *   - setup transmit descriptors
4248  *   - clear statistics
4249  **/
4250 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4251 {
4252         struct e1000_mac_info *mac = &hw->mac;
4253         u32 ctrl_ext, txdctl, snoop;
4254         s32 ret_val;
4255         u16 i;
4256
4257         DEBUGFUNC("e1000_init_hw_ich8lan");
4258
4259         e1000_initialize_hw_bits_ich8lan(hw);
4260
4261         /* Initialize identification LED */
4262         ret_val = mac->ops.id_led_init(hw);
4263         /* An error is not fatal and we should not stop init due to this */
4264         if (ret_val)
4265                 DEBUGOUT("Error initializing identification LED\n");
4266
4267         /* Setup the receive address. */
4268         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4269
4270         /* Zero out the Multicast HASH table */
4271         DEBUGOUT("Zeroing the MTA\n");
4272         for (i = 0; i < mac->mta_reg_count; i++)
4273                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4274
4275         /* The 82578 Rx buffer will stall if wakeup is enabled in both the host
4276          * and the ME.  Disable wakeup by clearing the host wakeup bit.
4277          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4278          */
4279         if (hw->phy.type == e1000_phy_82578) {
4280                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4281                 i &= ~BM_WUC_HOST_WU_BIT;
4282                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4283                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4284                 if (ret_val)
4285                         return ret_val;
4286         }
4287
4288         /* Setup link and flow control */
4289         ret_val = mac->ops.setup_link(hw);
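             /* A setup_link failure is not fatal here; initialization
              * continues and ret_val is propagated by the final return of
              * this function.
              */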
4290
4291         /* Set the transmit descriptor write-back policy for both queues */
4292         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4293         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4294                   E1000_TXDCTL_FULL_TX_DESC_WB);
4295         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4296                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4297         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4298         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4299         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4300                   E1000_TXDCTL_FULL_TX_DESC_WB);
4301         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4302                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4303         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4304
4305         /* ICH8 has opposite polarity of no_snoop bits.
4306          * By default, we should use snoop behavior.
4307          */
4308         if (mac->type == e1000_ich8lan)
4309                 snoop = PCIE_ICH8_SNOOP_ALL;
4310         else
4311                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4312         e1000_set_pcie_no_snoop_generic(hw, snoop);
4313
4314         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4315         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4316         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4317
4318         /* Clear all of the statistics registers (clear on read).  It is
4319          * important that we do this after we have tried to establish link
4320          * because the symbol error count will increment wildly if there
4321          * is no link.
4322          */
4323         e1000_clear_hw_cntrs_ich8lan(hw);
4324
4325         return ret_val;
4326 }
4327
4328 /**
4329  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4330  *  @hw: pointer to the HW structure
4331  *
4332  *  Sets/Clears required hardware bits necessary for correctly setting up the
4333  *  hardware for transmit and receive.
4334  **/
4335 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4336 {
4337         u32 reg;
4338
4339         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4340
4341         /* Extended Device Control */
4342         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4343         reg |= (1 << 22);
4344         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4345         if (hw->mac.type >= e1000_pchlan)
4346                 reg |= E1000_CTRL_EXT_PHYPDEN;
4347         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4348
4349         /* Transmit Descriptor Control 0 */
4350         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4351         reg |= (1 << 22);
4352         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4353
4354         /* Transmit Descriptor Control 1 */
4355         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4356         reg |= (1 << 22);
4357         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4358
4359         /* Transmit Arbitration Control 0 */
4360         reg = E1000_READ_REG(hw, E1000_TARC(0));
4361         if (hw->mac.type == e1000_ich8lan)
4362                 reg |= (1 << 28) | (1 << 29);
4363         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4364         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4365
4366         /* Transmit Arbitration Control 1 */
4367         reg = E1000_READ_REG(hw, E1000_TARC(1));
4368         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4369                 reg &= ~(1 << 28);
4370         else
4371                 reg |= (1 << 28);
4372         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4373         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4374
4375         /* Device Status */
4376         if (hw->mac.type == e1000_ich8lan) {
4377                 reg = E1000_READ_REG(hw, E1000_STATUS);
4378                 reg &= ~(1U << 31);
4379                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4380         }
4381
4382         /* Work around a descriptor data corruption issue during NFS v2 UDP
4383          * traffic by disabling the NFS filtering capability.
4384          */
4385         reg = E1000_READ_REG(hw, E1000_RFCTL);
4386         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4387
4388         /* Disable IPv6 extension header parsing because some malformed
4389          * IPv6 headers can hang the Rx.
4390          */
4391         if (hw->mac.type == e1000_ich8lan)
4392                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4393         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4394
4395         /* Enable ECC on Lynxpoint */
4396         if (hw->mac.type == e1000_pch_lpt) {
4397                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4398                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4399                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4400
4401                 reg = E1000_READ_REG(hw, E1000_CTRL);
4402                 reg |= E1000_CTRL_MEHE;
4403                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4404         }
4405
4406         return;
4407 }
4408
4409 /**
4410  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4411  *  @hw: pointer to the HW structure
4412  *
4413  *  Determines which flow control settings to use, then configures flow
4414  *  control.  Calls the appropriate media-specific link configuration
4415  *  function.  Assuming the adapter has a valid link partner, a valid link
4416  *  should be established.  Assumes the hardware has previously been reset
4417  *  and the transmitter and receiver are not enabled.
4418  **/
4419 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4420 {
4421         s32 ret_val;
4422
4423         DEBUGFUNC("e1000_setup_link_ich8lan");
4424
4425         if (hw->phy.ops.check_reset_block(hw))
4426                 return E1000_SUCCESS;
4427
4428         /* ICH parts do not have a word in the NVM to determine
4429          * the default flow control setting, so we explicitly
4430          * set it to full.
4431          */
4432         if (hw->fc.requested_mode == e1000_fc_default)
4433                 hw->fc.requested_mode = e1000_fc_full;
4434
4435         /* Save off the requested flow control mode for use later.  Depending
4436          * on the link partner's capabilities, we may or may not use this mode.
4437          */
4438         hw->fc.current_mode = hw->fc.requested_mode;
4439
4440         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4441                 hw->fc.current_mode);
4442
4443         /* Continue to configure the copper link. */
4444         ret_val = hw->mac.ops.setup_physical_interface(hw);
4445         if (ret_val)
4446                 return ret_val;
4447
4448         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4449         if ((hw->phy.type == e1000_phy_82578) ||
4450             (hw->phy.type == e1000_phy_82579) ||
4451             (hw->phy.type == e1000_phy_i217) ||
4452             (hw->phy.type == e1000_phy_82577)) {
4453                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4454
4455                 ret_val = hw->phy.ops.write_reg(hw,
4456                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4457                                              hw->fc.pause_time);
4458                 if (ret_val)
4459                         return ret_val;
4460         }
4461
4462         return e1000_set_fc_watermarks_generic(hw);
4463 }
4464
4465 /**
4466  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4467  *  @hw: pointer to the HW structure
4468  *
4469  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4470  *  when polling the PHY, then calls the generic setup_copper_link to finish
4471  *  configuring the copper link.
4472  **/
4473 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4474 {
4475         u32 ctrl;
4476         s32 ret_val;
4477         u16 reg_data;
4478
4479         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4480
4481         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4482         ctrl |= E1000_CTRL_SLU;
4483         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4484         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4485
4486         /* Set the mac to wait the maximum time between each iteration
4487          * and increase the max iterations when polling the phy;
4488          * this fixes erroneous timeouts at 10Mbps.
4489          */
4490         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4491                                                0xFFFF);
4492         if (ret_val)
4493                 return ret_val;
4494         ret_val = e1000_read_kmrn_reg_generic(hw,
4495                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4496                                               &reg_data);
4497         if (ret_val)
4498                 return ret_val;
4499         reg_data |= 0x3F;
4500         ret_val = e1000_write_kmrn_reg_generic(hw,
4501                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4502                                                reg_data);
4503         if (ret_val)
4504                 return ret_val;
4505
4506         switch (hw->phy.type) {
4507         case e1000_phy_igp_3:
4508                 ret_val = e1000_copper_link_setup_igp(hw);
4509                 if (ret_val)
4510                         return ret_val;
4511                 break;
4512         case e1000_phy_bm:
4513         case e1000_phy_82578:
4514                 ret_val = e1000_copper_link_setup_m88(hw);
4515                 if (ret_val)
4516                         return ret_val;
4517                 break;
4518         case e1000_phy_82577:
4519         case e1000_phy_82579:
4520                 ret_val = e1000_copper_link_setup_82577(hw);
4521                 if (ret_val)
4522                         return ret_val;
4523                 break;
4524         case e1000_phy_ife:
4525                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4526                                                &reg_data);
4527                 if (ret_val)
4528                         return ret_val;
4529
4530                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4531
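                     /* hw->phy.mdix selects the crossover mode:
                      * 0 = auto-MDIX (default), 1 = force MDI, 2 = force MDI-X
                      */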
4532                 switch (hw->phy.mdix) {
4533                 case 1:
4534                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4535                         break;
4536                 case 2:
4537                         reg_data |= IFE_PMC_FORCE_MDIX;
4538                         break;
4539                 case 0:
4540                 default:
4541                         reg_data |= IFE_PMC_AUTO_MDIX;
4542                         break;
4543                 }
4544                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4545                                                 reg_data);
4546                 if (ret_val)
4547                         return ret_val;
4548                 break;
4549         default:
4550                 break;
4551         }
4552
4553         return e1000_setup_copper_link_generic(hw);
4554 }
4555
4556 /**
4557  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4558  *  @hw: pointer to the HW structure
4559  *
4560  *  Calls the PHY specific link setup function and then calls the
4561  *  generic setup_copper_link to finish configuring the link for
4562  *  Lynxpoint PCH devices.
4563  **/
4564 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4565 {
4566         u32 ctrl;
4567         s32 ret_val;
4568
4569         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4570
4571         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4572         ctrl |= E1000_CTRL_SLU;
4573         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4574         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4575
4576         ret_val = e1000_copper_link_setup_82577(hw);
4577         if (ret_val)
4578                 return ret_val;
4579
4580         return e1000_setup_copper_link_generic(hw);
4581 }
4582
4583 /**
4584  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4585  *  @hw: pointer to the HW structure
4586  *  @speed: pointer to store current link speed
4587  *  @duplex: pointer to store the current link duplex
4588  *
4589  *  Calls the generic get_speed_and_duplex to retrieve the current link
4590  *  information and then calls the Kumeran lock loss workaround for links at
4591  *  gigabit speeds.
4592  **/
4593 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4594                                           u16 *duplex)
4595 {
4596         s32 ret_val;
4597
4598         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4599
4600         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4601         if (ret_val)
4602                 return ret_val;
4603
4604         if ((hw->mac.type == e1000_ich8lan) &&
4605             (hw->phy.type == e1000_phy_igp_3) &&
4606             (*speed == SPEED_1000)) {
4607                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4608         }
4609
4610         return ret_val;
4611 }
4612
4613 /**
4614  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4615  *  @hw: pointer to the HW structure
4616  *
4617  *  Work-around for 82566 Kumeran PCS lock loss:
4618  *  On link status change (i.e. PCI reset, speed change) when the link is up
4619  *  at gigabit speed:
4620  *    0) if the workaround has been disabled, do nothing
4621  *    1) wait 1ms for Kumeran link to come up
4622  *    2) check Kumeran Diagnostic register PCS lock loss bit
4623  *    3) if not set the link is locked (all is good), otherwise...
4624  *    4) reset the PHY
4625  *    5) repeat up to 10 times
4626  *  Note: this is only called for IGP3 copper when the speed is 1 Gb/s.
4627  **/
4628 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4629 {
4630         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4631         u32 phy_ctrl;
4632         s32 ret_val;
4633         u16 i, data;
4634         bool link;
4635
4636         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4637
4638         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4639                 return E1000_SUCCESS;
4640
4641         /* Make sure link is up before proceeding.  If not, just return.
4642          * Attempting this while the link is negotiating fouled up link
4643          * stability.
4644          */
4645         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4646         if (!link)
4647                 return E1000_SUCCESS;
4648
4649         for (i = 0; i < 10; i++) {
4650                 /* read once to clear */
4651                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4652                 if (ret_val)
4653                         return ret_val;
4654                 /* and again to get new status */
4655                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4656                 if (ret_val)
4657                         return ret_val;
4658
4659                 /* check for PCS lock */
4660                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4661                         return E1000_SUCCESS;
4662
4663                 /* Issue PHY reset */
4664                 hw->phy.ops.reset(hw);
4665                 msec_delay_irq(5);
4666         }
4667         /* Disable GigE link negotiation */
4668         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4669         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4670                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4671         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4672
4673         /* Call gig speed drop workaround on Gig disable before accessing
4674          * any PHY registers
4675          */
4676         e1000_gig_downshift_workaround_ich8lan(hw);
4677
4678         /* unable to acquire PCS lock */
4679         return -E1000_ERR_PHY;
4680 }
4681
4682 /**
4683  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4684  *  @hw: pointer to the HW structure
4685  *  @state: boolean value used to set the current Kumeran workaround state
4686  *
4687  *  If ICH8, set the current Kumeran workaround state (enabled - true
4688  *  If ICH8, set the current Kumeran workaround state (enabled - true,
4689  *  disabled - false).
4690 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4691                                                  bool state)
4692 {
4693         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4694
4695         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4696
4697         if (hw->mac.type != e1000_ich8lan) {
4698                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4699                 return;
4700         }
4701
4702         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4703
4704         return;
4705 }
4706
4707 /**
4708  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4709  *  @hw: pointer to the HW structure
4710  *
4711  *  Workaround for 82566 power-down on D3 entry:
4712  *    1) disable gigabit link
4713  *    2) write VR power-down enable
4714  *    3) read it back
4715  *  Continue if successful, else issue LCD reset and repeat
4716  **/
4717 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4718 {
4719         u32 reg;
4720         u16 data;
4721         u8  retry = 0;
4722
4723         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4724
4725         if (hw->phy.type != e1000_phy_igp_3)
4726                 return;
4727
4728         /* Try the workaround twice (if needed) */
4729         do {
4730                 /* Disable link */
4731                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4732                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4733                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4734                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4735
4736                 /* Call gig speed drop workaround on Gig disable before
4737                  * accessing any PHY registers
4738                  */
4739                 if (hw->mac.type == e1000_ich8lan)
4740                         e1000_gig_downshift_workaround_ich8lan(hw);
4741
4742                 /* Write VR power-down enable */
4743                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4744                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4745                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4746                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4747
4748                 /* Read it back and test */
4749                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4750                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4751                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4752                         break;
4753
4754                 /* Issue PHY reset and repeat at most one more time */
4755                 reg = E1000_READ_REG(hw, E1000_CTRL);
4756                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4757                 retry++;
4758         } while (retry);
4759 }
4760
4761 /**
4762  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4763  *  @hw: pointer to the HW structure
4764  *
4765  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4766  *  LPLU, Gig disable, MDIC PHY reset):
4767  *    1) Set Kumeran Near-end loopback
4768  *    2) Clear Kumeran Near-end loopback
4769  *  Should only be called for ICH8[m] devices with any 1G Phy.
4770  **/
4771 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4772 {
4773         s32 ret_val;
4774         u16 reg_data;
4775
4776         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4777
4778         if ((hw->mac.type != e1000_ich8lan) ||
4779             (hw->phy.type == e1000_phy_ife))
4780                 return;
4781
4782         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4783                                               &reg_data);
4784         if (ret_val)
4785                 return;
4786         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4787         ret_val = e1000_write_kmrn_reg_generic(hw,
4788                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4789                                                reg_data);
4790         if (ret_val)
4791                 return;
4792         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4793         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4794                                      reg_data);
4795 }
4796
4797 /**
4798  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4799  *  @hw: pointer to the HW structure
4800  *
4801  *  During S0 to Sx transition, it is possible the link remains at gig
4802  *  instead of negotiating to a lower speed.  Before going to Sx, set
4803  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4804  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4805  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4806  *  needs to be written.
4807  *  Parts that support (and are linked to a partner which supports) EEE in
4808  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4809  *  than 10Mbps w/o EEE.
4810  **/
4811 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4812 {
4813         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4814         u32 phy_ctrl;
4815         s32 ret_val;
4816
4817         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4818
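             /* Gigabit link is unconditionally disabled for Sx entry; the
              * LPLU and EEE handling below may further adjust phy_ctrl before
              * it is written back to E1000_PHY_CTRL at the end of the
              * function.
              */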
4819         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4820         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4821
4822         if (hw->phy.type == e1000_phy_i217) {
4823                 u16 phy_reg, device_id = hw->device_id;
4824
4825                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4826                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4827                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4828                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4829                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4830
4831                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4832                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4833                 }
4834
4835                 ret_val = hw->phy.ops.acquire(hw);
4836                 if (ret_val)
4837                         goto out;
4838
4839                 if (!dev_spec->eee_disable) {
4840                         u16 eee_advert;
4841
4842                         ret_val =
4843                             e1000_read_emi_reg_locked(hw,
4844                                                       I217_EEE_ADVERTISEMENT,
4845                                                       &eee_advert);
4846                         if (ret_val)
4847                                 goto release;
4848
4849                         /* Disable LPLU if both link partners support 100BaseT
4850                          * EEE and 100Full is advertised on both ends of the
4851                          * link, and enable Auto Enable LPI since there will
4852                          * be no driver to enable LPI while in Sx.
4853                          */
4854                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4855                             (dev_spec->eee_lp_ability &
4856                              I82579_EEE_100_SUPPORTED) &&
4857                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4858                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4859                                               E1000_PHY_CTRL_NOND0A_LPLU);
4860
4861                                 /* Set Auto Enable LPI after link up */
4862                                 hw->phy.ops.read_reg_locked(hw,
4863                                                             I217_LPI_GPIO_CTRL,
4864                                                             &phy_reg);
4865                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4866                                 hw->phy.ops.write_reg_locked(hw,
4867                                                              I217_LPI_GPIO_CTRL,
4868                                                              phy_reg);
4869                         }
4870                 }
4871
4872                 /* For i217 Intel Rapid Start Technology support,
4873                  * when the system is going into Sx and no manageability engine
4874                  * is present, the driver must configure proxy to reset only on
4875                  * power good.  LPI (Low Power Idle) state must also reset only
4876                  * on power good, as well as the MTA (Multicast table array).
4877                  * The SMBus release must also be disabled on LCD reset.
4878                  */
4879                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4880                       E1000_ICH_FWSM_FW_VALID)) {
4881                         /* Enable proxy to reset only on power good. */
4882                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4883                                                     &phy_reg);
4884                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4885                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4886                                                      phy_reg);
4887
4888                         /* Set the LPI (EEE) enable bit to reset only on
4889                          * power good.
4890                          */
4891                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4892                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4893                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4894
4895                         /* Disable the SMB release on LCD reset. */
4896                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4897                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4898                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4899                 }
4900
4901                 /* Enable MTA to reset for Intel Rapid Start Technology
4902                  * Support
4903                  */
4904                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4905                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4906                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4907
4908 release:
4909                 hw->phy.ops.release(hw);
4910         }
4911 out:
4912         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4913
4914         if (hw->mac.type == e1000_ich8lan)
4915                 e1000_gig_downshift_workaround_ich8lan(hw);
4916
4917         if (hw->mac.type >= e1000_pchlan) {
4918                 e1000_oem_bits_config_ich8lan(hw, false);
4919
4920                 /* Reset PHY to activate OEM bits on 82577/8 */
4921                 if (hw->mac.type == e1000_pchlan)
4922                         e1000_phy_hw_reset_generic(hw);
4923
4924                 ret_val = hw->phy.ops.acquire(hw);
4925                 if (ret_val)
4926                         return;
4927                 e1000_write_smbus_addr(hw);
4928                 hw->phy.ops.release(hw);
4929         }
4930
4931         return;
4932 }
4933
4934 /**
4935  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4936  *  @hw: pointer to the HW structure
4937  *
4938  *  During Sx to S0 transitions on non-managed devices or managed devices
4939  *  on which PHY resets are not blocked, if the PHY registers cannot be
4940  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4941  *  the PHY.
4942  *  On i217, setup Intel Rapid Start Technology.
4943  **/
4944 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4945 {
4946         s32 ret_val;
4947
4948         DEBUGFUNC("e1000_resume_workarounds_pchlan");
4949         if (hw->mac.type < e1000_pch2lan)
4950                 return E1000_SUCCESS;
4951
4952         ret_val = e1000_init_phy_workarounds_pchlan(hw);
4953         if (ret_val) {
4954                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4955                 return ret_val;
4956         }
4957
4958         /* For i217 Intel Rapid Start Technology support when the system
4959          * is transitioning from Sx and no manageability engine is present
4960          * configure SMBus to restore on reset, disable proxy, and enable
4961          * the reset on MTA (Multicast table array).
4962          */
4963         if (hw->phy.type == e1000_phy_i217) {
4964                 u16 phy_reg;
4965
4966                 ret_val = hw->phy.ops.acquire(hw);
4967                 if (ret_val) {
4968                         DEBUGOUT("Failed to setup iRST\n");
4969                         return ret_val;
4970                 }
4971
4972                 /* Clear Auto Enable LPI after link up */
4973                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
4974                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4975                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
4976
4977                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4978                     E1000_ICH_FWSM_FW_VALID)) {
4979                         /* Restore clear on SMB if no manageability engine
4980                          * is present
4981                          */
4982                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4983                                                               &phy_reg);
4984                         if (ret_val)
4985                                 goto release;
4986                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4987                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4988
4989                         /* Disable Proxy */
4990                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4991                 }
4992                 /* Enable reset on MTA */
4993                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4994                                                       &phy_reg);
4995                 if (ret_val)
4996                         goto release;
4997                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4998                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4999 release:
5000                 if (ret_val)
5001                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5002                 hw->phy.ops.release(hw);
5003                 return ret_val;
5004         }
5005         return E1000_SUCCESS;
5006 }
5007
5008 /**
5009  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5010  *  @hw: pointer to the HW structure
5011  *
5012  *  Return the LED back to the default configuration.
5013  **/
5014 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5015 {
5016         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5017
5018         if (hw->phy.type == e1000_phy_ife)
5019                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5020                                              0);
5021
5022         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5023         return E1000_SUCCESS;
5024 }
5025
5026 /**
5027  *  e1000_led_on_ich8lan - Turn LEDs on
5028  *  @hw: pointer to the HW structure
5029  *
5030  *  Turn on the LEDs.
5031  **/
5032 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5033 {
5034         DEBUGFUNC("e1000_led_on_ich8lan");
5035
5036         if (hw->phy.type == e1000_phy_ife)
5037                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5038                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5039
5040         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5041         return E1000_SUCCESS;
5042 }
5043
5044 /**
5045  *  e1000_led_off_ich8lan - Turn LEDs off
5046  *  @hw: pointer to the HW structure
5047  *
5048  *  Turn off the LEDs.
5049  **/
5050 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5051 {
5052         DEBUGFUNC("e1000_led_off_ich8lan");
5053
5054         if (hw->phy.type == e1000_phy_ife)
5055                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5056                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5057
5058         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5059         return E1000_SUCCESS;
5060 }
5061
5062 /**
5063  *  e1000_setup_led_pchlan - Configures SW controllable LED
5064  *  @hw: pointer to the HW structure
5065  *
5066  *  This prepares the SW controllable LED for use.
5067  **/
5068 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5069 {
5070         DEBUGFUNC("e1000_setup_led_pchlan");
5071
5072         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5073                                      (u16)hw->mac.ledctl_mode1);
5074 }
5075
5076 /**
5077  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5078  *  @hw: pointer to the HW structure
5079  *
5080  *  Return the LED back to the default configuration.
5081  **/
5082 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5083 {
5084         DEBUGFUNC("e1000_cleanup_led_pchlan");
5085
5086         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5087                                      (u16)hw->mac.ledctl_default);
5088 }
5089
5090 /**
5091  *  e1000_led_on_pchlan - Turn LEDs on
5092  *  @hw: pointer to the HW structure
5093  *
5094  *  Turn on the LEDs.
5095  **/
5096 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5097 {
5098         u16 data = (u16)hw->mac.ledctl_mode2;
5099         u32 i, led;
5100
5101         DEBUGFUNC("e1000_led_on_pchlan");
5102
5103         /* If no link, then turn LED on by setting the invert bit
5104          * for each LED whose mode is "link_up" in ledctl_mode2.
5105          */
5106         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5107                 for (i = 0; i < 3; i++) {
5108                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5109                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5110                             E1000_LEDCTL_MODE_LINK_UP)
5111                                 continue;
5112                         if (led & E1000_PHY_LED0_IVRT)
5113                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5114                         else
5115                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5116                 }
5117         }
5118
5119         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5120 }
5121
5122 /**
5123  *  e1000_led_off_pchlan - Turn LEDs off
5124  *  @hw: pointer to the HW structure
5125  *
5126  *  Turn off the LEDs.
5127  **/
5128 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5129 {
5130         u16 data = (u16)hw->mac.ledctl_mode1;
5131         u32 i, led;
5132
5133         DEBUGFUNC("e1000_led_off_pchlan");
5134
5135         /* If no link, then turn LED off by clearing the invert bit
5136          * for each LED whose mode is "link_up" in ledctl_mode1.
5137          */
5138         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5139                 for (i = 0; i < 3; i++) {
5140                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5141                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5142                             E1000_LEDCTL_MODE_LINK_UP)
5143                                 continue;
5144                         if (led & E1000_PHY_LED0_IVRT)
5145                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5146                         else
5147                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5148                 }
5149         }
5150
5151         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5152 }
5153
5154 /**
5155  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5156  *  @hw: pointer to the HW structure
5157  *
5158  *  Read appropriate register for the config done bit for completion status
5159  *  and configure the PHY through s/w for EEPROM-less parts.
5160  *
5161  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5162  *  config done bit, so only an error is logged and execution continues.  If we
5163  *  were to return with an error, EEPROM-less silicon would not be able to reset
5164  *  or change link.
5165  **/
5166 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5167 {
5168         s32 ret_val = E1000_SUCCESS;
5169         u32 bank = 0;
5170         u32 status;
5171
5172         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5173
5174         e1000_get_cfg_done_generic(hw);
5175
5176         /* Wait for indication from h/w that it has completed basic config */
5177         if (hw->mac.type >= e1000_ich10lan) {
5178                 e1000_lan_init_done_ich8lan(hw);
5179         } else {
5180                 ret_val = e1000_get_auto_rd_done_generic(hw);
5181                 if (ret_val) {
5182                         /* When auto config read does not complete, do not
5183                          * return with an error. This can happen in situations
5184                          * where there is no eeprom and prevents getting link.
5185                          */
5186                         DEBUGOUT("Auto Read Done did not complete\n");
5187                         ret_val = E1000_SUCCESS;
5188                 }
5189         }
5190
5191         /* Clear PHY Reset Asserted bit */
5192         status = E1000_READ_REG(hw, E1000_STATUS);
5193         if (status & E1000_STATUS_PHYRA)
5194                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5195         else
5196                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5197
5198         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5199         if (hw->mac.type <= e1000_ich9lan) {
5200                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5201                     (hw->phy.type == e1000_phy_igp_3)) {
5202                         e1000_phy_init_script_igp3(hw);
5203                 }
5204         } else {
5205                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5206                         /* Maybe we should do a basic PHY config */
5207                         DEBUGOUT("EEPROM not present\n");
5208                         ret_val = -E1000_ERR_CONFIG;
5209                 }
5210         }
5211
5212         return ret_val;
5213 }
5214
5215 /**
5216  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5217  * @hw: pointer to the HW structure
5218  *
5219  * In the case of a PHY power down to save power, to turn off link during a
5220  * driver unload, or when wake on LAN is not enabled, remove the link.
5221  **/
5222 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5223 {
5224         /* If the management interface is not enabled, then power down */
5225         if (!(hw->mac.ops.check_mng_mode(hw) ||
5226               hw->phy.ops.check_reset_block(hw)))
5227                 e1000_power_down_phy_copper(hw);
5228
5229         return;
5230 }
5231
5232 /**
5233  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5234  *  @hw: pointer to the HW structure
5235  *
5236  *  Clears hardware counters specific to the silicon family and calls
5237  *  clear_hw_cntrs_generic to clear all general purpose counters.
5238  **/
5239 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5240 {
5241         u16 phy_data;
5242         s32 ret_val;
5243
5244         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5245
5246         e1000_clear_hw_cntrs_base_generic(hw);
5247
5248         E1000_READ_REG(hw, E1000_ALGNERRC);
5249         E1000_READ_REG(hw, E1000_RXERRC);
5250         E1000_READ_REG(hw, E1000_TNCRS);
5251         E1000_READ_REG(hw, E1000_CEXTERR);
5252         E1000_READ_REG(hw, E1000_TSCTC);
5253         E1000_READ_REG(hw, E1000_TSCTFC);
5254
5255         E1000_READ_REG(hw, E1000_MGTPRC);
5256         E1000_READ_REG(hw, E1000_MGTPDC);
5257         E1000_READ_REG(hw, E1000_MGTPTC);
5258
5259         E1000_READ_REG(hw, E1000_IAC);
5260         E1000_READ_REG(hw, E1000_ICRXOC);
5261
5262         /* Clear PHY statistics registers */
5263         if ((hw->phy.type == e1000_phy_82578) ||
5264             (hw->phy.type == e1000_phy_82579) ||
5265             (hw->phy.type == e1000_phy_i217) ||
5266             (hw->phy.type == e1000_phy_82577)) {
5267                 ret_val = hw->phy.ops.acquire(hw);
5268                 if (ret_val)
5269                         return;
5270                 ret_val = hw->phy.ops.set_page(hw,
5271                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5272                 if (ret_val)
5273                         goto release;
5274                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5275                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5276                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5277                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5278                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5279                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5280                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5281                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5282                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5283                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5284                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5285                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5286                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5287                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5288 release:
5289                 hw->phy.ops.release(hw);
5290         }
5291 }
5292