net/e1000/base: enable new I219 devices
[dpdk.git] / drivers/net/e1000/base/e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
98                                u16 *data);
99 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
100                                     u16 words, u16 *data);
101 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
102 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
103 STATIC s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
104 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
105                                             u16 *data);
106 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
107 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
110 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
111 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
112 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
113 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
114                                            u16 *speed, u16 *duplex);
115 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
116 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
117 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
118 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
119 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
120 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
121 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
122 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
123 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
125 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
126 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
127 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
128                                           u32 offset, u8 *data);
129 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
130                                           u8 size, u16 *data);
131 STATIC s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
132                                             u32 *data);
133 STATIC s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134                                            u32 offset, u32 *data);
135 STATIC s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
136                                              u32 offset, u32 data);
137 STATIC s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138                                                   u32 offset, u32 dword);
139 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
140                                           u32 offset, u16 *data);
141 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
142                                                  u32 offset, u8 byte);
143 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
144 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
145 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
146 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
147 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
148 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
149
150 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
151 /* Offset 04h HSFSTS */
152 union ich8_hws_flash_status {
153         struct ich8_hsfsts {
154                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
155                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
156                 u16 dael:1; /* bit 2 Direct Access error Log */
157                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
158                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
159                 u16 reserved1:2; /* bit 7:6 Reserved */
160                 u16 reserved2:6; /* bit 13:8 Reserved */
161                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
162                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
163         } hsf_status;
164         u16 regval;
165 };
166
167 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
168 /* Offset 06h FLCTL */
169 union ich8_hws_flash_ctrl {
170         struct ich8_hsflctl {
171                 u16 flcgo:1;   /* 0 Flash Cycle Go */
172                 u16 flcycle:2;   /* 2:1 Flash Cycle */
173                 u16 reserved:5;   /* 7:3 Reserved  */
174                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
175                 u16 flockdn:6;   /* 15:10 Reserved */
176         } hsf_ctrl;
177         u16 regval;
178 };
179
180 /* ICH Flash Region Access Permissions */
181 union ich8_hws_flash_regacc {
182         struct ich8_flracc {
183                 u32 grra:8; /* 0:7 GbE region Read Access */
184                 u32 grwa:8; /* 8:15 GbE region Write Access */
185                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
186                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
187         } hsf_flregacc;
188         u16 regval;
189 };
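/*
 * Illustrative sketch (not part of the upstream file): these unions overlay
 * named bit-fields on the raw register value, so a single 16-bit register
 * read fills "regval" and the individual flags can then be tested or set by
 * name before the value is written back.  The flash accessors shown are the
 * ones this file already uses; the write-1-to-clear of the error bits mirrors
 * the flash cycle initialization flow later in this file.
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (!hsfsts.hsf_status.flcinprog) {
 *		hsfsts.hsf_status.flcerr = 1;
 *		hsfsts.hsf_status.dael = 1;
 *		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 *	}
 */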
190
191 /**
192  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
193  *  @hw: pointer to the HW structure
194  *
195  *  Test access to the PHY registers by reading the PHY ID registers.  If
196  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
197  *  otherwise assume the read PHY ID is correct if it is valid.
198  *
199  *  Assumes the sw/fw/hw semaphore is already acquired.
200  **/
201 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
202 {
203         u16 phy_reg = 0;
204         u32 phy_id = 0;
205         s32 ret_val = 0;
206         u16 retry_count;
207         u32 mac_reg = 0;
208
209         for (retry_count = 0; retry_count < 2; retry_count++) {
210                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
211                 if (ret_val || (phy_reg == 0xFFFF))
212                         continue;
213                 phy_id = (u32)(phy_reg << 16);
214
215                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
216                 if (ret_val || (phy_reg == 0xFFFF)) {
217                         phy_id = 0;
218                         continue;
219                 }
220                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
221                 break;
222         }
223
224         if (hw->phy.id) {
225                 if  (hw->phy.id == phy_id)
226                         goto out;
227         } else if (phy_id) {
228                 hw->phy.id = phy_id;
229                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
230                 goto out;
231         }
232
233         /* In case the PHY needs to be in mdio slow mode,
234          * set slow mode and try to get the PHY id again.
235          */
236         if (hw->mac.type < e1000_pch_lpt) {
237                 hw->phy.ops.release(hw);
238                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
239                 if (!ret_val)
240                         ret_val = e1000_get_phy_id(hw);
241                 hw->phy.ops.acquire(hw);
242         }
243
244         if (ret_val)
245                 return false;
246 out:
247         if (hw->mac.type >= e1000_pch_lpt) {
248                 /* Only unforce SMBus if ME is not active */
249                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
250                     E1000_ICH_FWSM_FW_VALID)) {
251                         /* Unforce SMBus mode in PHY */
252                         hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253                         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254                         hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255
256                         /* Unforce SMBus mode in MAC */
257                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260                 }
261         }
262
263         return true;
264 }
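/*
 * Worked example (illustrative, not from the upstream file): the 32-bit PHY
 * id probed above is assembled from the two 16-bit MII id registers, with
 * PHY_ID1 providing the upper half and PHY_ID2 providing the lower half
 * minus its four revision bits:
 *
 *	phy_id   = ((u32)id1 << 16) | (id2 & PHY_REVISION_MASK);
 *	revision = (u32)(id2 & ~PHY_REVISION_MASK);
 *
 * e.g. hypothetical reads id1 = 0x0154 and id2 = 0x9872 would yield
 * phy_id = 0x01549870 and revision = 2.
 */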
265
266 /**
267  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
268  *  @hw: pointer to the HW structure
269  *
270  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
271  *  used to reset the PHY to a quiescent state when necessary.
272  **/
273 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
274 {
275         u32 mac_reg;
276
277         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278
279         /* Set Phy Config Counter to 50msec */
280         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
281         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
282         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
283         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284
285         /* Toggle LANPHYPC Value bit */
286         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
287         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
288         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
289         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
290         E1000_WRITE_FLUSH(hw);
291         msec_delay(1);
292         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
294         E1000_WRITE_FLUSH(hw);
295
296         if (hw->mac.type < e1000_pch_lpt) {
297                 msec_delay(50);
298         } else {
299                 u16 count = 20;
300
301                 do {
302                         msec_delay(5);
303                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
304                            E1000_CTRL_EXT_LPCD) && count--);
305
306                 msec_delay(30);
307         }
308 }
309
310 /**
311  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
312  *  @hw: pointer to the HW structure
313  *
314  *  Workarounds/flow necessary for PHY initialization during driver load
315  *  and resume paths.
316  **/
317 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318 {
319         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
320         s32 ret_val;
321
322         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323
324         /* Gate automatic PHY configuration by hardware on managed and
325          * non-managed 82579 and newer adapters.
326          */
327         e1000_gate_hw_phy_config_ich8lan(hw, true);
328
329 #ifdef ULP_SUPPORT
330         /* It is not possible to be certain of the current state of ULP
331          * so forcibly disable it.
332          */
333         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
334
335 #endif /* ULP_SUPPORT */
336         ret_val = hw->phy.ops.acquire(hw);
337         if (ret_val) {
338                 DEBUGOUT("Failed to initialize PHY flow\n");
339                 goto out;
340         }
341
342         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
343          * inaccessible and resetting the PHY is not blocked, toggle the
344          * LANPHYPC Value bit to force the interconnect to PCIe mode.
345          */
346         switch (hw->mac.type) {
347         case e1000_pch_lpt:
348         case e1000_pch_spt:
349                 if (e1000_phy_is_accessible_pchlan(hw))
350                         break;
351
352                 /* Before toggling LANPHYPC, see if PHY is accessible by
353                  * forcing MAC to SMBus mode first.
354                  */
355                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
356                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
357                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
358
359                 /* Wait 50 milliseconds for MAC to finish any retries
360                  * that it might be trying to perform from previous
361                  * attempts to acknowledge any phy read requests.
362                  */
363                  msec_delay(50);
364
365                 /* fall-through */
366         case e1000_pch2lan:
367                 if (e1000_phy_is_accessible_pchlan(hw))
368                         break;
369
370                 /* fall-through */
371         case e1000_pchlan:
372                 if ((hw->mac.type == e1000_pchlan) &&
373                     (fwsm & E1000_ICH_FWSM_FW_VALID))
374                         break;
375
376                 if (hw->phy.ops.check_reset_block(hw)) {
377                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
378                         ret_val = -E1000_ERR_PHY;
379                         break;
380                 }
381
382                 /* Toggle LANPHYPC Value bit */
383                 e1000_toggle_lanphypc_pch_lpt(hw);
384                 if (hw->mac.type >= e1000_pch_lpt) {
385                         if (e1000_phy_is_accessible_pchlan(hw))
386                                 break;
387
388                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
389                          * so ensure that the MAC is also out of SMBus mode
390                          */
391                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
392                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
393                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
394
395                         if (e1000_phy_is_accessible_pchlan(hw))
396                                 break;
397
398                         ret_val = -E1000_ERR_PHY;
399                 }
400                 break;
401         default:
402                 break;
403         }
404
405         hw->phy.ops.release(hw);
406         if (!ret_val) {
407
408                 /* Check to see if able to reset PHY.  Print error if not */
409                 if (hw->phy.ops.check_reset_block(hw)) {
410                         ERROR_REPORT("Reset blocked by ME\n");
411                         goto out;
412                 }
413
414                 /* Reset the PHY before any access to it.  Doing so, ensures
415                  * that the PHY is in a known good state before we read/write
416                  * PHY registers.  The generic reset is sufficient here,
417                  * because we haven't determined the PHY type yet.
418                  */
419                 ret_val = e1000_phy_hw_reset_generic(hw);
420                 if (ret_val)
421                         goto out;
422
423                 /* On a successful reset, possibly need to wait for the PHY
424                  * to quiesce to an accessible state before returning control
425                  * to the calling function.  If the PHY does not quiesce, then
426                  * return E1000E_BLK_PHY_RESET, as this is the condition that
427                  *  the PHY is in.
428                  */
429                 ret_val = hw->phy.ops.check_reset_block(hw);
430                 if (ret_val)
431                         ERROR_REPORT("ME blocked access to PHY after reset\n");
432         }
433
434 out:
435         /* Ungate automatic PHY configuration on non-managed 82579 */
436         if ((hw->mac.type == e1000_pch2lan) &&
437             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
438                 msec_delay(10);
439                 e1000_gate_hw_phy_config_ich8lan(hw, false);
440         }
441
442         return ret_val;
443 }
444
445 /**
446  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
447  *  @hw: pointer to the HW structure
448  *
449  *  Initialize family-specific PHY parameters and function pointers.
450  **/
451 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
452 {
453         struct e1000_phy_info *phy = &hw->phy;
454         s32 ret_val;
455
456         DEBUGFUNC("e1000_init_phy_params_pchlan");
457
458         phy->addr               = 1;
459         phy->reset_delay_us     = 100;
460
461         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
462         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
463         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
464         phy->ops.set_page       = e1000_set_page_igp;
465         phy->ops.read_reg       = e1000_read_phy_reg_hv;
466         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
467         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
468         phy->ops.release        = e1000_release_swflag_ich8lan;
469         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
470         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
471         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
472         phy->ops.write_reg      = e1000_write_phy_reg_hv;
473         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
474         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
475         phy->ops.power_up       = e1000_power_up_phy_copper;
476         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
477         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
478
479         phy->id = e1000_phy_unknown;
480
481         ret_val = e1000_init_phy_workarounds_pchlan(hw);
482         if (ret_val)
483                 return ret_val;
484
485         if (phy->id == e1000_phy_unknown)
486                 switch (hw->mac.type) {
487                 default:
488                         ret_val = e1000_get_phy_id(hw);
489                         if (ret_val)
490                                 return ret_val;
491                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
492                                 break;
493                         /* fall-through */
494                 case e1000_pch2lan:
495                 case e1000_pch_lpt:
496                 case e1000_pch_spt:
497                         /* In case the PHY needs to be in mdio slow mode,
498                          * set slow mode and try to get the PHY id again.
499                          */
500                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
501                         if (ret_val)
502                                 return ret_val;
503                         ret_val = e1000_get_phy_id(hw);
504                         if (ret_val)
505                                 return ret_val;
506                         break;
507                 }
508         phy->type = e1000_get_phy_type_from_id(phy->id);
509
510         switch (phy->type) {
511         case e1000_phy_82577:
512         case e1000_phy_82579:
513         case e1000_phy_i217:
514                 phy->ops.check_polarity = e1000_check_polarity_82577;
515                 phy->ops.force_speed_duplex =
516                         e1000_phy_force_speed_duplex_82577;
517                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
518                 phy->ops.get_info = e1000_get_phy_info_82577;
519                 phy->ops.commit = e1000_phy_sw_reset_generic;
520                 break;
521         case e1000_phy_82578:
522                 phy->ops.check_polarity = e1000_check_polarity_m88;
523                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
524                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
525                 phy->ops.get_info = e1000_get_phy_info_m88;
526                 break;
527         default:
528                 ret_val = -E1000_ERR_PHY;
529                 break;
530         }
531
532         return ret_val;
533 }
534
535 /**
536  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
537  *  @hw: pointer to the HW structure
538  *
539  *  Initialize family-specific PHY parameters and function pointers.
540  **/
541 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
542 {
543         struct e1000_phy_info *phy = &hw->phy;
544         s32 ret_val;
545         u16 i = 0;
546
547         DEBUGFUNC("e1000_init_phy_params_ich8lan");
548
549         phy->addr               = 1;
550         phy->reset_delay_us     = 100;
551
552         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
553         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
554         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
555         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
556         phy->ops.read_reg       = e1000_read_phy_reg_igp;
557         phy->ops.release        = e1000_release_swflag_ich8lan;
558         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
559         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
560         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
561         phy->ops.write_reg      = e1000_write_phy_reg_igp;
562         phy->ops.power_up       = e1000_power_up_phy_copper;
563         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
564
565         /* We may need to do this twice - once for IGP and if that fails,
566          * we'll set BM func pointers and try again
567          */
568         ret_val = e1000_determine_phy_address(hw);
569         if (ret_val) {
570                 phy->ops.write_reg = e1000_write_phy_reg_bm;
571                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
572                 ret_val = e1000_determine_phy_address(hw);
573                 if (ret_val) {
574                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
575                         return ret_val;
576                 }
577         }
578
579         phy->id = 0;
580         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
581                (i++ < 100)) {
582                 msec_delay(1);
583                 ret_val = e1000_get_phy_id(hw);
584                 if (ret_val)
585                         return ret_val;
586         }
587
588         /* Verify phy id */
589         switch (phy->id) {
590         case IGP03E1000_E_PHY_ID:
591                 phy->type = e1000_phy_igp_3;
592                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
593                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
594                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
595                 phy->ops.get_info = e1000_get_phy_info_igp;
596                 phy->ops.check_polarity = e1000_check_polarity_igp;
597                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
598                 break;
599         case IFE_E_PHY_ID:
600         case IFE_PLUS_E_PHY_ID:
601         case IFE_C_E_PHY_ID:
602                 phy->type = e1000_phy_ife;
603                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
604                 phy->ops.get_info = e1000_get_phy_info_ife;
605                 phy->ops.check_polarity = e1000_check_polarity_ife;
606                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
607                 break;
608         case BME1000_E_PHY_ID:
609                 phy->type = e1000_phy_bm;
610                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
611                 phy->ops.read_reg = e1000_read_phy_reg_bm;
612                 phy->ops.write_reg = e1000_write_phy_reg_bm;
613                 phy->ops.commit = e1000_phy_sw_reset_generic;
614                 phy->ops.get_info = e1000_get_phy_info_m88;
615                 phy->ops.check_polarity = e1000_check_polarity_m88;
616                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
617                 break;
618         default:
619                 return -E1000_ERR_PHY;
620                 break;
621         }
622
623         return E1000_SUCCESS;
624 }
625
626 /**
627  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
628  *  @hw: pointer to the HW structure
629  *
630  *  Initialize family-specific NVM parameters and function
631  *  pointers.
632  **/
633 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
634 {
635         struct e1000_nvm_info *nvm = &hw->nvm;
636         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
637         u32 gfpreg, sector_base_addr, sector_end_addr;
638         u16 i;
639         u32 nvm_size;
640
641         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
642
643         nvm->type = e1000_nvm_flash_sw;
644
645         if (hw->mac.type >= e1000_pch_spt) {
646                 /* in SPT, gfpreg doesn't exist. NVM size is taken from the
647                  * STRAP register. This is because in SPT the GbE Flash region
648                  * is no longer accessed through the flash registers. Instead,
649                  * the mechanism has changed, and the Flash region access
650                  * registers are now implemented in GbE memory space.
651                  */
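                /* Worked example (illustrative): assuming NVM_SIZE_MULTIPLIER
                 * is 4096 bytes, a 5-bit STRAP size field of 7 gives
                 * (7 + 1) * 4096 = 32768 bytes of NVM in total, i.e. a
                 * 16384-byte bank, or 8192 words after the divisions below.
                 */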
652                 nvm->flash_base_addr = 0;
653                 nvm_size =
654                     (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
655                     * NVM_SIZE_MULTIPLIER;
656                 nvm->flash_bank_size = nvm_size / 2;
657                 /* Adjust to word count */
658                 nvm->flash_bank_size /= sizeof(u16);
659                 /* Set the base address for flash register access */
660                 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
661         } else {
662                 /* Can't read flash registers if register set isn't mapped. */
663                 if (!hw->flash_address) {
664                         DEBUGOUT("ERROR: Flash registers not mapped\n");
665                         return -E1000_ERR_CONFIG;
666                 }
667
668                 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
669
670                 /* sector_X_addr is a "sector"-aligned address (4096 bytes)
671                  * Add 1 to sector_end_addr since this sector is included in
672                  * the overall size.
673                  */
674                 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
675                 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
676
677                 /* flash_base_addr is byte-aligned */
678                 nvm->flash_base_addr = sector_base_addr
679                                        << FLASH_SECTOR_ADDR_SHIFT;
680
681                 /* find total size of the NVM, then cut in half since the total
682                  * size represents two separate NVM banks.
683                  */
684                 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
685                                         << FLASH_SECTOR_ADDR_SHIFT);
686                 nvm->flash_bank_size /= 2;
687                 /* Adjust to word count */
688                 nvm->flash_bank_size /= sizeof(u16);
689         }
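        /* Worked example (illustrative), assuming the usual 4 KB flash
         * sectors (FLASH_SECTOR_ADDR_SHIFT of 12) and a 0x1FFF base mask:
         * GFPREG = 0x00100001 gives sector_base_addr = 1 and
         * sector_end_addr = 0x11, so the region starts at byte 0x1000 and
         * spans 16 sectors (65536 bytes), leaving each of the two banks
         * 32768 bytes, or 16384 words.
         */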
690
691         nvm->word_size = E1000_SHADOW_RAM_WORDS;
692
693         /* Clear shadow ram */
694         for (i = 0; i < nvm->word_size; i++) {
695                 dev_spec->shadow_ram[i].modified = false;
696                 dev_spec->shadow_ram[i].value    = 0xFFFF;
697         }
698
699         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
700         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
701
702         /* Function Pointers */
703         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
704         nvm->ops.release        = e1000_release_nvm_ich8lan;
705         if (hw->mac.type >= e1000_pch_spt) {
706                 nvm->ops.read   = e1000_read_nvm_spt;
707                 nvm->ops.update = e1000_update_nvm_checksum_spt;
708         } else {
709                 nvm->ops.read   = e1000_read_nvm_ich8lan;
710                 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
711         }
712         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
713         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
714         nvm->ops.write          = e1000_write_nvm_ich8lan;
715
716         return E1000_SUCCESS;
717 }
718
719 /**
720  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
721  *  @hw: pointer to the HW structure
722  *
723  *  Initialize family-specific MAC parameters and function
724  *  pointers.
725  **/
726 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
727 {
728         struct e1000_mac_info *mac = &hw->mac;
729 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
730         u16 pci_cfg;
731 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
732
733         DEBUGFUNC("e1000_init_mac_params_ich8lan");
734
735         /* Set media type function pointer */
736         hw->phy.media_type = e1000_media_type_copper;
737
738         /* Set mta register count */
739         mac->mta_reg_count = 32;
740         /* Set rar entry count */
741         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
742         if (mac->type == e1000_ich8lan)
743                 mac->rar_entry_count--;
744         /* Set if part includes ASF firmware */
745         mac->asf_firmware_present = true;
746         /* FWSM register */
747         mac->has_fwsm = true;
748         /* ARC subsystem not supported */
749         mac->arc_subsystem_valid = false;
750         /* Adaptive IFS supported */
751         mac->adaptive_ifs = true;
752
753         /* Function pointers */
754
755         /* bus type/speed/width */
756         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
757         /* function id */
758         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
759         /* reset */
760         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
761         /* hw initialization */
762         mac->ops.init_hw = e1000_init_hw_ich8lan;
763         /* link setup */
764         mac->ops.setup_link = e1000_setup_link_ich8lan;
765         /* physical interface setup */
766         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
767         /* check for link */
768         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
769         /* link info */
770         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
771         /* multicast address update */
772         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
773         /* clear hardware counters */
774         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
775
776         /* LED and other operations */
777         switch (mac->type) {
778         case e1000_ich8lan:
779         case e1000_ich9lan:
780         case e1000_ich10lan:
781                 /* check management mode */
782                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
783                 /* ID LED init */
784                 mac->ops.id_led_init = e1000_id_led_init_generic;
785                 /* blink LED */
786                 mac->ops.blink_led = e1000_blink_led_generic;
787                 /* setup LED */
788                 mac->ops.setup_led = e1000_setup_led_generic;
789                 /* cleanup LED */
790                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
791                 /* turn on/off LED */
792                 mac->ops.led_on = e1000_led_on_ich8lan;
793                 mac->ops.led_off = e1000_led_off_ich8lan;
794                 break;
795         case e1000_pch2lan:
796                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
797                 mac->ops.rar_set = e1000_rar_set_pch2lan;
798                 /* fall-through */
799         case e1000_pch_lpt:
800         case e1000_pch_spt:
801 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
802                 /* multicast address update for pch2 */
803                 mac->ops.update_mc_addr_list =
804                         e1000_update_mc_addr_list_pch2lan;
805                 /* fall-through */
806 #endif
807         case e1000_pchlan:
808 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
809                 /* save PCH revision_id */
810                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
811                 /* SPT uses full byte for revision ID,
812                  * as opposed to previous generations
813                  */
814                 if (hw->mac.type >= e1000_pch_spt)
815                         hw->revision_id = (u8)(pci_cfg &= 0x00FF);
816                 else
817                         hw->revision_id = (u8)(pci_cfg &= 0x000F);
818 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
819                 /* check management mode */
820                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
821                 /* ID LED init */
822                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
823                 /* setup LED */
824                 mac->ops.setup_led = e1000_setup_led_pchlan;
825                 /* cleanup LED */
826                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
827                 /* turn on/off LED */
828                 mac->ops.led_on = e1000_led_on_pchlan;
829                 mac->ops.led_off = e1000_led_off_pchlan;
830                 break;
831         default:
832                 break;
833         }
834
835         if (mac->type >= e1000_pch_lpt) {
836                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
837                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
838                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
839         }
840
841         /* Enable PCS Lock-loss workaround for ICH8 */
842         if (mac->type == e1000_ich8lan)
843                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
844
845         return E1000_SUCCESS;
846 }
847
848 /**
849  *  __e1000_access_emi_reg_locked - Read/write EMI register
850  *  @hw: pointer to the HW structure
851  *  @address: EMI address to program
852  *  @data: pointer to value to read/write from/to the EMI address
853  *  @read: boolean flag to indicate read or write
854  *
855  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
856  **/
857 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
858                                          u16 *data, bool read)
859 {
860         s32 ret_val;
861
862         DEBUGFUNC("__e1000_access_emi_reg_locked");
863
864         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
865         if (ret_val)
866                 return ret_val;
867
868         if (read)
869                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
870                                                       data);
871         else
872                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
873                                                        *data);
874
875         return ret_val;
876 }
877
878 /**
879  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
880  *  @hw: pointer to the HW structure
881  *  @addr: EMI address to program
882  *  @data: value to be read from the EMI address
883  *
884  *  Assumes the SW/FW/HW Semaphore is already acquired.
885  **/
886 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
887 {
888         DEBUGFUNC("e1000_read_emi_reg_locked");
889
890         return __e1000_access_emi_reg_locked(hw, addr, data, true);
891 }
892
893 /**
894  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
895  *  @hw: pointer to the HW structure
896  *  @addr: EMI address to program
897  *  @data: value to be written to the EMI address
898  *
899  *  Assumes the SW/FW/HW Semaphore is already acquired.
900  **/
901 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
902 {
903         DEBUGFUNC("e1000_write_emi_reg_locked");
904
905         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
906 }
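/*
 * Usage sketch (illustrative, not part of the upstream file): EMI registers
 * are reached indirectly - the target address goes into I82579_EMI_ADDR and
 * the payload is then moved through I82579_EMI_DATA - so callers hold the
 * SW/FW/HW semaphore around the access, as e1000_set_eee_pchlan() below does:
 *
 *	u16 data;
 *	s32 ret_val;
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *	hw->phy.ops.release(hw);
 */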
907
908 /**
909  *  e1000_set_eee_pchlan - Enable/disable EEE support
910  *  @hw: pointer to the HW structure
911  *
912  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
913  *  the link and the EEE capabilities of the link partner.  The LPI Control
914  *  register bits will remain set only if/when link is up.
915  *
916  *  EEE LPI must not be asserted earlier than one second after link is up.
917  *  On 82579, EEE LPI should not be enabled until such time otherwise there
918  *  can be link issues with some switches.  Other devices can have EEE LPI
919  *  enabled immediately upon link up since they have a timer in hardware which
920  *  prevents LPI from being asserted too early.
921  **/
922 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
923 {
924         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
925         s32 ret_val;
926         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
927
928         DEBUGFUNC("e1000_set_eee_pchlan");
929
930         switch (hw->phy.type) {
931         case e1000_phy_82579:
932                 lpa = I82579_EEE_LP_ABILITY;
933                 pcs_status = I82579_EEE_PCS_STATUS;
934                 adv_addr = I82579_EEE_ADVERTISEMENT;
935                 break;
936         case e1000_phy_i217:
937                 lpa = I217_EEE_LP_ABILITY;
938                 pcs_status = I217_EEE_PCS_STATUS;
939                 adv_addr = I217_EEE_ADVERTISEMENT;
940                 break;
941         default:
942                 return E1000_SUCCESS;
943         }
944
945         ret_val = hw->phy.ops.acquire(hw);
946         if (ret_val)
947                 return ret_val;
948
949         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
950         if (ret_val)
951                 goto release;
952
953         /* Clear bits that enable EEE in various speeds */
954         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
955
956         /* Enable EEE if not disabled by user */
957         if (!dev_spec->eee_disable) {
958                 /* Save off link partner's EEE ability */
959                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
960                                                     &dev_spec->eee_lp_ability);
961                 if (ret_val)
962                         goto release;
963
964                 /* Read EEE advertisement */
965                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
966                 if (ret_val)
967                         goto release;
968
969                 /* Enable EEE only for speeds in which the link partner is
970                  * EEE capable and for which we advertise EEE.
971                  */
972                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
973                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
974
975                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
976                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
977                         if (data & NWAY_LPAR_100TX_FD_CAPS)
978                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
979                         else
980                                 /* EEE is not supported in 100Half, so ignore
981                                  * partner's EEE in 100 ability if full-duplex
982                                  * is not advertised.
983                                  */
984                                 dev_spec->eee_lp_ability &=
985                                     ~I82579_EEE_100_SUPPORTED;
986                 }
987         }
988
989         if (hw->phy.type == e1000_phy_82579) {
990                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
991                                                     &data);
992                 if (ret_val)
993                         goto release;
994
995                 data &= ~I82579_LPI_100_PLL_SHUT;
996                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
997                                                      data);
998         }
999
1000         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
1001         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
1002         if (ret_val)
1003                 goto release;
1004
1005         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
1006 release:
1007         hw->phy.ops.release(hw);
1008
1009         return ret_val;
1010 }
1011
1012 /**
1013  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
1014  *  @hw:   pointer to the HW structure
1015  *  @link: link up bool flag
1016  *
1017  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1018  *  preventing further DMA write requests.  Workaround the issue by disabling
1019  *  the de-assertion of the clock request when in 1Gbps mode.
1020  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1021  *  speeds in order to avoid Tx hangs.
1022  **/
1023 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1024 {
1025         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1026         u32 status = E1000_READ_REG(hw, E1000_STATUS);
1027         s32 ret_val = E1000_SUCCESS;
1028         u16 reg;
1029
1030         if (link && (status & E1000_STATUS_SPEED_1000)) {
1031                 ret_val = hw->phy.ops.acquire(hw);
1032                 if (ret_val)
1033                         return ret_val;
1034
1035                 ret_val =
1036                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1037                                                &reg);
1038                 if (ret_val)
1039                         goto release;
1040
1041                 ret_val =
1042                     e1000_write_kmrn_reg_locked(hw,
1043                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1044                                                 reg &
1045                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1046                 if (ret_val)
1047                         goto release;
1048
1049                 usec_delay(10);
1050
1051                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1052                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1053
1054                 ret_val =
1055                     e1000_write_kmrn_reg_locked(hw,
1056                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1057                                                 reg);
1058 release:
1059                 hw->phy.ops.release(hw);
1060         } else {
1061                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1062                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1063
1064                 if ((hw->phy.revision > 5) || !link ||
1065                     ((status & E1000_STATUS_SPEED_100) &&
1066                      (status & E1000_STATUS_FD)))
1067                         goto update_fextnvm6;
1068
1069                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1070                 if (ret_val)
1071                         return ret_val;
1072
1073                 /* Clear link status transmit timeout */
1074                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1075
1076                 if (status & E1000_STATUS_SPEED_100) {
1077                         /* Set inband Tx timeout to 5x10us for 100Half */
1078                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1079
1080                         /* Do not extend the K1 entry latency for 100Half */
1081                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1082                 } else {
1083                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1084                         reg |= 50 <<
1085                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1086
1087                         /* Extend the K1 entry latency for 10 Mbps */
1088                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1089                 }
1090
1091                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1092                 if (ret_val)
1093                         return ret_val;
1094
1095 update_fextnvm6:
1096                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1097         }
1098
1099         return ret_val;
1100 }
1101
1102 #ifdef ULP_SUPPORT
1103 /**
1104  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1105  *  @hw: pointer to the HW structure
1106  *  @to_sx: boolean indicating a system power state transition to Sx
1107  *
1108  *  When link is down, configure ULP mode to significantly reduce the power
1109  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1110  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1111  *  system, configure the ULP mode by software.
1112  */
1113 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1114 {
1115         u32 mac_reg;
1116         s32 ret_val = E1000_SUCCESS;
1117         u16 phy_reg;
1118         u16 oem_reg = 0;
1119
1120         if ((hw->mac.type < e1000_pch_lpt) ||
1121             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1122             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1123             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1124             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1125             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1126                 return 0;
1127
1128         if (!to_sx) {
1129                 int i = 0;
1130                 /* Poll up to 5 seconds for Cable Disconnected indication */
1131                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1132                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1133                         /* Bail if link is re-acquired */
1134                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1135                                 return -E1000_ERR_PHY;
1136                         if (i++ == 100)
1137                                 break;
1138
1139                         msec_delay(50);
1140                 }
1141                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1142                           (E1000_READ_REG(hw, E1000_FEXT) &
1143                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1144                           i * 50);
1145                 if (!(E1000_READ_REG(hw, E1000_FEXT) &
1146                     E1000_FEXT_PHY_CABLE_DISCONNECTED))
1147                         return 0;
1148         }
1149
1150         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1151                 /* Request ME configure ULP mode in the PHY */
1152                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1153                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1154                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1155
1156                 goto out;
1157         }
1158
1159         ret_val = hw->phy.ops.acquire(hw);
1160         if (ret_val)
1161                 goto out;
1162
1163         /* During S0 Idle keep the phy in PCI-E mode */
1164         if (hw->dev_spec.ich8lan.smbus_disable)
1165                 goto skip_smbus;
1166
1167         /* Force SMBus mode in PHY */
1168         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1169         if (ret_val)
1170                 goto release;
1171         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1172         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1173
1174         /* Force SMBus mode in MAC */
1175         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1176         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1177         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1178
1179         /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1180          * LPLU and disable Gig speed when entering ULP
1181          */
1182         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1183                 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1184                                                        &oem_reg);
1185                 if (ret_val)
1186                         goto release;
1187
1188                 phy_reg = oem_reg;
1189                 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1190
1191                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1192                                                         phy_reg);
1193
1194                 if (ret_val)
1195                         goto release;
1196         }
1197
1198 skip_smbus:
1199         if (!to_sx) {
1200                 /* Change the 'Link Status Change' interrupt to trigger
1201                  * on 'Cable Status Change'
1202                  */
1203                 ret_val = e1000_read_kmrn_reg_locked(hw,
1204                                                      E1000_KMRNCTRLSTA_OP_MODES,
1205                                                      &phy_reg);
1206                 if (ret_val)
1207                         goto release;
1208                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1209                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1210                                             phy_reg);
1211         }
1212
1213         /* Set Inband ULP Exit, Reset to SMBus mode and
1214          * Disable SMBus Release on PERST# in PHY
1215          */
1216         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1217         if (ret_val)
1218                 goto release;
1219         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1220                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1221         if (to_sx) {
1222                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1223                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1224                 else
1225                         phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1226
1227                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1228                 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1229         } else {
1230                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1231                 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1232                 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1233         }
1234         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1235
1236         /* Set Disable SMBus Release on PERST# in MAC */
1237         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1238         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1239         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1240
1241         /* Commit ULP changes in PHY by starting auto ULP configuration */
1242         phy_reg |= I218_ULP_CONFIG1_START;
1243         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1244
1245         if (!to_sx) {
1246                 /* Disable Tx so that the MAC doesn't send any (buffered)
1247                  * packets to the PHY.
1248                  */
1249                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1250                 mac_reg &= ~E1000_TCTL_EN;
1251                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1252         }
1253
1254         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1255             to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1256                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1257                                                         oem_reg);
1258                 if (ret_val)
1259                         goto release;
1260         }
1261
1262 release:
1263         hw->phy.ops.release(hw);
1264 out:
1265         if (ret_val)
1266                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1267         else
1268                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1269
1270         return ret_val;
1271 }
1272
1273 /**
1274  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1275  *  @hw: pointer to the HW structure
1276  *  @force: boolean indicating whether or not to force disabling ULP
1277  *
1278  *  Un-configure ULP mode when link is up, the system is transitioned from
1279  *  Sx, or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1280  *  system, poll for an indication from ME that ULP has been un-configured.
1281  *  If not on an ME enabled system, un-configure the ULP mode by software.
1282  *
1283  *  During nominal operation, this function is called when link is acquired
1284  *  to disable ULP mode (force=false); otherwise, for example when unloading
1285  *  the driver or during Sx->S0 transitions, this is called with force=true
1286  *  to forcibly disable ULP.
1287  *
1288  *  When the cable is plugged in while the device is in D0, a Cable Status
1289  *  Change interrupt is generated which causes this function to be called
1290  *  to partially disable ULP mode and restart autonegotiation.  This function
1291  *  is then called again due to the resulting Link Status Change interrupt
1292  *  to finish cleaning up after the ULP flow.
1293  */
1294 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1295 {
1296         s32 ret_val = E1000_SUCCESS;
1297         u32 mac_reg;
1298         u16 phy_reg;
1299         int i = 0;
1300
1301         if ((hw->mac.type < e1000_pch_lpt) ||
1302             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1303             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1304             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1305             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1306             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1307                 return 0;
1308
1309         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1310                 if (force) {
1311                         /* Request ME un-configure ULP mode in the PHY */
1312                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1313                         mac_reg &= ~E1000_H2ME_ULP;
1314                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1315                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1316                 }
1317
1318                 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1319                 while (E1000_READ_REG(hw, E1000_FWSM) &
1320                        E1000_FWSM_ULP_CFG_DONE) {
1321                         if (i++ == 30) {
1322                                 ret_val = -E1000_ERR_PHY;
1323                                 goto out;
1324                         }
1325
1326                         msec_delay(10);
1327                 }
1328                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1329
1330                 if (force) {
1331                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1332                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1333                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1334                 } else {
1335                         /* Clear H2ME.ULP after ME ULP configuration */
1336                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1337                         mac_reg &= ~E1000_H2ME_ULP;
1338                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1339
1340                         /* Restore link speed advertisements and restart
1341                          * Auto-negotiation
1342                          */
1343                         if (hw->mac.autoneg) {
1344                                 ret_val = e1000_phy_setup_autoneg(hw);
1345                                 if (ret_val)
1346                                         goto out;
1347                         } else {
1348                                 ret_val = e1000_setup_copper_link_generic(hw);
1349                                 if (ret_val)
1350                                         goto out;
1351                         }
1352                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1353                 }
1354
1355                 goto out;
1356         }
1357
1358         ret_val = hw->phy.ops.acquire(hw);
1359         if (ret_val)
1360                 goto out;
1361
1362         /* Revert the change that made the 'Link Status Change'
1363          * interrupt trigger on 'Cable Status Change'
1364          */
1365         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1366                                              &phy_reg);
1367         if (ret_val)
1368                 goto release;
1369         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1370         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1371
1372         if (force)
1373                 /* Toggle LANPHYPC Value bit */
1374                 e1000_toggle_lanphypc_pch_lpt(hw);
1375
1376         /* Unforce SMBus mode in PHY */
1377         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1378         if (ret_val) {
1379                 /* The MAC might be in PCIe mode, so temporarily force to
1380                  * SMBus mode in order to access the PHY.
1381                  */
1382                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1383                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1384                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1385
1386                 msec_delay(50);
1387
1388                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1389                                                        &phy_reg);
1390                 if (ret_val)
1391                         goto release;
1392         }
1393         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1394         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1395
1396         /* Unforce SMBus mode in MAC */
1397         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1398         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1399         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1400
1401         /* When ULP mode was previously entered, K1 was disabled by the
1402          * hardware.  Re-enable K1 in the PHY when exiting ULP.
1403          */
1404         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1405         if (ret_val)
1406                 goto release;
1407         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1408         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1409
1410         /* Clear ULP enabled configuration */
1411         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1412         if (ret_val)
1413                 goto release;
1414         /* CSC interrupt received due to ULP Indication */
1415         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1416                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1417                              I218_ULP_CONFIG1_STICKY_ULP |
1418                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1419                              I218_ULP_CONFIG1_WOL_HOST |
1420                              I218_ULP_CONFIG1_INBAND_EXIT |
1421                              I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1422                              I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1423                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1424                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1425
1426                 /* Commit ULP changes by starting auto ULP configuration */
1427                 phy_reg |= I218_ULP_CONFIG1_START;
1428                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1429
1430                 /* Clear Disable SMBus Release on PERST# in MAC */
1431                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1432                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1433                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1434
1435                 if (!force) {
1436                         hw->phy.ops.release(hw);
1437
1438                         if (hw->mac.autoneg)
1439                                 e1000_phy_setup_autoneg(hw);
1440                         else
1441                                 e1000_setup_copper_link_generic(hw);
1442
1443                         e1000_sw_lcd_config_ich8lan(hw);
1444
1445                         e1000_oem_bits_config_ich8lan(hw, true);
1446
1447                         /* Set ULP state to unknown and return non-zero to
1448                          * indicate no link (yet) and re-enter on the next LSC
1449                          * to finish disabling ULP flow.
1450                          */
1451                         hw->dev_spec.ich8lan.ulp_state =
1452                             e1000_ulp_state_unknown;
1453
1454                         return 1;
1455                 }
1456         }
1457
1458         /* Re-enable Tx */
1459         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1460         mac_reg |= E1000_TCTL_EN;
1461         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1462
1463 release:
1464         hw->phy.ops.release(hw);
1465         if (force) {
1466                 hw->phy.ops.reset(hw);
1467                 msec_delay(50);
1468         }
1469 out:
1470         if (ret_val)
1471                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1472         else
1473                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1474
1475         return ret_val;
1476 }
1477
1478 #endif /* ULP_SUPPORT */
1479
1480
1481 /**
1482  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1483  *  @hw: pointer to the HW structure
1484  *
1485  *  Checks to see if the link status of the hardware has changed.  If a
1486  *  change in link status has been detected, then we read the PHY registers
1487  *  to get the current speed/duplex if link exists.
1488  **/
1489 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1490 {
1491         struct e1000_mac_info *mac = &hw->mac;
1492         s32 ret_val, tipg_reg = 0;
1493         u16 emi_addr, emi_val = 0;
1494         bool link = false;
1495         u16 phy_reg;
1496
1497         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1498
1499         /* We only want to go out to the PHY registers to see if Auto-Neg
1500          * has completed and/or if our link status has changed.  The
1501          * get_link_status flag is set upon receiving a Link Status
1502          * Change or Rx Sequence Error interrupt.
1503          */
1504         if (!mac->get_link_status)
1505                 return E1000_SUCCESS;
1506
1507         if ((hw->mac.type < e1000_pch_lpt) ||
1508             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1509             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1510                 /* First we want to see if the MII Status Register reports
1511                  * link.  If so, then we want to get the current speed/duplex
1512                  * of the PHY.
1513                  */
1514                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1515                 if (ret_val)
1516                         return ret_val;
1517         } else {
1518                 /* Check the MAC's STATUS register to determine link state
1519                  * since the PHY could be inaccessible while in ULP mode.
1520                  */
1521                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1522                 if (link)
1523                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1524                 else
1525                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1526                 if (ret_val)
1527                         return ret_val;
1528         }
1529
1530         if (hw->mac.type == e1000_pchlan) {
1531                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1532                 if (ret_val)
1533                         return ret_val;
1534         }
1535
1536         /* When connected at 10Mbps half-duplex, some parts are excessively
1537          * aggressive resulting in many collisions. To avoid this, increase
1538          * the IPG and reduce Rx latency in the PHY.
1539          */
1540         if ((hw->mac.type >= e1000_pch2lan) && link) {
1541                 u16 speed, duplex;
1542
1543                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1544                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1545                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1546
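                /* As written below, the IPG value and PHY Rx latency value
                 * (emi_val) are chosen as a pair: 0xFF/0 at 10 Mb/s
                 * half-duplex, 0xC/1 on SPT and later parts at non-gigabit
                 * full-duplex, and the default 0x08/1 otherwise.
                 */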
1547                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1548                         tipg_reg |= 0xFF;
1549                         /* Reduce Rx latency in analog PHY */
1550                         emi_val = 0;
1551                 } else if (hw->mac.type >= e1000_pch_spt &&
1552                            duplex == FULL_DUPLEX && speed != SPEED_1000) {
1553                         tipg_reg |= 0xC;
1554                         emi_val = 1;
1555                 } else {
1556                         /* Roll back to the default values */
1557                         tipg_reg |= 0x08;
1558                         emi_val = 1;
1559                 }
1560
1561                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1562
1563                 ret_val = hw->phy.ops.acquire(hw);
1564                 if (ret_val)
1565                         return ret_val;
1566
1567                 if (hw->mac.type == e1000_pch2lan)
1568                         emi_addr = I82579_RX_CONFIG;
1569                 else
1570                         emi_addr = I217_RX_CONFIG;
1571                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1572
1573
1574                 if (hw->mac.type >= e1000_pch_lpt) {
1575                         u16 phy_reg;
1576
1577                         hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1578                                                     &phy_reg);
1579                         phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1580                         if (speed == SPEED_100 || speed == SPEED_10)
1581                                 phy_reg |= 0x3E8;
1582                         else
1583                                 phy_reg |= 0xFA;
1584                         hw->phy.ops.write_reg_locked(hw,
1585                                                      I217_PLL_CLOCK_GATE_REG,
1586                                                      phy_reg);
1587                 }
1588                 hw->phy.ops.release(hw);
1589
1590                 if (ret_val)
1591                         return ret_val;
1592
1593                 if (hw->mac.type >= e1000_pch_spt) {
1594                         u16 data;
1595                         u16 ptr_gap;
1596
1597                         if (speed == SPEED_1000) {
1598                                 ret_val = hw->phy.ops.acquire(hw);
1599                                 if (ret_val)
1600                                         return ret_val;
1601
1602                                 ret_val = hw->phy.ops.read_reg_locked(hw,
1603                                                               PHY_REG(776, 20),
1604                                                               &data);
1605                                 if (ret_val) {
1606                                         hw->phy.ops.release(hw);
1607                                         return ret_val;
1608                                 }
1609
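                                /* Bits 11:2 of PHY page 776, register 20 are
                                 * extracted below as a pointer-gap value and,
                                 * if less than 0x18, raised to that minimum
                                 * while linked at 1000 Mb/s.
                                 */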
1610                                 ptr_gap = (data & (0x3FF << 2)) >> 2;
1611                                 if (ptr_gap < 0x18) {
1612                                         data &= ~(0x3FF << 2);
1613                                         data |= (0x18 << 2);
1614                                         ret_val =
1615                                                 hw->phy.ops.write_reg_locked(hw,
1616                                                         PHY_REG(776, 20), data);
1617                                 }
1618                                 hw->phy.ops.release(hw);
1619                                 if (ret_val)
1620                                         return ret_val;
1621                         } else {
1622                                 ret_val = hw->phy.ops.acquire(hw);
1623                                 if (ret_val)
1624                                         return ret_val;
1625
1626                                 ret_val = hw->phy.ops.write_reg_locked(hw,
1627                                                              PHY_REG(776, 20),
1628                                                              0xC023);
1629                                 hw->phy.ops.release(hw);
1630                                 if (ret_val)
1631                                         return ret_val;
1632
1633                         }
1634                 }
1635         }
1636
1637         /* I217 Packet Loss issue:
1638          * ensure that FEXTNVM4 Beacon Duration is set correctly
1639          * on power up.
1640          * Set the Beacon Duration for I217 to 8 usec
1641          */
1642         if (hw->mac.type >= e1000_pch_lpt) {
1643                 u32 mac_reg;
1644
1645                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1646                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1647                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1648                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1649         }
1650
1651         /* Work-around I218 hang issue */
1652         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1653             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1654             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1655             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1656                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1657                 if (ret_val)
1658                         return ret_val;
1659         }
1660         /* Clear link partner's EEE ability */
1661         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1662
1663         /* Configure K0s minimum time */
1664         if (hw->mac.type >= e1000_pch_lpt) {
1665                 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1666         }
1667
1668         if (hw->mac.type >= e1000_pch_lpt) {
1669                 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1670
1671                 if (hw->mac.type == e1000_pch_spt) {
1672                         /* FEXTNVM6 K1-off workaround - for SPT only */
1673                         u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1674
1675                         if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1676                                 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1677                         else
1678                                 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1679                 }
1680
1681                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1682         }
1683
1684         if (!link)
1685                 return E1000_SUCCESS; /* No link detected */
1686
1687         mac->get_link_status = false;
1688
1689         switch (hw->mac.type) {
1690         case e1000_pch2lan:
1691                 ret_val = e1000_k1_workaround_lv(hw);
1692                 if (ret_val)
1693                         return ret_val;
1694                 /* fall-thru */
1695         case e1000_pchlan:
1696                 if (hw->phy.type == e1000_phy_82578) {
1697                         ret_val = e1000_link_stall_workaround_hv(hw);
1698                         if (ret_val)
1699                                 return ret_val;
1700                 }
1701
1702                 /* Workaround for PCHx parts in half-duplex:
1703                  * Set the number of preambles removed from the packet
1704                  * when it is passed from the PHY to the MAC to prevent
1705                  * the MAC from misinterpreting the packet type.
1706                  */
1707                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1708                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1709
1710                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1711                     E1000_STATUS_FD)
1712                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1713
1714                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1715                 break;
1716         default:
1717                 break;
1718         }
1719
1720         /* Check if there was DownShift; this must be checked
1721          * immediately after link-up
1722          */
1723         e1000_check_downshift_generic(hw);
1724
1725         /* Enable/Disable EEE after link up */
1726         if (hw->phy.type > e1000_phy_82579) {
1727                 ret_val = e1000_set_eee_pchlan(hw);
1728                 if (ret_val)
1729                         return ret_val;
1730         }
1731
1732         /* If we are forcing speed/duplex, then we simply return since
1733          * we have already determined whether we have link or not.
1734          */
1735         if (!mac->autoneg)
1736                 return -E1000_ERR_CONFIG;
1737
1738         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1739          * of MAC speed/duplex configuration.  So we only need to
1740          * configure Collision Distance in the MAC.
1741          */
1742         mac->ops.config_collision_dist(hw);
1743
1744         /* Configure Flow Control now that Auto-Neg has completed.
1745          * First, we need to restore the desired flow control
1746          * settings because we may have had to re-autoneg with a
1747          * different link partner.
1748          */
1749         ret_val = e1000_config_fc_after_link_up_generic(hw);
1750         if (ret_val)
1751                 DEBUGOUT("Error configuring flow control\n");
1752
1753         return ret_val;
1754 }
1755
1756 /**
1757  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1758  *  @hw: pointer to the HW structure
1759  *
1760  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1761  **/
1762 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1763 {
1764         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1765
1766         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1767         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1768         switch (hw->mac.type) {
1769         case e1000_ich8lan:
1770         case e1000_ich9lan:
1771         case e1000_ich10lan:
1772                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1773                 break;
1774         case e1000_pchlan:
1775         case e1000_pch2lan:
1776         case e1000_pch_lpt:
1777         case e1000_pch_spt:
1778                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1779                 break;
1780         default:
1781                 break;
1782         }
1783 }
1784
1785 /**
1786  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1787  *  @hw: pointer to the HW structure
1788  *
1789  *  Acquires the mutex for performing NVM operations.
1790  **/
1791 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1792 {
1793         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1794
1795         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1796
1797         return E1000_SUCCESS;
1798 }
1799
1800 /**
1801  *  e1000_release_nvm_ich8lan - Release NVM mutex
1802  *  @hw: pointer to the HW structure
1803  *
1804  *  Releases the mutex used while performing NVM operations.
1805  **/
1806 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1807 {
1808         DEBUGFUNC("e1000_release_nvm_ich8lan");
1809
1810         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1811
1812         return;
1813 }
1814
1815 /**
1816  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1817  *  @hw: pointer to the HW structure
1818  *
1819  *  Acquires the software control flag for performing PHY and select
1820  *  MAC CSR accesses.
1821  **/
1822 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1823 {
1824         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1825         s32 ret_val = E1000_SUCCESS;
1826
1827         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1828
1829         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1830
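        /* Acquisition is done in two steps below: first wait for the SWFLAG
         * bit to become free (other software may hold it), then set it and
         * poll until the read-back confirms the grant, since firmware or
         * hardware can still own the semaphore.
         */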
1831         while (timeout) {
1832                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1833                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1834                         break;
1835
1836                 msec_delay_irq(1);
1837                 timeout--;
1838         }
1839
1840         if (!timeout) {
1841                 DEBUGOUT("SW has already locked the resource.\n");
1842                 ret_val = -E1000_ERR_CONFIG;
1843                 goto out;
1844         }
1845
1846         timeout = SW_FLAG_TIMEOUT;
1847
1848         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1849         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1850
1851         while (timeout) {
1852                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1853                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1854                         break;
1855
1856                 msec_delay_irq(1);
1857                 timeout--;
1858         }
1859
1860         if (!timeout) {
1861                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1862                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1863                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1864                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1865                 ret_val = -E1000_ERR_CONFIG;
1866                 goto out;
1867         }
1868
1869 out:
1870         if (ret_val)
1871                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1872
1873         return ret_val;
1874 }
1875
1876 /**
1877  *  e1000_release_swflag_ich8lan - Release software control flag
1878  *  @hw: pointer to the HW structure
1879  *
1880  *  Releases the software control flag for performing PHY and select
1881  *  MAC CSR accesses.
1882  **/
1883 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1884 {
1885         u32 extcnf_ctrl;
1886
1887         DEBUGFUNC("e1000_release_swflag_ich8lan");
1888
1889         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1890
1891         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1892                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1893                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1894         } else {
1895                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1896         }
1897
1898         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1899
1900         return;
1901 }
1902
1903 /**
1904  *  e1000_check_mng_mode_ich8lan - Checks management mode
1905  *  @hw: pointer to the HW structure
1906  *
1907  *  This checks if the adapter has any manageability enabled.
1908  *  This is a function pointer entry point only called by read/write
1909  *  routines for the PHY and NVM parts.
1910  **/
1911 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1912 {
1913         u32 fwsm;
1914
1915         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1916
1917         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1918
1919         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1920                ((fwsm & E1000_FWSM_MODE_MASK) ==
1921                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1922 }
1923
1924 /**
1925  *  e1000_check_mng_mode_pchlan - Checks management mode
1926  *  @hw: pointer to the HW structure
1927  *
1928  *  This checks if the adapter has iAMT enabled.
1929  *  This is a function pointer entry point only called by read/write
1930  *  routines for the PHY and NVM parts.
1931  **/
1932 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1933 {
1934         u32 fwsm;
1935
1936         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1937
1938         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1939
1940         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1941                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1942 }
1943
1944 /**
1945  *  e1000_rar_set_pch2lan - Set receive address register
1946  *  @hw: pointer to the HW structure
1947  *  @addr: pointer to the receive address
1948  *  @index: receive address array register
1949  *
1950  *  Sets the receive address array register at index to the address passed
1951  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1952  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1953  *  Use SHRA[0-3] in place of those reserved for ME.
1954  **/
1955 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1956 {
1957         u32 rar_low, rar_high;
1958
1959         DEBUGFUNC("e1000_rar_set_pch2lan");
1960
1961         /* HW expects these in little endian so we reverse the byte order
1962          * from network order (big endian) to little endian
1963          */
1964         rar_low = ((u32) addr[0] |
1965                    ((u32) addr[1] << 8) |
1966                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1967
1968         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
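        /* Illustrative example: for address 00:11:22:33:44:55 the packing
         * above yields rar_low = 0x33221100 and rar_high = 0x5544.
         */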
1969
1970         /* If MAC address zero, no need to set the AV bit */
1971         if (rar_low || rar_high)
1972                 rar_high |= E1000_RAH_AV;
1973
1974         if (index == 0) {
1975                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1976                 E1000_WRITE_FLUSH(hw);
1977                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1978                 E1000_WRITE_FLUSH(hw);
1979                 return E1000_SUCCESS;
1980         }
1981
1982         /* RAR[1-6] are owned by manageability.  Skip those and program the
1983          * next address into the SHRA register array.
1984          */
1985         if (index < (u32) (hw->mac.rar_entry_count)) {
1986                 s32 ret_val;
1987
1988                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1989                 if (ret_val)
1990                         goto out;
1991
1992                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1993                 E1000_WRITE_FLUSH(hw);
1994                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1995                 E1000_WRITE_FLUSH(hw);
1996
1997                 e1000_release_swflag_ich8lan(hw);
1998
1999                 /* verify the register updates */
2000                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2001                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2002                         return E1000_SUCCESS;
2003
2004                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2005                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2006         }
2007
2008 out:
2009         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2010         return -E1000_ERR_CONFIG;
2011 }
2012
2013 /**
2014  *  e1000_rar_set_pch_lpt - Set receive address registers
2015  *  @hw: pointer to the HW structure
2016  *  @addr: pointer to the receive address
2017  *  @index: receive address array register
2018  *
2019  *  Sets the receive address register array at index to the address passed
2020  *  in by addr. For LPT, RAR[0] is the base address register that is to
2021  *  contain the MAC address. SHRA[0-10] are the shared receive address
2022  *  registers that are shared between the Host and manageability engine (ME).
2023  **/
2024 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2025 {
2026         u32 rar_low, rar_high;
2027         u32 wlock_mac;
2028
2029         DEBUGFUNC("e1000_rar_set_pch_lpt");
2030
2031         /* HW expects these in little endian so we reverse the byte order
2032          * from network order (big endian) to little endian
2033          */
2034         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2035                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2036
2037         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2038
2039         /* If MAC address zero, no need to set the AV bit */
2040         if (rar_low || rar_high)
2041                 rar_high |= E1000_RAH_AV;
2042
2043         if (index == 0) {
2044                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2045                 E1000_WRITE_FLUSH(hw);
2046                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2047                 E1000_WRITE_FLUSH(hw);
2048                 return E1000_SUCCESS;
2049         }
2050
2051         /* The manageability engine (ME) can lock certain SHRAR registers that
2052          * it is using - those registers are unavailable for use.
2053          */
2054         if (index < hw->mac.rar_entry_count) {
2055                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2056                             E1000_FWSM_WLOCK_MAC_MASK;
2057                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
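                /* As checked below, a WLOCK_MAC value of 1 is treated as all
                 * SHRAR registers locked and 0 as none locked; any other
                 * value permits writes only while index <= wlock_mac.
                 */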
2058
2059                 /* Check if all SHRAR registers are locked */
2060                 if (wlock_mac == 1)
2061                         goto out;
2062
2063                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2064                         s32 ret_val;
2065
2066                         ret_val = e1000_acquire_swflag_ich8lan(hw);
2067
2068                         if (ret_val)
2069                                 goto out;
2070
2071                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2072                                         rar_low);
2073                         E1000_WRITE_FLUSH(hw);
2074                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2075                                         rar_high);
2076                         E1000_WRITE_FLUSH(hw);
2077
2078                         e1000_release_swflag_ich8lan(hw);
2079
2080                         /* verify the register updates */
2081                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2082                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2083                                 return E1000_SUCCESS;
2084                 }
2085         }
2086
2087 out:
2088         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2089         return -E1000_ERR_CONFIG;
2090 }
2091
2092 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
2093 /**
2094  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2095  *  @hw: pointer to the HW structure
2096  *  @mc_addr_list: array of multicast addresses to program
2097  *  @mc_addr_count: number of multicast addresses to program
2098  *
2099  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2100  *  The caller must have a packed mc_addr_list of multicast addresses.
2101  **/
2102 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2103                                               u8 *mc_addr_list,
2104                                               u32 mc_addr_count)
2105 {
2106         u16 phy_reg = 0;
2107         int i;
2108         s32 ret_val;
2109
2110         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2111
2112         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2113
2114         ret_val = hw->phy.ops.acquire(hw);
2115         if (ret_val)
2116                 return;
2117
2118         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2119         if (ret_val)
2120                 goto release;
2121
2122         for (i = 0; i < hw->mac.mta_reg_count; i++) {
2123                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2124                                            (u16)(hw->mac.mta_shadow[i] &
2125                                                  0xFFFF));
2126                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2127                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
2128                                                  0xFFFF));
2129         }
2130
2131         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2132
2133 release:
2134         hw->phy.ops.release(hw);
2135 }
2136
2137 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2138 /**
2139  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2140  *  @hw: pointer to the HW structure
2141  *
2142  *  Checks if firmware is blocking the reset of the PHY.
2143  *  This is a function pointer entry point only called by
2144  *  reset routines.
2145  **/
2146 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2147 {
2148         u32 fwsm;
2149         bool blocked = false;
2150         int i = 0;
2151
2152         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2153
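        /* Poll FWSM for the PHY reset-block indication for roughly 300 msec
         * (up to 30 retries, 10 msec apart) before reporting
         * E1000_BLK_PHY_RESET.
         */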
2154         do {
2155                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2156                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2157                         blocked = true;
2158                         msec_delay(10);
2159                         continue;
2160                 }
2161                 blocked = false;
2162         } while (blocked && (i++ < 30));
2163         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2164 }
2165
2166 /**
2167  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2168  *  @hw: pointer to the HW structure
2169  *
2170  *  Assumes semaphore already acquired.
2171  *
2172  **/
2173 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2174 {
2175         u16 phy_data;
2176         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2177         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2178                 E1000_STRAP_SMT_FREQ_SHIFT;
2179         s32 ret_val;
2180
2181         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2182
2183         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2184         if (ret_val)
2185                 return ret_val;
2186
2187         phy_data &= ~HV_SMB_ADDR_MASK;
2188         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2189         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2190
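        /* The SMT frequency strap read above is applied below for i217 PHYs:
         * a value of zero is treated as "frequency not specified" by the
         * freq-- check, otherwise the two low bits of (freq - 1) are copied
         * into the HV_SMB_ADDR frequency-select bit positions.
         */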
2191         if (hw->phy.type == e1000_phy_i217) {
2192                 /* Restore SMBus frequency */
2193                 if (freq--) {
2194                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2195                         phy_data |= (freq & (1 << 0)) <<
2196                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2197                         phy_data |= (freq & (1 << 1)) <<
2198                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2199                 } else {
2200                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2201                 }
2202         }
2203
2204         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2205 }
2206
2207 /**
2208  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2209  *  @hw:   pointer to the HW structure
2210  *
2211  *  SW should configure the LCD from the NVM extended configuration region
2212  *  as a workaround for certain parts.
2213  **/
2214 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2215 {
2216         struct e1000_phy_info *phy = &hw->phy;
2217         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2218         s32 ret_val = E1000_SUCCESS;
2219         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2220
2221         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2222
2223         /* Initialize the PHY from the NVM on ICH platforms.  This
2224          * is needed due to an issue where the NVM configuration is
2225          * not properly autoloaded after power transitions.
2226          * Therefore, after each PHY reset, we will load the
2227          * configuration data out of the NVM manually.
2228          */
2229         switch (hw->mac.type) {
2230         case e1000_ich8lan:
2231                 if (phy->type != e1000_phy_igp_3)
2232                         return ret_val;
2233
2234                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2235                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2236                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2237                         break;
2238                 }
2239                 /* Fall-thru */
2240         case e1000_pchlan:
2241         case e1000_pch2lan:
2242         case e1000_pch_lpt:
2243         case e1000_pch_spt:
2244                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2245                 break;
2246         default:
2247                 return ret_val;
2248         }
2249
2250         ret_val = hw->phy.ops.acquire(hw);
2251         if (ret_val)
2252                 return ret_val;
2253
2254         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2255         if (!(data & sw_cfg_mask))
2256                 goto release;
2257
2258         /* Make sure HW does not configure LCD from PHY
2259          * extended configuration before SW configuration
2260          */
2261         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2262         if ((hw->mac.type < e1000_pch2lan) &&
2263             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2264                 goto release;
2265
2266         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2267         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2268         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2269         if (!cnf_size)
2270                 goto release;
2271
2272         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2273         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2274
2275         if (((hw->mac.type == e1000_pchlan) &&
2276              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2277             (hw->mac.type > e1000_pchlan)) {
2278                 /* HW configures the SMBus address and LEDs when the
2279                  * OEM and LCD Write Enable bits are set in the NVM.
2280                  * When both NVM bits are cleared, SW will configure
2281                  * them instead.
2282                  */
2283                 ret_val = e1000_write_smbus_addr(hw);
2284                 if (ret_val)
2285                         goto release;
2286
2287                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2288                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2289                                                         (u16)data);
2290                 if (ret_val)
2291                         goto release;
2292         }
2293
2294         /* Configure LCD from extended configuration region. */
2295
2296         /* cnf_base_addr is in DWORD */
2297         word_addr = (u16)(cnf_base_addr << 1);
2298
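        /* Each extended-configuration record read in the loop below is a
         * pair of NVM words: the register value at word_addr + i * 2 and the
         * PHY register address at word_addr + i * 2 + 1.  Page-select
         * entries only update phy_page, which is OR'd into later addresses.
         */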
2299         for (i = 0; i < cnf_size; i++) {
2300                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2301                                            &reg_data);
2302                 if (ret_val)
2303                         goto release;
2304
2305                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2306                                            1, &reg_addr);
2307                 if (ret_val)
2308                         goto release;
2309
2310                 /* Save off the PHY page for future writes. */
2311                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2312                         phy_page = reg_data;
2313                         continue;
2314                 }
2315
2316                 reg_addr &= PHY_REG_MASK;
2317                 reg_addr |= phy_page;
2318
2319                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2320                                                     reg_data);
2321                 if (ret_val)
2322                         goto release;
2323         }
2324
2325 release:
2326         hw->phy.ops.release(hw);
2327         return ret_val;
2328 }
2329
2330 /**
2331  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2332  *  @hw:   pointer to the HW structure
2333  *  @link: link up bool flag
2334  *
2335  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2336  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2337  *  If link is down, the function will restore the default K1 setting located
2338  *  in the NVM.
2339  **/
2340 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2341 {
2342         s32 ret_val = E1000_SUCCESS;
2343         u16 status_reg = 0;
2344         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2345
2346         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2347
2348         if (hw->mac.type != e1000_pchlan)
2349                 return E1000_SUCCESS;
2350
2351         /* Wrap the whole flow with the sw flag */
2352         ret_val = hw->phy.ops.acquire(hw);
2353         if (ret_val)
2354                 return ret_val;
2355
2356         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2357         if (link) {
2358                 if (hw->phy.type == e1000_phy_82578) {
2359                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2360                                                               &status_reg);
2361                         if (ret_val)
2362                                 goto release;
2363
2364                         status_reg &= (BM_CS_STATUS_LINK_UP |
2365                                        BM_CS_STATUS_RESOLVED |
2366                                        BM_CS_STATUS_SPEED_MASK);
2367
2368                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2369                                            BM_CS_STATUS_RESOLVED |
2370                                            BM_CS_STATUS_SPEED_1000))
2371                                 k1_enable = false;
2372                 }
2373
2374                 if (hw->phy.type == e1000_phy_82577) {
2375                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2376                                                               &status_reg);
2377                         if (ret_val)
2378                                 goto release;
2379
2380                         status_reg &= (HV_M_STATUS_LINK_UP |
2381                                        HV_M_STATUS_AUTONEG_COMPLETE |
2382                                        HV_M_STATUS_SPEED_MASK);
2383
2384                         if (status_reg == (HV_M_STATUS_LINK_UP |
2385                                            HV_M_STATUS_AUTONEG_COMPLETE |
2386                                            HV_M_STATUS_SPEED_1000))
2387                                 k1_enable = false;
2388                 }
2389
2390                 /* Link stall fix for link up */
2391                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2392                                                        0x0100);
2393                 if (ret_val)
2394                         goto release;
2395
2396         } else {
2397                 /* Link stall fix for link down */
2398                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2399                                                        0x4100);
2400                 if (ret_val)
2401                         goto release;
2402         }
2403
2404         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2405
2406 release:
2407         hw->phy.ops.release(hw);
2408
2409         return ret_val;
2410 }
2411
2412 /**
2413  *  e1000_configure_k1_ich8lan - Configure K1 power state
2414  *  @hw: pointer to the HW structure
2415  *  @k1_enable: K1 state to configure
2416  *
2417  *  Configure the K1 power state based on the provided parameter.
2418  *  Assumes semaphore already acquired.
2419  *
2420  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2421  **/
2422 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2423 {
2424         s32 ret_val;
2425         u32 ctrl_reg = 0;
2426         u32 ctrl_ext = 0;
2427         u32 reg = 0;
2428         u16 kmrn_reg = 0;
2429
2430         DEBUGFUNC("e1000_configure_k1_ich8lan");
2431
2432         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2433                                              &kmrn_reg);
2434         if (ret_val)
2435                 return ret_val;
2436
2437         if (k1_enable)
2438                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2439         else
2440                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2441
2442         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2443                                               kmrn_reg);
2444         if (ret_val)
2445                 return ret_val;
2446
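        /* The sequence below briefly forces the MAC speed setting (CTRL with
         * the speed bits cleared and FRCSPD set, CTRL_EXT with speed bypass
         * enabled), then restores both registers, with 20 usec settle delays
         * between the steps.
         */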
2447         usec_delay(20);
2448         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2449         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2450
2451         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2452         reg |= E1000_CTRL_FRCSPD;
2453         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2454
2455         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2456         E1000_WRITE_FLUSH(hw);
2457         usec_delay(20);
2458         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2459         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2460         E1000_WRITE_FLUSH(hw);
2461         usec_delay(20);
2462
2463         return E1000_SUCCESS;
2464 }
2465
2466 /**
2467  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2468  *  @hw:       pointer to the HW structure
2469  *  @d0_state: boolean if entering d0 or d3 device state
2470  *
2471  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2472  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2473  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2474  **/
2475 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2476 {
2477         s32 ret_val = 0;
2478         u32 mac_reg;
2479         u16 oem_reg;
2480
2481         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2482
2483         if (hw->mac.type < e1000_pchlan)
2484                 return ret_val;
2485
2486         ret_val = hw->phy.ops.acquire(hw);
2487         if (ret_val)
2488                 return ret_val;
2489
2490         if (hw->mac.type == e1000_pchlan) {
2491                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2492                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2493                         goto release;
2494         }
2495
2496         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2497         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2498                 goto release;
2499
2500         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2501
2502         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2503         if (ret_val)
2504                 goto release;
2505
2506         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2507
2508         if (d0_state) {
2509                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2510                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2511
2512                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2513                         oem_reg |= HV_OEM_BITS_LPLU;
2514         } else {
2515                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2516                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2517                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2518
2519                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2520                     E1000_PHY_CTRL_NOND0A_LPLU))
2521                         oem_reg |= HV_OEM_BITS_LPLU;
2522         }
2523
2524         /* Set Restart auto-neg to activate the bits */
2525         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2526             !hw->phy.ops.check_reset_block(hw))
2527                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2528
2529         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2530
2531 release:
2532         hw->phy.ops.release(hw);
2533
2534         return ret_val;
2535 }
2536
2537
2538 /**
2539  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2540  *  @hw:   pointer to the HW structure
2541  **/
2542 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2543 {
2544         s32 ret_val;
2545         u16 data;
2546
2547         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2548
2549         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2550         if (ret_val)
2551                 return ret_val;
2552
2553         data |= HV_KMRN_MDIO_SLOW;
2554
2555         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2556
2557         return ret_val;
2558 }
2559
2560 /**
2561  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2562  *  done after every PHY reset.
2563  **/
2564 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2565 {
2566         s32 ret_val = E1000_SUCCESS;
2567         u16 phy_data;
2568
2569         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2570
2571         if (hw->mac.type != e1000_pchlan)
2572                 return E1000_SUCCESS;
2573
2574         /* Set MDIO slow mode before any other MDIO access */
2575         if (hw->phy.type == e1000_phy_82577) {
2576                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2577                 if (ret_val)
2578                         return ret_val;
2579         }
2580
2581         if (((hw->phy.type == e1000_phy_82577) &&
2582              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2583             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2584                 /* Disable generation of early preamble */
2585                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2586                 if (ret_val)
2587                         return ret_val;
2588
2589                 /* Preamble tuning for SSC */
2590                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2591                                                 0xA204);
2592                 if (ret_val)
2593                         return ret_val;
2594         }
2595
2596         if (hw->phy.type == e1000_phy_82578) {
2597                 /* Return registers to default by doing a soft reset then
2598                  * writing 0x3140 to the control register.
2599                  */
2600                 if (hw->phy.revision < 2) {
2601                         e1000_phy_sw_reset_generic(hw);
2602                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2603                                                         0x3140);
2604                 }
2605         }
2606
2607         /* Select page 0 */
2608         ret_val = hw->phy.ops.acquire(hw);
2609         if (ret_val)
2610                 return ret_val;
2611
2612         hw->phy.addr = 1;
2613         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2614         hw->phy.ops.release(hw);
2615         if (ret_val)
2616                 return ret_val;
2617
2618         /* Configure the K1 Si workaround during phy reset assuming there is
2619          * link so that it disables K1 if link is at 1Gbps.
2620          */
2621         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2622         if (ret_val)
2623                 return ret_val;
2624
2625         /* Workaround for link disconnects on a busy hub in half duplex */
2626         ret_val = hw->phy.ops.acquire(hw);
2627         if (ret_val)
2628                 return ret_val;
2629         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2630         if (ret_val)
2631                 goto release;
2632         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2633                                                phy_data & 0x00FF);
2634         if (ret_val)
2635                 goto release;
2636
2637         /* set MSE higher to enable link to stay up when noise is high */
2638         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2639 release:
2640         hw->phy.ops.release(hw);
2641
2642         return ret_val;
2643 }
2644
2645 /**
2646  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2647  *  @hw:   pointer to the HW structure
2648  **/
2649 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2650 {
2651         u32 mac_reg;
2652         u16 i, phy_reg = 0;
2653         s32 ret_val;
2654
2655         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2656
2657         ret_val = hw->phy.ops.acquire(hw);
2658         if (ret_val)
2659                 return;
2660         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2661         if (ret_val)
2662                 goto release;
2663
2664         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2665         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2666                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2667                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2668                                            (u16)(mac_reg & 0xFFFF));
2669                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2670                                            (u16)((mac_reg >> 16) & 0xFFFF));
2671
2672                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2673                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2674                                            (u16)(mac_reg & 0xFFFF));
2675                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2676                                            (u16)((mac_reg & E1000_RAH_AV)
2677                                                  >> 16));
2678         }
2679
2680         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2681
2682 release:
2683         hw->phy.ops.release(hw);
2684 }
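
/* Editor's note (illustrative, not part of the shared code): each 32-bit
 * RAL/RAH pair above is split into four 16-bit PHY wakeup-area writes.
 * For a hypothetical address a0:36:9f:12:34:56 with the Address Valid bit
 * set (E1000_RAH_AV is bit 31):
 *
 *   RAL = 0x129f36a0, RAH = 0x80005634
 *   BM_RAR_L(i)    <- 0x36a0   (RAL bits 15:0)
 *   BM_RAR_M(i)    <- 0x129f   (RAL bits 31:16)
 *   BM_RAR_H(i)    <- 0x5634   (RAH bits 15:0)
 *   BM_RAR_CTRL(i) <- 0x8000   ((RAH & E1000_RAH_AV) >> 16, AV lands in bit 15)
 */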
2685
2686 #ifndef CRC32_OS_SUPPORT
2687 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2688 {
2689         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2690         u32 i, j, mask, crc;
2691
2692         DEBUGFUNC("e1000_calc_rx_da_crc");
2693
2694         crc = 0xffffffff;
2695         for (i = 0; i < 6; i++) {
2696                 crc = crc ^ mac[i];
2697                 for (j = 8; j > 0; j--) {
2698                         mask = (crc & 1) * (-1);
2699                         crc = (crc >> 1) ^ (poly & mask);
2700                 }
2701         }
2702         return ~crc;
2703 }
2704
2705 #endif /* CRC32_OS_SUPPORT */
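
/* Editor's note: the loop above is a bit-reflected CRC-32 (IEEE 802.3) over
 * the 6-byte destination address.  The trick on each bit is
 *
 *     mask = (crc & 1) * (-1);
 *
 * which evaluates to 0xFFFFFFFF when the low bit is set and to 0 otherwise,
 * so "(crc >> 1) ^ (poly & mask)" conditionally XORs in the reversed
 * polynomial 0xEDB88320 without a branch.  The final ~crc is the value
 * written to the PCH_RAICC register below.
 */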
2706 /**
2707  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2708  *  with 82579 PHY
2709  *  @hw: pointer to the HW structure
2710  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2711  **/
2712 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2713 {
2714         s32 ret_val = E1000_SUCCESS;
2715         u16 phy_reg, data;
2716         u32 mac_reg;
2717         u16 i;
2718
2719         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2720
2721         if (hw->mac.type < e1000_pch2lan)
2722                 return E1000_SUCCESS;
2723
2724         /* disable Rx path while enabling/disabling workaround */
2725         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2726         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2727                                         phy_reg | (1 << 14));
2728         if (ret_val)
2729                 return ret_val;
2730
2731         if (enable) {
2732                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2733                  * SHRAL/H) and initial CRC values to the MAC
2734                  */
2735                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2736                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2737                         u32 addr_high, addr_low;
2738
2739                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2740                         if (!(addr_high & E1000_RAH_AV))
2741                                 continue;
2742                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2743                         mac_addr[0] = (addr_low & 0xFF);
2744                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2745                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2746                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2747                         mac_addr[4] = (addr_high & 0xFF);
2748                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2749
2750 #ifndef CRC32_OS_SUPPORT
2751                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2752                                         e1000_calc_rx_da_crc(mac_addr));
2753 #else /* CRC32_OS_SUPPORT */
2754                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2755                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2756 #endif /* CRC32_OS_SUPPORT */
2757                 }
2758
2759                 /* Write Rx addresses to the PHY */
2760                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2761
2762                 /* Enable jumbo frame workaround in the MAC */
2763                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2764                 mac_reg &= ~(1 << 14);
2765                 mac_reg |= (7 << 15);
2766                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2767
2768                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2769                 mac_reg |= E1000_RCTL_SECRC;
2770                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2771
2772                 ret_val = e1000_read_kmrn_reg_generic(hw,
2773                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2774                                                 &data);
2775                 if (ret_val)
2776                         return ret_val;
2777                 ret_val = e1000_write_kmrn_reg_generic(hw,
2778                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2779                                                 data | (1 << 0));
2780                 if (ret_val)
2781                         return ret_val;
2782                 ret_val = e1000_read_kmrn_reg_generic(hw,
2783                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2784                                                 &data);
2785                 if (ret_val)
2786                         return ret_val;
2787                 data &= ~(0xF << 8);
2788                 data |= (0xB << 8);
2789                 ret_val = e1000_write_kmrn_reg_generic(hw,
2790                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2791                                                 data);
2792                 if (ret_val)
2793                         return ret_val;
2794
2795                 /* Enable jumbo frame workaround in the PHY */
2796                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2797                 data &= ~(0x7F << 5);
2798                 data |= (0x37 << 5);
2799                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2800                 if (ret_val)
2801                         return ret_val;
2802                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2803                 data &= ~(1 << 13);
2804                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2805                 if (ret_val)
2806                         return ret_val;
2807                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2808                 data &= ~(0x3FF << 2);
2809                 data |= (E1000_TX_PTR_GAP << 2);
2810                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2811                 if (ret_val)
2812                         return ret_val;
2813                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2814                 if (ret_val)
2815                         return ret_val;
2816                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2817                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2818                                                 (1 << 10));
2819                 if (ret_val)
2820                         return ret_val;
2821         } else {
2822                 /* Write MAC register values back to h/w defaults */
2823                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2824                 mac_reg &= ~(0xF << 14);
2825                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2826
2827                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2828                 mac_reg &= ~E1000_RCTL_SECRC;
2829                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2830
2831                 ret_val = e1000_read_kmrn_reg_generic(hw,
2832                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2833                                                 &data);
2834                 if (ret_val)
2835                         return ret_val;
2836                 ret_val = e1000_write_kmrn_reg_generic(hw,
2837                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2838                                                 data & ~(1 << 0));
2839                 if (ret_val)
2840                         return ret_val;
2841                 ret_val = e1000_read_kmrn_reg_generic(hw,
2842                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2843                                                 &data);
2844                 if (ret_val)
2845                         return ret_val;
2846                 data &= ~(0xF << 8);
2847                 data |= (0xB << 8);
2848                 ret_val = e1000_write_kmrn_reg_generic(hw,
2849                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2850                                                 data);
2851                 if (ret_val)
2852                         return ret_val;
2853
2854                 /* Write PHY register values back to h/w defaults */
2855                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2856                 data &= ~(0x7F << 5);
2857                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2858                 if (ret_val)
2859                         return ret_val;
2860                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2861                 data |= (1 << 13);
2862                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2863                 if (ret_val)
2864                         return ret_val;
2865                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2866                 data &= ~(0x3FF << 2);
2867                 data |= (0x8 << 2);
2868                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2869                 if (ret_val)
2870                         return ret_val;
2871                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2872                 if (ret_val)
2873                         return ret_val;
2874                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2875                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2876                                                 ~(1 << 10));
2877                 if (ret_val)
2878                         return ret_val;
2879         }
2880
2881         /* re-enable Rx path after enabling/disabling workaround */
2882         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2883                                      ~(1 << 14));
2884 }
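
/* Editor's note (illustrative sketch, not part of the shared code): a driver
 * would typically toggle this workaround from its MTU-change path, e.g.
 *
 *     ret = e1000_lv_jumbo_workaround_ich8lan(hw, max_frame_size > 1522);
 *
 * where "max_frame_size" is a hypothetical driver variable.  Note that
 * enabling the workaround also sets RCTL.SECRC (CRC stripping), which the
 * caller must account for when sizing receive buffers.
 */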
2885
2886 /**
2887  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2888  *  done after every PHY reset.
2889  **/
2890 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2891 {
2892         s32 ret_val = E1000_SUCCESS;
2893
2894         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2895
2896         if (hw->mac.type != e1000_pch2lan)
2897                 return E1000_SUCCESS;
2898
2899         /* Set MDIO slow mode before any other MDIO access */
2900         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2901         if (ret_val)
2902                 return ret_val;
2903
2904         ret_val = hw->phy.ops.acquire(hw);
2905         if (ret_val)
2906                 return ret_val;
2907         /* set MSE higher to enable link to stay up when noise is high */
2908         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2909         if (ret_val)
2910                 goto release;
2911         /* drop the link after the MSE threshold has been reached 5 times */
2912         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2913 release:
2914         hw->phy.ops.release(hw);
2915
2916         return ret_val;
2917 }
2918
2919 /**
2920  *  e1000_k1_workaround_lv - K1 Si workaround
2921  *  @hw:   pointer to the HW structure
2922  *
2923  *  Workaround to set the K1 beacon duration for 82579 parts linked at 10Mbps.
2924  *  Disable K1 for 1000 and 100 speeds.
2925  **/
2926 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2927 {
2928         s32 ret_val = E1000_SUCCESS;
2929         u16 status_reg = 0;
2930
2931         DEBUGFUNC("e1000_k1_workaround_lv");
2932
2933         if (hw->mac.type != e1000_pch2lan)
2934                 return E1000_SUCCESS;
2935
2936         /* Set K1 beacon duration based on 10Mbps speed */
2937         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2938         if (ret_val)
2939                 return ret_val;
2940
2941         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2942             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2943                 if (status_reg &
2944                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2945                         u16 pm_phy_reg;
2946
2947                         /* LV 1G/100 packet drop issue workaround */
2948                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2949                                                        &pm_phy_reg);
2950                         if (ret_val)
2951                                 return ret_val;
2952                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2953                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2954                                                         pm_phy_reg);
2955                         if (ret_val)
2956                                 return ret_val;
2957                 } else {
2958                         u32 mac_reg;
2959                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2960                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2961                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2962                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2963                 }
2964         }
2965
2966         return ret_val;
2967 }
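
/* Editor's note: once link is up and auto-negotiation has completed, the
 * function above takes one of two actions.  At 1000/100 Mbps it clears
 * HV_PM_CTRL_K1_ENABLE in the PHY so K1 is not entered at all; at 10 Mbps it
 * leaves K1 enabled but lengthens the K1 beacon via the FEXTNVM4 duration
 * field (the 16-usec encoding written above).
 */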
2968
2969 /**
2970  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2971  *  @hw:   pointer to the HW structure
2972  *  @gate: boolean set to true to gate, false to ungate
2973  *
2974  *  Gate/ungate the automatic PHY configuration via hardware; perform
2975  *  the configuration via software instead.
2976  **/
2977 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2978 {
2979         u32 extcnf_ctrl;
2980
2981         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2982
2983         if (hw->mac.type < e1000_pch2lan)
2984                 return;
2985
2986         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2987
2988         if (gate)
2989                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2990         else
2991                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2992
2993         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2994 }
2995
2996 /**
2997  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2998  *  @hw: pointer to the HW structure
2999  *
3000  *  Check the appropriate indication the MAC has finished configuring the
3001  *  PHY after a software reset.
3002  **/
3003 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3004 {
3005         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3006
3007         DEBUGFUNC("e1000_lan_init_done_ich8lan");
3008
3009         /* Wait for basic configuration to complete before proceeding */
3010         do {
3011                 data = E1000_READ_REG(hw, E1000_STATUS);
3012                 data &= E1000_STATUS_LAN_INIT_DONE;
3013                 usec_delay(100);
3014         } while ((!data) && --loop);
3015
3016         /* If basic configuration is incomplete before the above loop
3017          * count reaches 0, loading the configuration from NVM will
3018          * leave the PHY in a bad state possibly resulting in no link.
3019          */
3020         if (loop == 0)
3021                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3022
3023         /* Clear the Init Done bit for the next init event */
3024         data = E1000_READ_REG(hw, E1000_STATUS);
3025         data &= ~E1000_STATUS_LAN_INIT_DONE;
3026         E1000_WRITE_REG(hw, E1000_STATUS, data);
3027 }
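
/* Editor's note: the poll above bounds the wait to roughly
 * E1000_ICH8_LAN_INIT_TIMEOUT * 100 microseconds.  If LAN_INIT_DONE never
 * asserts, the function only logs a message and the caller proceeds, which
 * is why a too-short timeout can surface later as a PHY with no link.
 */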
3028
3029 /**
3030  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3031  *  @hw: pointer to the HW structure
3032  **/
3033 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3034 {
3035         s32 ret_val = E1000_SUCCESS;
3036         u16 reg;
3037
3038         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3039
3040         if (hw->phy.ops.check_reset_block(hw))
3041                 return E1000_SUCCESS;
3042
3043         /* Allow time for h/w to get to quiescent state after reset */
3044         msec_delay(10);
3045
3046         /* Perform any necessary post-reset workarounds */
3047         switch (hw->mac.type) {
3048         case e1000_pchlan:
3049                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3050                 if (ret_val)
3051                         return ret_val;
3052                 break;
3053         case e1000_pch2lan:
3054                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3055                 if (ret_val)
3056                         return ret_val;
3057                 break;
3058         default:
3059                 break;
3060         }
3061
3062         /* Clear the host wakeup bit after LCD reset */
3063         if (hw->mac.type >= e1000_pchlan) {
3064                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3065                 reg &= ~BM_WUC_HOST_WU_BIT;
3066                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3067         }
3068
3069         /* Configure the LCD with the extended configuration region in NVM */
3070         ret_val = e1000_sw_lcd_config_ich8lan(hw);
3071         if (ret_val)
3072                 return ret_val;
3073
3074         /* Configure the LCD with the OEM bits in NVM */
3075         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
3076
3077         if (hw->mac.type == e1000_pch2lan) {
3078                 /* Ungate automatic PHY configuration on non-managed 82579 */
3079                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3080                     E1000_ICH_FWSM_FW_VALID)) {
3081                         msec_delay(10);
3082                         e1000_gate_hw_phy_config_ich8lan(hw, false);
3083                 }
3084
3085                 /* Set EEE LPI Update Timer to 200usec */
3086                 ret_val = hw->phy.ops.acquire(hw);
3087                 if (ret_val)
3088                         return ret_val;
3089                 ret_val = e1000_write_emi_reg_locked(hw,
3090                                                      I82579_LPI_UPDATE_TIMER,
3091                                                      0x1387);
3092                 hw->phy.ops.release(hw);
3093         }
3094
3095         return ret_val;
3096 }
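
/* Editor's note: the EMI write above programs I82579_LPI_UPDATE_TIMER with
 * 0x1387 (4999 decimal), which the in-code comment equates to a 200-usec EEE
 * LPI update interval; the unit conversion is a property of the 82579 EMI
 * register and is not spelled out here.
 */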
3097
3098 /**
3099  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3100  *  @hw: pointer to the HW structure
3101  *
3102  *  Resets the PHY
3103  *  This is a function pointer entry point called by drivers
3104  *  or other shared routines.
3105  **/
3106 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3107 {
3108         s32 ret_val = E1000_SUCCESS;
3109
3110         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3111
3112         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3113         if ((hw->mac.type == e1000_pch2lan) &&
3114             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3115                 e1000_gate_hw_phy_config_ich8lan(hw, true);
3116
3117         ret_val = e1000_phy_hw_reset_generic(hw);
3118         if (ret_val)
3119                 return ret_val;
3120
3121         return e1000_post_phy_reset_ich8lan(hw);
3122 }
3123
3124 /**
3125  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3126  *  @hw: pointer to the HW structure
3127  *  @active: true to enable LPLU, false to disable
3128  *
3129  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
3130  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
3131  *  not set the PHY speed. This function manually sets the LPLU bit and
3132  *  restarts auto-neg as hw would do. D3 and D0 LPLU will call the same function
3133  *  since it configures the same bit.
3134  **/
3135 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3136 {
3137         s32 ret_val;
3138         u16 oem_reg;
3139
3140         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3141         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3142         if (ret_val)
3143                 return ret_val;
3144
3145         if (active)
3146                 oem_reg |= HV_OEM_BITS_LPLU;
3147         else
3148                 oem_reg &= ~HV_OEM_BITS_LPLU;
3149
3150         if (!hw->phy.ops.check_reset_block(hw))
3151                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3152
3153         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3154 }
3155
3156 /**
3157  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3158  *  @hw: pointer to the HW structure
3159  *  @active: true to enable LPLU, false to disable
3160  *
3161  *  Sets the LPLU D0 state according to the active flag.  When
3162  *  activating LPLU this function also disables smart speed
3163  *  and vice versa.  LPLU will not be activated unless the
3164  *  device autonegotiation advertisement meets standards of
3165  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3166  *  This is a function pointer entry point only called by
3167  *  PHY setup routines.
3168  **/
3169 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3170 {
3171         struct e1000_phy_info *phy = &hw->phy;
3172         u32 phy_ctrl;
3173         s32 ret_val = E1000_SUCCESS;
3174         u16 data;
3175
3176         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3177
3178         if (phy->type == e1000_phy_ife)
3179                 return E1000_SUCCESS;
3180
3181         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3182
3183         if (active) {
3184                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3185                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3186
3187                 if (phy->type != e1000_phy_igp_3)
3188                         return E1000_SUCCESS;
3189
3190                 /* Call gig speed drop workaround on LPLU before accessing
3191                  * any PHY registers
3192                  */
3193                 if (hw->mac.type == e1000_ich8lan)
3194                         e1000_gig_downshift_workaround_ich8lan(hw);
3195
3196                 /* When LPLU is enabled, we should disable SmartSpeed */
3197                 ret_val = phy->ops.read_reg(hw,
3198                                             IGP01E1000_PHY_PORT_CONFIG,
3199                                             &data);
3200                 if (ret_val)
3201                         return ret_val;
3202                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3203                 ret_val = phy->ops.write_reg(hw,
3204                                              IGP01E1000_PHY_PORT_CONFIG,
3205                                              data);
3206                 if (ret_val)
3207                         return ret_val;
3208         } else {
3209                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3210                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3211
3212                 if (phy->type != e1000_phy_igp_3)
3213                         return E1000_SUCCESS;
3214
3215                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3216                  * during Dx states where the power conservation is most
3217                  * important.  During driver activity we should enable
3218                  * SmartSpeed, so performance is maintained.
3219                  */
3220                 if (phy->smart_speed == e1000_smart_speed_on) {
3221                         ret_val = phy->ops.read_reg(hw,
3222                                                     IGP01E1000_PHY_PORT_CONFIG,
3223                                                     &data);
3224                         if (ret_val)
3225                                 return ret_val;
3226
3227                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3228                         ret_val = phy->ops.write_reg(hw,
3229                                                      IGP01E1000_PHY_PORT_CONFIG,
3230                                                      data);
3231                         if (ret_val)
3232                                 return ret_val;
3233                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3234                         ret_val = phy->ops.read_reg(hw,
3235                                                     IGP01E1000_PHY_PORT_CONFIG,
3236                                                     &data);
3237                         if (ret_val)
3238                                 return ret_val;
3239
3240                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3241                         ret_val = phy->ops.write_reg(hw,
3242                                                      IGP01E1000_PHY_PORT_CONFIG,
3243                                                      data);
3244                         if (ret_val)
3245                                 return ret_val;
3246                 }
3247         }
3248
3249         return E1000_SUCCESS;
3250 }
3251
3252 /**
3253  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3254  *  @hw: pointer to the HW structure
3255  *  @active: true to enable LPLU, false to disable
3256  *
3257  *  Sets the LPLU D3 state according to the active flag.  When
3258  *  activating LPLU this function also disables smart speed
3259  *  and vice versa.  LPLU will not be activated unless the
3260  *  device autonegotiation advertisement meets standards of
3261  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3262  *  This is a function pointer entry point only called by
3263  *  PHY setup routines.
3264  **/
3265 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3266 {
3267         struct e1000_phy_info *phy = &hw->phy;
3268         u32 phy_ctrl;
3269         s32 ret_val = E1000_SUCCESS;
3270         u16 data;
3271
3272         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3273
3274         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3275
3276         if (!active) {
3277                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3278                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3279
3280                 if (phy->type != e1000_phy_igp_3)
3281                         return E1000_SUCCESS;
3282
3283                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3284                  * during Dx states where the power conservation is most
3285                  * important.  During driver activity we should enable
3286                  * SmartSpeed, so performance is maintained.
3287                  */
3288                 if (phy->smart_speed == e1000_smart_speed_on) {
3289                         ret_val = phy->ops.read_reg(hw,
3290                                                     IGP01E1000_PHY_PORT_CONFIG,
3291                                                     &data);
3292                         if (ret_val)
3293                                 return ret_val;
3294
3295                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3296                         ret_val = phy->ops.write_reg(hw,
3297                                                      IGP01E1000_PHY_PORT_CONFIG,
3298                                                      data);
3299                         if (ret_val)
3300                                 return ret_val;
3301                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3302                         ret_val = phy->ops.read_reg(hw,
3303                                                     IGP01E1000_PHY_PORT_CONFIG,
3304                                                     &data);
3305                         if (ret_val)
3306                                 return ret_val;
3307
3308                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3309                         ret_val = phy->ops.write_reg(hw,
3310                                                      IGP01E1000_PHY_PORT_CONFIG,
3311                                                      data);
3312                         if (ret_val)
3313                                 return ret_val;
3314                 }
3315         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3316                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3317                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3318                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3319                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3320
3321                 if (phy->type != e1000_phy_igp_3)
3322                         return E1000_SUCCESS;
3323
3324                 /* Call gig speed drop workaround on LPLU before accessing
3325                  * any PHY registers
3326                  */
3327                 if (hw->mac.type == e1000_ich8lan)
3328                         e1000_gig_downshift_workaround_ich8lan(hw);
3329
3330                 /* When LPLU is enabled, we should disable SmartSpeed */
3331                 ret_val = phy->ops.read_reg(hw,
3332                                             IGP01E1000_PHY_PORT_CONFIG,
3333                                             &data);
3334                 if (ret_val)
3335                         return ret_val;
3336
3337                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3338                 ret_val = phy->ops.write_reg(hw,
3339                                              IGP01E1000_PHY_PORT_CONFIG,
3340                                              data);
3341         }
3342
3343         return ret_val;
3344 }
3345
3346 /**
3347  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3348  *  @hw: pointer to the HW structure
3349  *  @bank:  pointer to the variable that returns the active bank
3350  *
3351  *  Reads signature byte from the NVM using the flash access registers.
3352  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3353  **/
3354 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3355 {
3356         u32 eecd;
3357         struct e1000_nvm_info *nvm = &hw->nvm;
3358         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3359         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3360         u32 nvm_dword = 0;
3361         u8 sig_byte = 0;
3362         s32 ret_val;
3363
3364         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3365
3366         switch (hw->mac.type) {
3367         case e1000_pch_spt:
3368                 bank1_offset = nvm->flash_bank_size;
3369                 act_offset = E1000_ICH_NVM_SIG_WORD;
3370
3371                 /* set bank to 0 in case flash read fails */
3372                 *bank = 0;
3373
3374                 /* Check bank 0 */
3375                 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3376                                                          &nvm_dword);
3377                 if (ret_val)
3378                         return ret_val;
3379                 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3380                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3381                     E1000_ICH_NVM_SIG_VALUE) {
3382                         *bank = 0;
3383                         return E1000_SUCCESS;
3384                 }
3385
3386                 /* Check bank 1 */
3387                 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3388                                                          bank1_offset,
3389                                                          &nvm_dword);
3390                 if (ret_val)
3391                         return ret_val;
3392                 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3393                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3394                     E1000_ICH_NVM_SIG_VALUE) {
3395                         *bank = 1;
3396                         return E1000_SUCCESS;
3397                 }
3398
3399                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3400                 return -E1000_ERR_NVM;
3401         case e1000_ich8lan:
3402         case e1000_ich9lan:
3403                 eecd = E1000_READ_REG(hw, E1000_EECD);
3404                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3405                     E1000_EECD_SEC1VAL_VALID_MASK) {
3406                         if (eecd & E1000_EECD_SEC1VAL)
3407                                 *bank = 1;
3408                         else
3409                                 *bank = 0;
3410
3411                         return E1000_SUCCESS;
3412                 }
3413                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3414                 /* fall-thru */
3415         default:
3416                 /* set bank to 0 in case flash read fails */
3417                 *bank = 0;
3418
3419                 /* Check bank 0 */
3420                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3421                                                         &sig_byte);
3422                 if (ret_val)
3423                         return ret_val;
3424                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3425                     E1000_ICH_NVM_SIG_VALUE) {
3426                         *bank = 0;
3427                         return E1000_SUCCESS;
3428                 }
3429
3430                 /* Check bank 1 */
3431                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3432                                                         bank1_offset,
3433                                                         &sig_byte);
3434                 if (ret_val)
3435                         return ret_val;
3436                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3437                     E1000_ICH_NVM_SIG_VALUE) {
3438                         *bank = 1;
3439                         return E1000_SUCCESS;
3440                 }
3441
3442                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3443                 return -E1000_ERR_NVM;
3444         }
3445 }
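
/* Editor's note: per the function header, a bank is "valid" when bits 15:14
 * of NVM word 0x13 read 10b.  Both paths above therefore extract the high
 * byte of that word (sig_byte) and compare it against
 * E1000_ICH_NVM_VALID_SIG_MASK / E1000_ICH_NVM_SIG_VALUE; bank 0 is tried
 * first, then bank 1 at flash_bank_size away, and -E1000_ERR_NVM is returned
 * if neither signature matches.
 */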
3446
3447 /**
3448  *  e1000_read_nvm_spt - NVM access for SPT
3449  *  @hw: pointer to the HW structure
3450  *  @offset: The offset (in words) of the word(s) to read.
3451  *  @words: Size of data to read in words.
3452  *  @data: pointer to the word(s) to read at offset.
3453  *
3454  *  Reads a word(s) from the NVM
3455  **/
3456 STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3457                               u16 *data)
3458 {
3459         struct e1000_nvm_info *nvm = &hw->nvm;
3460         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3461         u32 act_offset;
3462         s32 ret_val = E1000_SUCCESS;
3463         u32 bank = 0;
3464         u32 dword = 0;
3465         u16 offset_to_read;
3466         u16 i;
3467
3468         DEBUGFUNC("e1000_read_nvm_spt");
3469
3470         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3471             (words == 0)) {
3472                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3473                 ret_val = -E1000_ERR_NVM;
3474                 goto out;
3475         }
3476
3477         nvm->ops.acquire(hw);
3478
3479         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3480         if (ret_val != E1000_SUCCESS) {
3481                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3482                 bank = 0;
3483         }
3484
3485         act_offset = (bank) ? nvm->flash_bank_size : 0;
3486         act_offset += offset;
3487
3488         ret_val = E1000_SUCCESS;
3489
3490         for (i = 0; i < words; i += 2) {
3491                 if (words - i == 1) {
3492                         if (dev_spec->shadow_ram[offset+i].modified) {
3493                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3494                         } else {
3495                                 offset_to_read = act_offset + i -
3496                                                  ((act_offset + i) % 2);
3497                                 ret_val =
3498                                    e1000_read_flash_dword_ich8lan(hw,
3499                                                                  offset_to_read,
3500                                                                  &dword);
3501                                 if (ret_val)
3502                                         break;
3503                                 if ((act_offset + i) % 2 == 0)
3504                                         data[i] = (u16)(dword & 0xFFFF);
3505                                 else
3506                                         data[i] = (u16)((dword >> 16) & 0xFFFF);
3507                         }
3508                 } else {
3509                         offset_to_read = act_offset + i;
3510                         if (!(dev_spec->shadow_ram[offset+i].modified) ||
3511                             !(dev_spec->shadow_ram[offset+i+1].modified)) {
3512                                 ret_val =
3513                                    e1000_read_flash_dword_ich8lan(hw,
3514                                                                  offset_to_read,
3515                                                                  &dword);
3516                                 if (ret_val)
3517                                         break;
3518                         }
3519                         if (dev_spec->shadow_ram[offset+i].modified)
3520                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3521                         else
3522                                 data[i] = (u16) (dword & 0xFFFF);
3523                         if (dev_spec->shadow_ram[offset+i+1].modified)
3524                                 data[i+1] =
3525                                    dev_spec->shadow_ram[offset+i+1].value;
3526                         else
3527                                 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3528                 }
3529         }
3530
3531         nvm->ops.release(hw);
3532
3533 out:
3534         if (ret_val)
3535                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3536
3537         return ret_val;
3538 }
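
/* Editor's note: on SPT the flash is only dword-addressable, so the loop
 * above fetches two 16-bit words per e1000_read_flash_dword_ich8lan() call.
 * The parity of (act_offset + i) decides whether a lone trailing word comes
 * from the low or the high half of the fetched dword, and any word whose
 * shadow-RAM entry is marked modified is served from the shadow copy instead
 * of the flash.
 */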
3539
3540 /**
3541  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3542  *  @hw: pointer to the HW structure
3543  *  @offset: The offset (in words) of the word(s) to read.
3544  *  @words: Size of data to read in words
3545  *  @data: Pointer to the word(s) to read at offset.
3546  *
3547  *  Reads a word(s) from the NVM using the flash access registers.
3548  **/
3549 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3550                                   u16 *data)
3551 {
3552         struct e1000_nvm_info *nvm = &hw->nvm;
3553         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3554         u32 act_offset;
3555         s32 ret_val = E1000_SUCCESS;
3556         u32 bank = 0;
3557         u16 i, word;
3558
3559         DEBUGFUNC("e1000_read_nvm_ich8lan");
3560
3561         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3562             (words == 0)) {
3563                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3564                 ret_val = -E1000_ERR_NVM;
3565                 goto out;
3566         }
3567
3568         nvm->ops.acquire(hw);
3569
3570         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3571         if (ret_val != E1000_SUCCESS) {
3572                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3573                 bank = 0;
3574         }
3575
3576         act_offset = (bank) ? nvm->flash_bank_size : 0;
3577         act_offset += offset;
3578
3579         ret_val = E1000_SUCCESS;
3580         for (i = 0; i < words; i++) {
3581                 if (dev_spec->shadow_ram[offset+i].modified) {
3582                         data[i] = dev_spec->shadow_ram[offset+i].value;
3583                 } else {
3584                         ret_val = e1000_read_flash_word_ich8lan(hw,
3585                                                                 act_offset + i,
3586                                                                 &word);
3587                         if (ret_val)
3588                                 break;
3589                         data[i] = word;
3590                 }
3591         }
3592
3593         nvm->ops.release(hw);
3594
3595 out:
3596         if (ret_val)
3597                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3598
3599         return ret_val;
3600 }
3601
3602 /**
3603  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3604  *  @hw: pointer to the HW structure
3605  *
3606  *  This function does initial flash setup so that a new read/write/erase cycle
3607  *  can be started.
3608  **/
3609 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3610 {
3611         union ich8_hws_flash_status hsfsts;
3612         s32 ret_val = -E1000_ERR_NVM;
3613
3614         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3615
3616         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3617
3618         /* Check if the flash descriptor is valid */
3619         if (!hsfsts.hsf_status.fldesvalid) {
3620                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3621                 return -E1000_ERR_NVM;
3622         }
3623
3624         /* Clear FCERR and DAEL in hw status by writing 1 */
3625         hsfsts.hsf_status.flcerr = 1;
3626         hsfsts.hsf_status.dael = 1;
3627         if (hw->mac.type >= e1000_pch_spt)
3628                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3629                                       hsfsts.regval & 0xFFFF);
3630         else
3631                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3632
3633         /* Either we should have a hardware SPI cycle in progress
3634          * bit to check against, in order to start a new cycle or
3635          * FDONE bit should be changed in the hardware so that it
3636          * is 1 after hardware reset, which can then be used as an
3637          * indication whether a cycle is in progress or has been
3638          * completed.
3639          */
3640
3641         if (!hsfsts.hsf_status.flcinprog) {
3642                 /* There is no cycle running at present,
3643                  * so we can start a cycle.
3644                  * Begin by setting Flash Cycle Done.
3645                  */
3646                 hsfsts.hsf_status.flcdone = 1;
3647                 if (hw->mac.type >= e1000_pch_spt)
3648                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3649                                               hsfsts.regval & 0xFFFF);
3650                 else
3651                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3652                                                 hsfsts.regval);
3653                 ret_val = E1000_SUCCESS;
3654         } else {
3655                 s32 i;
3656
3657                 /* Otherwise poll for some time so the current
3658                  * cycle has a chance to end before giving up.
3659                  */
3660                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3661                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3662                                                               ICH_FLASH_HSFSTS);
3663                         if (!hsfsts.hsf_status.flcinprog) {
3664                                 ret_val = E1000_SUCCESS;
3665                                 break;
3666                         }
3667                         usec_delay(1);
3668                 }
3669                 if (ret_val == E1000_SUCCESS) {
3670                         /* Successfully waited for the previous cycle to
3671                          * finish; now set the Flash Cycle Done.
3672                          */
3673                         hsfsts.hsf_status.flcdone = 1;
3674                         if (hw->mac.type >= e1000_pch_spt)
3675                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3676                                                       hsfsts.regval & 0xFFFF);
3677                         else
3678                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3679                                                         hsfsts.regval);
3680                 } else {
3681                         DEBUGOUT("Flash controller busy, cannot get access\n");
3682                 }
3683         }
3684
3685         return ret_val;
3686 }
3687
3688 /**
3689  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3690  *  @hw: pointer to the HW structure
3691  *  @timeout: maximum time to wait for completion
3692  *
3693  *  This function starts a flash cycle and waits for its completion.
3694  **/
3695 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3696 {
3697         union ich8_hws_flash_ctrl hsflctl;
3698         union ich8_hws_flash_status hsfsts;
3699         u32 i = 0;
3700
3701         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3702
3703         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3704         if (hw->mac.type >= e1000_pch_spt)
3705                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3706         else
3707                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3708         hsflctl.hsf_ctrl.flcgo = 1;
3709
3710         if (hw->mac.type >= e1000_pch_spt)
3711                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3712                                       hsflctl.regval << 16);
3713         else
3714                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3715
3716         /* wait till FDONE bit is set to 1 */
3717         do {
3718                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3719                 if (hsfsts.hsf_status.flcdone)
3720                         break;
3721                 usec_delay(1);
3722         } while (i++ < timeout);
3723
3724         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3725                 return E1000_SUCCESS;
3726
3727         return -E1000_ERR_NVM;
3728 }
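
/* Editor's note: on pch_spt and later parts the flash control word (HSFCTL)
 * is carried in the upper half of the 32-bit HSFSTS dword, which is why the
 * code above shifts by 16 when reading and writing it; older parts expose
 * HSFCTL as its own 16-bit register.
 */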
3729
3730 /**
3731  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3732  *  @hw: pointer to the HW structure
3733  *  @offset: offset to data location
3734  *  @data: pointer to the location for storing the data
3735  *
3736  *  Reads the flash dword at offset into data.  Offset is converted
3737  *  to bytes before read.
3738  **/
3739 STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3740                                           u32 *data)
3741 {
3742         DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3743
3744         if (!data)
3745                 return -E1000_ERR_NVM;
3746
3747         /* Must convert word offset into bytes. */
3748         offset <<= 1;
3749
3750         return e1000_read_flash_data32_ich8lan(hw, offset, data);
3751 }
3752
3753 /**
3754  *  e1000_read_flash_word_ich8lan - Read word from flash
3755  *  @hw: pointer to the HW structure
3756  *  @offset: offset to data location
3757  *  @data: pointer to the location for storing the data
3758  *
3759  *  Reads the flash word at offset into data.  Offset is converted
3760  *  to bytes before read.
3761  **/
3762 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3763                                          u16 *data)
3764 {
3765         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3766
3767         if (!data)
3768                 return -E1000_ERR_NVM;
3769
3770         /* Must convert offset into bytes. */
3771         offset <<= 1;
3772
3773         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3774 }
3775
3776 /**
3777  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3778  *  @hw: pointer to the HW structure
3779  *  @offset: The offset of the byte to read.
3780  *  @data: Pointer to a byte to store the value read.
3781  *
3782  *  Reads a single byte from the NVM using the flash access registers.
3783  **/
3784 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3785                                          u8 *data)
3786 {
3787         s32 ret_val;
3788         u16 word = 0;
3789
3790         /* In SPT, only 32-bit access is supported,
3791          * so this function should not be called.
3792          */
3793         if (hw->mac.type >= e1000_pch_spt)
3794                 return -E1000_ERR_NVM;
3795         else
3796                 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3797
3798         if (ret_val)
3799                 return ret_val;
3800
3801         *data = (u8)word;
3802
3803         return E1000_SUCCESS;
3804 }
3805
3806 /**
3807  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3808  *  @hw: pointer to the HW structure
3809  *  @offset: The offset (in bytes) of the byte or word to read.
3810  *  @size: Size of data to read, 1=byte 2=word
3811  *  @data: Pointer to the word to store the value read.
3812  *
3813  *  Reads a byte or word from the NVM using the flash access registers.
3814  **/
3815 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3816                                          u8 size, u16 *data)
3817 {
3818         union ich8_hws_flash_status hsfsts;
3819         union ich8_hws_flash_ctrl hsflctl;
3820         u32 flash_linear_addr;
3821         u32 flash_data = 0;
3822         s32 ret_val = -E1000_ERR_NVM;
3823         u8 count = 0;
3824
3825         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3826
3827         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3828                 return -E1000_ERR_NVM;
3829         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3830                              hw->nvm.flash_base_addr);
3831
3832         do {
3833                 usec_delay(1);
3834                 /* Steps */
3835                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3836                 if (ret_val != E1000_SUCCESS)
3837                         break;
3838                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3839
3840                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3841                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3842                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3843                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3844                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3845
3846                 ret_val = e1000_flash_cycle_ich8lan(hw,
3847                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3848
3849                 /* Check if FCERR is set to 1; if so, clear it and try
3850                  * the whole sequence a few more times, else read in
3851                  * (shift in) the Flash Data0 register, least
3852                  * significant byte first.
3853                  */
3854                 if (ret_val == E1000_SUCCESS) {
3855                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3856                         if (size == 1)
3857                                 *data = (u8)(flash_data & 0x000000FF);
3858                         else if (size == 2)
3859                                 *data = (u16)(flash_data & 0x0000FFFF);
3860                         break;
3861                 } else {
3862                         /* If we've gotten here, then things are probably
3863                          * completely hosed, but if the error condition is
3864                          * detected, it won't hurt to give it another try...
3865                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3866                          */
3867                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3868                                                               ICH_FLASH_HSFSTS);
3869                         if (hsfsts.hsf_status.flcerr) {
3870                                 /* Repeat for some time before giving up. */
3871                                 continue;
3872                         } else if (!hsfsts.hsf_status.flcdone) {
3873                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3874                                 break;
3875                         }
3876                 }
3877         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3878
3879         return ret_val;
3880 }
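
/* Editor's note: the retry policy above distinguishes two failure modes.  A
 * cycle that completes with FCERR set is retried, up to
 * ICH_FLASH_CYCLE_REPEAT_COUNT attempts in total; a cycle that never sets
 * FDONE within the timeout is treated as fatal and the loop exits with the
 * error returned by e1000_flash_cycle_ich8lan().
 */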
3881
3882 /**
3883  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3884  *  @hw: pointer to the HW structure
3885  *  @offset: The offset (in bytes) of the dword to read.
3886  *  @data: Pointer to the dword to store the value read.
3887  *
3888  *  Reads a dword from the NVM using the flash access registers.
3889  **/
3890 STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3891                                            u32 *data)
3892 {
3893         union ich8_hws_flash_status hsfsts;
3894         union ich8_hws_flash_ctrl hsflctl;
3895         u32 flash_linear_addr;
3896         s32 ret_val = -E1000_ERR_NVM;
3897         u8 count = 0;
3898
3899         DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3900
3901         if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3902             hw->mac.type < e1000_pch_spt)
3903                 return -E1000_ERR_NVM;
3904         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3905                              hw->nvm.flash_base_addr);
3906
3907         do {
3908                 usec_delay(1);
3909                 /* Steps */
3910                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3911                 if (ret_val != E1000_SUCCESS)
3912                         break;
3913                 /* In SPT, this register is in LAN memory space, not flash.
3914                  * Therefore, only 32-bit access is supported.
3915                  */
3916                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3917
3918                 /* fldbcount is the byte count minus one; 3 selects a dword access */
3919                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3920                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3921                 /* In SPT, this register is in LAN memory space, not flash.
3922                  * Therefore, only 32-bit access is supported.
3923                  */
3924                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3925                                       (u32)hsflctl.regval << 16);
3926                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3927
3928                 ret_val = e1000_flash_cycle_ich8lan(hw,
3929                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3930
3931                 /* Check if FCERR is set to 1; if so, clear it and try
3932                  * the whole sequence a few more times, else read in
3933                  * (shift in) the Flash Data0 register, least
3934                  * significant byte first.
3935                  */
3936                 if (ret_val == E1000_SUCCESS) {
3937                         *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3938                         break;
3939                 } else {
3940                         /* If we've gotten here, then things are probably
3941                          * completely hosed, but if the error condition is
3942                          * detected, it won't hurt to give it another try...
3943                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3944                          */
3945                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3946                                                               ICH_FLASH_HSFSTS);
3947                         if (hsfsts.hsf_status.flcerr) {
3948                                 /* Repeat for some time before giving up. */
3949                                 continue;
3950                         } else if (!hsfsts.hsf_status.flcdone) {
3951                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3952                                 break;
3953                         }
3954                 }
3955         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3956
3957         return ret_val;
3958 }
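
/*
 * Editor's sketch (not part of the driver): on SPT the 16-bit flash control
 * word shares a single 32-bit register with the status word, which is why the
 * function above reads ICH_FLASH_HSFSTS and shifts right by 16 to obtain the
 * control half, and writes the control half back shifted left by 16.  The
 * helper names and this stand-alone form are assumptions for illustration.
 */
#include <stdint.h>

static inline uint16_t sketch_spt_extract_ctrl(uint32_t hsfsts_dword)
{
        return (uint16_t)(hsfsts_dword >> 16);          /* upper half = control word */
}

static inline uint32_t sketch_spt_pack_ctrl(uint16_t ctrl)
{
        return (uint32_t)ctrl << 16;                    /* placed back in the upper half */
}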
3959
3960 /**
3961  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3962  *  @hw: pointer to the HW structure
3963  *  @offset: The offset (in bytes) of the word(s) to write.
3964  *  @words: Size of data to write in words
3965  *  @data: Pointer to the word(s) to write at offset.
3966  *
3967  *  Writes the word(s) to the shadow RAM; they are committed to the NVM on the next checksum update.
3968  **/
3969 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3970                                    u16 *data)
3971 {
3972         struct e1000_nvm_info *nvm = &hw->nvm;
3973         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3974         u16 i;
3975
3976         DEBUGFUNC("e1000_write_nvm_ich8lan");
3977
3978         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3979             (words == 0)) {
3980                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3981                 return -E1000_ERR_NVM;
3982         }
3983
3984         nvm->ops.acquire(hw);
3985
3986         for (i = 0; i < words; i++) {
3987                 dev_spec->shadow_ram[offset+i].modified = true;
3988                 dev_spec->shadow_ram[offset+i].value = data[i];
3989         }
3990
3991         nvm->ops.release(hw);
3992
3993         return E1000_SUCCESS;
3994 }
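
/*
 * Editor's sketch (not part of the driver): e1000_write_nvm_ich8lan() above
 * only stages data and never touches the flash.  Each word is cached in the
 * shadow RAM with its "modified" flag set and is committed later by the
 * checksum-update path.  The structure and helper below are hypothetical and
 * simply mirror that staging step.
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_shadow_word {
        bool modified;
        uint16_t value;
};

static void sketch_stage_words(struct sketch_shadow_word *shadow, uint16_t offset,
                               uint16_t words, const uint16_t *data)
{
        uint16_t i;

        for (i = 0; i < words; i++) {
                shadow[offset + i].modified = true;     /* commit happens later */
                shadow[offset + i].value = data[i];
        }
}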
3995
3996 /**
3997  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3998  *  @hw: pointer to the HW structure
3999  *
4000  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4001  *  which writes the checksum to the shadow ram.  The changes in the shadow
4002  *  ram are then committed to the EEPROM by processing each bank at a time
4003  *  checking for the modified bit and writing only the pending changes.
4004  *  After a successful commit, the shadow ram is cleared and is ready for
4005  *  future writes.
4006  **/
4007 STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4008 {
4009         struct e1000_nvm_info *nvm = &hw->nvm;
4010         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4011         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4012         s32 ret_val;
4013         u32 dword = 0;
4014
4015         DEBUGFUNC("e1000_update_nvm_checksum_spt");
4016
4017         ret_val = e1000_update_nvm_checksum_generic(hw);
4018         if (ret_val)
4019                 goto out;
4020
4021         if (nvm->type != e1000_nvm_flash_sw)
4022                 goto out;
4023
4024         nvm->ops.acquire(hw);
4025
4026         /* We're writing to the opposite bank so if we're on bank 1,
4027          * write to bank 0 etc.  We also need to erase the segment that
4028          * is going to be written
4029          */
4030         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4031         if (ret_val != E1000_SUCCESS) {
4032                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4033                 bank = 0;
4034         }
4035
4036         if (bank == 0) {
4037                 new_bank_offset = nvm->flash_bank_size;
4038                 old_bank_offset = 0;
4039                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4040                 if (ret_val)
4041                         goto release;
4042         } else {
4043                 old_bank_offset = nvm->flash_bank_size;
4044                 new_bank_offset = 0;
4045                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4046                 if (ret_val)
4047                         goto release;
4048         }
4049         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4050                 /* Determine whether to write the value stored
4051                  * in the other NVM bank or a modified value stored
4052                  * in the shadow RAM
4053                  */
4054                 ret_val = e1000_read_flash_dword_ich8lan(hw,
4055                                                          i + old_bank_offset,
4056                                                          &dword);
4057
4058                 if (dev_spec->shadow_ram[i].modified) {
4059                         dword &= 0xffff0000;
4060                         dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4061                 }
4062                 if (dev_spec->shadow_ram[i + 1].modified) {
4063                         dword &= 0x0000ffff;
4064                         dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4065                                   << 16);
4066                 }
4067                 if (ret_val)
4068                         break;
4069
4070                 /* If the word is 0x13, then make sure the signature bits
4071                  * (15:14) are 11b until the commit has completed.
4072                  * This will allow us to write 10b which indicates the
4073                  * signature is valid.  We want to do this after the write
4074                  * has completed so that we don't mark the segment valid
4075                  * while the write is still in progress
4076                  */
4077                 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4078                         dword |= E1000_ICH_NVM_SIG_MASK << 16;
4079
4080                 /* Convert offset to bytes. */
4081                 act_offset = (i + new_bank_offset) << 1;
4082
4083                 usec_delay(100);
4084
4085                 /* Write the data to the new bank; offset is in words. */
4086                 act_offset = i + new_bank_offset;
4087                 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4088                                                                 dword);
4089                 if (ret_val)
4090                         break;
4091         }
4092
4093         /* Don't bother writing the segment valid bits if sector
4094          * programming failed.
4095          */
4096         if (ret_val) {
4097                 DEBUGOUT("Flash commit failed.\n");
4098                 goto release;
4099         }
4100
4101         /* Finally, validate the new segment by setting bits 15:14
4102          * to 10b in word 0x13; this can be done without an
4103          * erase as well since these bits are 11b to start with
4104          * and we only need to clear bit 14.
4105          */
4106         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4107
4108         /* offset is in words but we read a dword */
4109         --act_offset;
4110         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4111
4112         if (ret_val)
4113                 goto release;
4114
4115         dword &= 0xBFFFFFFF;
4116         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4117
4118         if (ret_val)
4119                 goto release;
4120
4121         /* And invalidate the previously valid segment by setting
4122          * its signature word (0x13) high_byte to 0b. This can be
4123          * done without an erase because flash erase sets all bits
4124          * to 1's. We can write 1's to 0's without an erase
4125          */
4126         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4127
4128         /* offset is in words but we read a dword */
4129         act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4130         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4131
4132         if (ret_val)
4133                 goto release;
4134
4135         dword &= 0x00FFFFFF;
4136         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4137
4138         if (ret_val)
4139                 goto release;
4140
4141         /* Great!  Everything worked, we can now clear the cached entries. */
4142         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4143                 dev_spec->shadow_ram[i].modified = false;
4144                 dev_spec->shadow_ram[i].value = 0xFFFF;
4145         }
4146
4147 release:
4148         nvm->ops.release(hw);
4149
4150         /* Reload the EEPROM, or else modifications will not appear
4151          * until after the next adapter reset.
4152          */
4153         if (!ret_val) {
4154                 nvm->ops.reload(hw);
4155                 msec_delay(10);
4156         }
4157
4158 out:
4159         if (ret_val)
4160                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4161
4162         return ret_val;
4163 }
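
/*
 * Editor's sketch (not part of the driver): in the SPT commit loop above, a
 * dword read from the old bank is merged with any pending shadow-RAM changes:
 * a modified even word replaces the low 16 bits, a modified odd word replaces
 * the high 16 bits.  The stand-alone helper below (hypothetical name) shows
 * just that merge.
 */
#include <stdint.h>

static uint32_t sketch_merge_shadow_pair(uint32_t dword,
                                         int lo_modified, uint16_t lo_value,
                                         int hi_modified, uint16_t hi_value)
{
        if (lo_modified) {
                dword &= 0xFFFF0000u;                   /* keep the high word */
                dword |= lo_value;
        }
        if (hi_modified) {
                dword &= 0x0000FFFFu;                   /* keep the low word */
                dword |= (uint32_t)hi_value << 16;
        }
        return dword;
}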
4164
4165 /**
4166  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4167  *  @hw: pointer to the HW structure
4168  *
4169  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4170  *  which writes the checksum to the shadow ram.  The changes in the shadow
4171  *  ram are then committed to the EEPROM by processing each bank at a time
4172  *  checking for the modified bit and writing only the pending changes.
4173  *  After a successful commit, the shadow ram is cleared and is ready for
4174  *  future writes.
4175  **/
4176 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4177 {
4178         struct e1000_nvm_info *nvm = &hw->nvm;
4179         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4180         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4181         s32 ret_val;
4182         u16 data = 0;
4183
4184         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4185
4186         ret_val = e1000_update_nvm_checksum_generic(hw);
4187         if (ret_val)
4188                 goto out;
4189
4190         if (nvm->type != e1000_nvm_flash_sw)
4191                 goto out;
4192
4193         nvm->ops.acquire(hw);
4194
4195         /* We're writing to the opposite bank so if we're on bank 1,
4196          * write to bank 0 etc.  We also need to erase the segment that
4197          * is going to be written
4198          */
4199         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4200         if (ret_val != E1000_SUCCESS) {
4201                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4202                 bank = 0;
4203         }
4204
4205         if (bank == 0) {
4206                 new_bank_offset = nvm->flash_bank_size;
4207                 old_bank_offset = 0;
4208                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4209                 if (ret_val)
4210                         goto release;
4211         } else {
4212                 old_bank_offset = nvm->flash_bank_size;
4213                 new_bank_offset = 0;
4214                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4215                 if (ret_val)
4216                         goto release;
4217         }
4218         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4219                 if (dev_spec->shadow_ram[i].modified) {
4220                         data = dev_spec->shadow_ram[i].value;
4221                 } else {
4222                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
4223                                                                 old_bank_offset,
4224                                                                 &data);
4225                         if (ret_val)
4226                                 break;
4227                 }
4228                 /* If the word is 0x13, then make sure the signature bits
4229                  * (15:14) are 11b until the commit has completed.
4230                  * This will allow us to write 10b which indicates the
4231                  * signature is valid.  We want to do this after the write
4232                  * has completed so that we don't mark the segment valid
4233                  * while the write is still in progress
4234                  */
4235                 if (i == E1000_ICH_NVM_SIG_WORD)
4236                         data |= E1000_ICH_NVM_SIG_MASK;
4237
4238                 /* Convert offset to bytes. */
4239                 act_offset = (i + new_bank_offset) << 1;
4240
4241                 usec_delay(100);
4242
4243                 /* Write the bytes to the new bank. */
4244                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4245                                                                act_offset,
4246                                                                (u8)data);
4247                 if (ret_val)
4248                         break;
4249
4250                 usec_delay(100);
4251                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4252                                                           act_offset + 1,
4253                                                           (u8)(data >> 8));
4254                 if (ret_val)
4255                         break;
4256         }
4257
4258         /* Don't bother writing the segment valid bits if sector
4259          * programming failed.
4260          */
4261         if (ret_val) {
4262                 DEBUGOUT("Flash commit failed.\n");
4263                 goto release;
4264         }
4265
4266         /* Finally, validate the new segment by setting bits 15:14
4267          * to 10b in word 0x13; this can be done without an
4268          * erase as well since these bits are 11b to start with
4269          * and we only need to clear bit 14.
4270          */
4271         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4272         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4273         if (ret_val)
4274                 goto release;
4275
4276         data &= 0xBFFF;
4277         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4278                                                        (u8)(data >> 8));
4279         if (ret_val)
4280                 goto release;
4281
4282         /* And invalidate the previously valid segment by setting
4283          * its signature word (0x13) high_byte to 0b. This can be
4284          * done without an erase because flash erase sets all bits
4285          * to 1's. We can write 1's to 0's without an erase
4286          */
4287         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4288
4289         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4290
4291         if (ret_val)
4292                 goto release;
4293
4294         /* Great!  Everything worked, we can now clear the cached entries. */
4295         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4296                 dev_spec->shadow_ram[i].modified = false;
4297                 dev_spec->shadow_ram[i].value = 0xFFFF;
4298         }
4299
4300 release:
4301         nvm->ops.release(hw);
4302
4303         /* Reload the EEPROM, or else modifications will not appear
4304          * until after the next adapter reset.
4305          */
4306         if (!ret_val) {
4307                 nvm->ops.reload(hw);
4308                 msec_delay(10);
4309         }
4310
4311 out:
4312         if (ret_val)
4313                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4314
4315         return ret_val;
4316 }
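
/*
 * Editor's sketch (not part of the driver): bank validity lives in bits 15:14
 * of signature word 0x13.  Both commit paths above write the word with those
 * bits set to 11b, then clear bit 14 (the 0xBFFF mask) so the new bank reads
 * 10b ("valid"), and finally zero the old bank's high byte to invalidate it.
 * Flash bits can only go from 1 to 0 without an erase, which is why the order
 * matters.  The macro values below follow the masks used above; the names are
 * hypothetical.
 */
#include <stdint.h>

#define SKETCH_SIG_PENDING   0xC000u    /* bits 15:14 = 11b while writing */
#define SKETCH_SIG_VALIDATE  0xBFFFu    /* clear bit 14 -> 10b = valid */

static uint16_t sketch_sig_mark_pending(uint16_t word)
{
        return word | SKETCH_SIG_PENDING;
}

static uint16_t sketch_sig_mark_valid(uint16_t word)
{
        return word & SKETCH_SIG_VALIDATE;
}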
4317
4318 /**
4319  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4320  *  @hw: pointer to the HW structure
4321  *
4322  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
4323  *  If the bit is 0, the EEPROM has been modified but the checksum was not
4324  *  recalculated, in which case we need to calculate the checksum and set bit 6.
4325  **/
4326 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4327 {
4328         s32 ret_val;
4329         u16 data;
4330         u16 word;
4331         u16 valid_csum_mask;
4332
4333         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4334
4335         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4336          * the checksum needs to be fixed.  This bit is an indication that
4337          * the NVM was prepared by OEM software and did not calculate
4338          * the checksum...a likely scenario.
4339          */
4340         switch (hw->mac.type) {
4341         case e1000_pch_lpt:
4342         case e1000_pch_spt:
4343                 word = NVM_COMPAT;
4344                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4345                 break;
4346         default:
4347                 word = NVM_FUTURE_INIT_WORD1;
4348                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4349                 break;
4350         }
4351
4352         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4353         if (ret_val)
4354                 return ret_val;
4355
4356         if (!(data & valid_csum_mask)) {
4357                 data |= valid_csum_mask;
4358                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4359                 if (ret_val)
4360                         return ret_val;
4361                 ret_val = hw->nvm.ops.update(hw);
4362                 if (ret_val)
4363                         return ret_val;
4364         }
4365
4366         return e1000_validate_nvm_checksum_generic(hw);
4367 }
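
/*
 * Editor's sketch (not part of the driver): the validate routine above first
 * checks a "checksum valid" indicator bit in an NVM init word; if it is clear,
 * the word is rewritten with the bit set and the NVM is updated before the
 * generic checksum verification runs.  The helper below is a hypothetical,
 * stand-alone version of that test-and-set step.
 */
#include <stdint.h>

/* Returns 1 if the word was changed and must be written back, 0 otherwise. */
static int sketch_ensure_csum_bit(uint16_t *word, uint16_t valid_csum_mask)
{
        if (*word & valid_csum_mask)
                return 0;                       /* already flagged as valid */

        *word |= valid_csum_mask;               /* caller writes back and updates NVM */
        return 1;
}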
4368
4369 /**
4370  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4371  *  @hw: pointer to the HW structure
4372  *  @offset: The offset (in bytes) of the byte/word to write.
4373  *  @size: Size of data to write, 1=byte 2=word (4=dword on SPT)
4374  *  @data: The byte(s) to write to the NVM.
4375  *
4376  *  Writes one/two bytes (or a dword on SPT) to the NVM using the flash access registers.
4377  **/
4378 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4379                                           u8 size, u16 data)
4380 {
4381         union ich8_hws_flash_status hsfsts;
4382         union ich8_hws_flash_ctrl hsflctl;
4383         u32 flash_linear_addr;
4384         u32 flash_data = 0;
4385         s32 ret_val;
4386         u8 count = 0;
4387
4388         DEBUGFUNC("e1000_write_flash_data_ich8lan");
4389
4390         if (hw->mac.type >= e1000_pch_spt) {
4391                 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4392                         return -E1000_ERR_NVM;
4393         } else {
4394                 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4395                         return -E1000_ERR_NVM;
4396         }
4397
4398         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4399                              hw->nvm.flash_base_addr);
4400
4401         do {
4402                 usec_delay(1);
4403                 /* Steps */
4404                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4405                 if (ret_val != E1000_SUCCESS)
4406                         break;
4407                 /* In SPT, this register is in LAN memory space, not
4408                  * flash.  Therefore, only 32-bit access is supported.
4409                  */
4410                 if (hw->mac.type >= e1000_pch_spt)
4411                         hsflctl.regval =
4412                             E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4413                 else
4414                         hsflctl.regval =
4415                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4416
4417                 /* fldbcount is the byte count minus one: 0=1 byte, 1=2 bytes, 3=4 bytes. */
4418                 hsflctl.hsf_ctrl.fldbcount = size - 1;
4419                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4420                 /* In SPT, this register is in LAN memory space,
4421                  * not flash.  Therefore, only 32-bit access is
4422                  * supported.
4423                  */
4424                 if (hw->mac.type >= e1000_pch_spt)
4425                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4426                                               hsflctl.regval << 16);
4427                 else
4428                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4429                                                 hsflctl.regval);
4430
4431                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4432
4433                 if (size == 1)
4434                         flash_data = (u32)data & 0x00FF;
4435                 else
4436                         flash_data = (u32)data;
4437
4438                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4439
4440                 /* Check if FCERR is set to 1; if so, clear it
4441                  * and try the whole sequence a few more times, else done.
4442                  */
4443                 ret_val =
4444                     e1000_flash_cycle_ich8lan(hw,
4445                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4446                 if (ret_val == E1000_SUCCESS)
4447                         break;
4448
4449                 /* If we're here, then things are most likely
4450                  * completely hosed, but if the error condition
4451                  * is detected, it won't hurt to give it another
4452                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4453                  */
4454                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4455                 if (hsfsts.hsf_status.flcerr)
4456                         /* Repeat for some time before giving up. */
4457                         continue;
4458                 if (!hsfsts.hsf_status.flcdone) {
4459                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4460                         break;
4461                 }
4462         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4463
4464         return ret_val;
4465 }
4466
4467 /**
4468 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4469 *  @hw: pointer to the HW structure
4470 *  @offset: The offset (in bytes) of the dword to write.
4471 *  @data: The 4 bytes to write to the NVM.
4472 *
4473 *  Writes a dword (4 bytes) to the NVM using the flash access registers.
4474 **/
4475 STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4476                                             u32 data)
4477 {
4478         union ich8_hws_flash_status hsfsts;
4479         union ich8_hws_flash_ctrl hsflctl;
4480         u32 flash_linear_addr;
4481         s32 ret_val;
4482         u8 count = 0;
4483
4484         DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4485
4486         if (hw->mac.type >= e1000_pch_spt) {
4487                 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4488                         return -E1000_ERR_NVM;
4489         }
4490         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4491                              hw->nvm.flash_base_addr);
4492         do {
4493                 usec_delay(1);
4494                 /* Steps */
4495                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4496                 if (ret_val != E1000_SUCCESS)
4497                         break;
4498
4499                 /* In SPT, this register is in LAN memory space, not
4500                  * flash.  Therefore, only 32-bit access is supported.
4501                  */
4502                 if (hw->mac.type >= e1000_pch_spt)
4503                         hsflctl.regval = E1000_READ_FLASH_REG(hw,
4504                                                               ICH_FLASH_HSFSTS)
4505                                          >> 16;
4506                 else
4507                         hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4508                                                               ICH_FLASH_HSFCTL);
4509
4510                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4511                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4512
4513                 /* In SPT, this register is in LAN memory space,
4514                  * not flash.  Therefore, only 32-bit access is
4515                  * supported.
4516                  */
4517                 if (hw->mac.type >= e1000_pch_spt)
4518                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4519                                               hsflctl.regval << 16);
4520                 else
4521                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4522                                                 hsflctl.regval);
4523
4524                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4525
4526                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4527
4528                 /* Check if FCERR is set to 1; if so, clear it
4529                  * and try the whole sequence a few more times, else done.
4530                  */
4531                 ret_val = e1000_flash_cycle_ich8lan(hw,
4532                                                ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4533
4534                 if (ret_val == E1000_SUCCESS)
4535                         break;
4536
4537                 /* If we're here, then things are most likely
4538                  * completely hosed, but if the error condition
4539                  * is detected, it won't hurt to give it another
4540                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4541                  */
4542                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4543
4544                 if (hsfsts.hsf_status.flcerr)
4545                         /* Repeat for some time before giving up. */
4546                         continue;
4547                 if (!hsfsts.hsf_status.flcdone) {
4548                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4549                         break;
4550                 }
4551         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4552
4553         return ret_val;
4554 }
4555
4556 /**
4557  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4558  *  @hw: pointer to the HW structure
4559  *  @offset: The offset of the byte to write.
4560  *  @data: The byte to write to the NVM.
4561  *
4562  *  Writes a single byte to the NVM using the flash access registers.
4563  **/
4564 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4565                                           u8 data)
4566 {
4567         u16 word = (u16)data;
4568
4569         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4570
4571         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4572 }
4573
4574 /**
4575 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4576 *  @hw: pointer to the HW structure
4577 *  @offset: The offset of the word to write.
4578 *  @dword: The dword to write to the NVM.
4579 *
4580 *  Writes a single dword to the NVM using the flash access registers.
4581 *  Goes through a retry algorithm before giving up.
4582 **/
4583 STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4584                                                  u32 offset, u32 dword)
4585 {
4586         s32 ret_val;
4587         u16 program_retries;
4588
4589         DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4590
4591         /* Must convert word offset into bytes. */
4592         offset <<= 1;
4593
4594         ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4595
4596         if (!ret_val)
4597                 return ret_val;
4598         for (program_retries = 0; program_retries < 100; program_retries++) {
4599                 DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4600                 usec_delay(100);
4601                 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4602                 if (ret_val == E1000_SUCCESS)
4603                         break;
4604         }
4605         if (program_retries == 100)
4606                 return -E1000_ERR_NVM;
4607
4608         return E1000_SUCCESS;
4609 }
4610
4611 /**
4612  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4613  *  @hw: pointer to the HW structure
4614  *  @offset: The offset of the byte to write.
4615  *  @byte: The byte to write to the NVM.
4616  *
4617  *  Writes a single byte to the NVM using the flash access registers.
4618  *  Goes through a retry algorithm before giving up.
4619  **/
4620 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4621                                                 u32 offset, u8 byte)
4622 {
4623         s32 ret_val;
4624         u16 program_retries;
4625
4626         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4627
4628         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4629         if (!ret_val)
4630                 return ret_val;
4631
4632         for (program_retries = 0; program_retries < 100; program_retries++) {
4633                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4634                 usec_delay(100);
4635                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4636                 if (ret_val == E1000_SUCCESS)
4637                         break;
4638         }
4639         if (program_retries == 100)
4640                 return -E1000_ERR_NVM;
4641
4642         return E1000_SUCCESS;
4643 }
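
/*
 * Editor's sketch (not part of the driver): both retry helpers above follow
 * the same pattern - one immediate attempt, then up to 100 retries separated
 * by a short delay, giving up with an NVM error if none succeed.  The generic
 * loop below (hypothetical names, function-pointer callback) captures that
 * shape without any hardware access.
 */
static int sketch_retry_write(int (*do_write)(void *ctx), void *ctx)
{
        int tries;

        if (do_write(ctx) == 0)
                return 0;                       /* first attempt succeeded */

        for (tries = 0; tries < 100; tries++) {
                /* the driver inserts usec_delay(100) between attempts */
                if (do_write(ctx) == 0)
                        return 0;
        }

        return -1;                              /* all retries exhausted */
}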
4644
4645 /**
4646  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4647  *  @hw: pointer to the HW structure
4648  *  @bank: 0 for first bank, 1 for second bank, etc.
4649  *
4650  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4651  *  bank N is 4096 * N + flash_reg_addr.
4652  **/
4653 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4654 {
4655         struct e1000_nvm_info *nvm = &hw->nvm;
4656         union ich8_hws_flash_status hsfsts;
4657         union ich8_hws_flash_ctrl hsflctl;
4658         u32 flash_linear_addr;
4659         /* bank size is in 16bit words - adjust to bytes */
4660         u32 flash_bank_size = nvm->flash_bank_size * 2;
4661         s32 ret_val;
4662         s32 count = 0;
4663         s32 j, iteration, sector_size;
4664
4665         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4666
4667         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4668
4669         /* Determine HW Sector size: Read BERASE bits of hw flash status
4670          * register
4671          * 00: The Hw sector is 256 bytes, hence we need to erase 16
4672          *     consecutive sectors.  The start index for the nth Hw sector
4673          *     can be calculated as = bank * 4096 + n * 256
4674          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4675          *     The start index for the nth Hw sector can be calculated
4676          *     as = bank * 4096
4677          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4678          *     (ich9 only, otherwise error condition)
4679          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4680          */
4681         switch (hsfsts.hsf_status.berasesz) {
4682         case 0:
4683                 /* Hw sector size 256 */
4684                 sector_size = ICH_FLASH_SEG_SIZE_256;
4685                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4686                 break;
4687         case 1:
4688                 sector_size = ICH_FLASH_SEG_SIZE_4K;
4689                 iteration = 1;
4690                 break;
4691         case 2:
4692                 sector_size = ICH_FLASH_SEG_SIZE_8K;
4693                 iteration = 1;
4694                 break;
4695         case 3:
4696                 sector_size = ICH_FLASH_SEG_SIZE_64K;
4697                 iteration = 1;
4698                 break;
4699         default:
4700                 return -E1000_ERR_NVM;
4701         }
4702
4703         /* Start with the base address, then add the sector offset. */
4704         flash_linear_addr = hw->nvm.flash_base_addr;
4705         flash_linear_addr += (bank) ? flash_bank_size : 0;
4706
4707         for (j = 0; j < iteration; j++) {
4708                 do {
4709                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4710
4711                         /* Steps */
4712                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4713                         if (ret_val)
4714                                 return ret_val;
4715
4716                         /* Write a value 11 (block Erase) in Flash
4717                          * Cycle field in hw flash control
4718                          */
4719                         if (hw->mac.type >= e1000_pch_spt)
4720                                 hsflctl.regval =
4721                                     E1000_READ_FLASH_REG(hw,
4722                                                          ICH_FLASH_HSFSTS)>>16;
4723                         else
4724                                 hsflctl.regval =
4725                                     E1000_READ_FLASH_REG16(hw,
4726                                                            ICH_FLASH_HSFCTL);
4727
4728                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4729                         if (hw->mac.type >= e1000_pch_spt)
4730                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4731                                                       hsflctl.regval << 16);
4732                         else
4733                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4734                                                         hsflctl.regval);
4735
4736                         /* Write the last 24 bits of an index within the
4737                          * block into Flash Linear address field in Flash
4738                          * Address.
4739                          */
4740                         flash_linear_addr += (j * sector_size);
4741                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4742                                               flash_linear_addr);
4743
4744                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4745                         if (ret_val == E1000_SUCCESS)
4746                                 break;
4747
4748                         /* Check if FCERR is set to 1.  If so,
4749                          * clear it and try the whole sequence
4750                          * a few more times, else we are done.
4751                          */
4752                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4753                                                       ICH_FLASH_HSFSTS);
4754                         if (hsfsts.hsf_status.flcerr)
4755                                 /* repeat for some time before giving up */
4756                                 continue;
4757                         else if (!hsfsts.hsf_status.flcdone)
4758                                 return ret_val;
4759                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4760         }
4761
4762         return E1000_SUCCESS;
4763 }
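
/*
 * Editor's sketch (not part of the driver): the erase routine above sizes its
 * loop from the BERASE field of the flash status register: 00b means 256-byte
 * sectors (so a 4 KB bank takes 16 erase cycles), 01b one 4 KB erase, 10b one
 * 8 KB erase and 11b one 64 KB erase.  The stand-alone decode below uses
 * hypothetical names and plain byte counts.
 */
#include <stdint.h>

static int sketch_berase_to_geometry(unsigned int berasesz, uint32_t bank_bytes,
                                     uint32_t *sector_size, uint32_t *iterations)
{
        switch (berasesz) {
        case 0:
                *sector_size = 256;
                *iterations = bank_bytes / 256;
                break;
        case 1:
                *sector_size = 4096;
                *iterations = 1;
                break;
        case 2:
                *sector_size = 8192;
                *iterations = 1;
                break;
        case 3:
                *sector_size = 65536;
                *iterations = 1;
                break;
        default:
                return -1;                      /* reserved/unknown encoding */
        }

        return 0;
}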
4764
4765 /**
4766  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4767  *  @hw: pointer to the HW structure
4768  *  @data: Pointer to the LED settings
4769  *
4770  *  Reads the LED default settings from the NVM into data.  If the NVM LED
4771  *  setting is all 0's or F's, set the LED default to a valid LED default
4772  *  setting.
4773  **/
4774 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4775 {
4776         s32 ret_val;
4777
4778         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4779
4780         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4781         if (ret_val) {
4782                 DEBUGOUT("NVM Read Error\n");
4783                 return ret_val;
4784         }
4785
4786         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4787                 *data = ID_LED_DEFAULT_ICH8LAN;
4788
4789         return E1000_SUCCESS;
4790 }
4791
4792 /**
4793  *  e1000_id_led_init_pchlan - store LED configurations
4794  *  @hw: pointer to the HW structure
4795  *
4796  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4797  *  the PHY LED configuration register.
4798  *
4799  *  PCH also does not have an "always on" or "always off" mode which
4800  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4801  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4802  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4803  *  link based on logic in e1000_led_[on|off]_pchlan().
4804  **/
4805 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4806 {
4807         struct e1000_mac_info *mac = &hw->mac;
4808         s32 ret_val;
4809         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4810         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4811         u16 data, i, temp, shift;
4812
4813         DEBUGFUNC("e1000_id_led_init_pchlan");
4814
4815         /* Get default ID LED modes */
4816         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4817         if (ret_val)
4818                 return ret_val;
4819
4820         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4821         mac->ledctl_mode1 = mac->ledctl_default;
4822         mac->ledctl_mode2 = mac->ledctl_default;
4823
4824         for (i = 0; i < 4; i++) {
4825                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4826                 shift = (i * 5);
4827                 switch (temp) {
4828                 case ID_LED_ON1_DEF2:
4829                 case ID_LED_ON1_ON2:
4830                 case ID_LED_ON1_OFF2:
4831                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4832                         mac->ledctl_mode1 |= (ledctl_on << shift);
4833                         break;
4834                 case ID_LED_OFF1_DEF2:
4835                 case ID_LED_OFF1_ON2:
4836                 case ID_LED_OFF1_OFF2:
4837                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4838                         mac->ledctl_mode1 |= (ledctl_off << shift);
4839                         break;
4840                 default:
4841                         /* Do nothing */
4842                         break;
4843                 }
4844                 switch (temp) {
4845                 case ID_LED_DEF1_ON2:
4846                 case ID_LED_ON1_ON2:
4847                 case ID_LED_OFF1_ON2:
4848                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4849                         mac->ledctl_mode2 |= (ledctl_on << shift);
4850                         break;
4851                 case ID_LED_DEF1_OFF2:
4852                 case ID_LED_ON1_OFF2:
4853                 case ID_LED_OFF1_OFF2:
4854                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4855                         mac->ledctl_mode2 |= (ledctl_off << shift);
4856                         break;
4857                 default:
4858                         /* Do nothing */
4859                         break;
4860                 }
4861         }
4862
4863         return E1000_SUCCESS;
4864 }
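
/*
 * Editor's sketch (not part of the driver): e1000_id_led_init_pchlan() above
 * pulls a 4-bit mode nibble per LED out of the NVM default word (the (i << 2)
 * extraction shift) and rewrites a field per LED in the PHY LED configuration
 * value (the (i * 5) insertion shift).  The helper below shows only that
 * clear-and-insert step; the names and mask parameter are hypothetical.
 */
#include <stdint.h>

static uint32_t sketch_set_led_field(uint32_t ledctl, unsigned int led,
                                     uint32_t mode, uint32_t field_mask)
{
        unsigned int shift = led * 5;           /* 5-bit stride per LED in the PHY value */

        ledctl &= ~(field_mask << shift);       /* clear the LED's field */
        ledctl |= (mode << shift);              /* insert the new mode */
        return ledctl;
}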
4865
4866 /**
4867  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4868  *  @hw: pointer to the HW structure
4869  *
4870  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4871  *  register, so the bus width is hard coded.
4872  **/
4873 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4874 {
4875         struct e1000_bus_info *bus = &hw->bus;
4876         s32 ret_val;
4877
4878         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4879
4880         ret_val = e1000_get_bus_info_pcie_generic(hw);
4881
4882         /* ICH devices are "PCI Express"-ish.  They have
4883          * a configuration space, but do not contain
4884          * PCI Express Capability registers, so bus width
4885          * must be hardcoded.
4886          */
4887         if (bus->width == e1000_bus_width_unknown)
4888                 bus->width = e1000_bus_width_pcie_x1;
4889
4890         return ret_val;
4891 }
4892
4893 /**
4894  *  e1000_reset_hw_ich8lan - Reset the hardware
4895  *  @hw: pointer to the HW structure
4896  *
4897  *  Does a full reset of the hardware which includes a reset of the PHY and
4898  *  MAC.
4899  **/
4900 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4901 {
4902         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4903         u16 kum_cfg;
4904         u32 ctrl, reg;
4905         s32 ret_val;
4906
4907         DEBUGFUNC("e1000_reset_hw_ich8lan");
4908
4909         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4910          * on the last TLP read/write transaction when MAC is reset.
4911          */
4912         ret_val = e1000_disable_pcie_master_generic(hw);
4913         if (ret_val)
4914                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4915
4916         DEBUGOUT("Masking off all interrupts\n");
4917         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4918
4919         /* Disable the Transmit and Receive units.  Then delay to allow
4920          * any pending transactions to complete before we hit the MAC
4921          * with the global reset.
4922          */
4923         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4924         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4925         E1000_WRITE_FLUSH(hw);
4926
4927         msec_delay(10);
4928
4929         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4930         if (hw->mac.type == e1000_ich8lan) {
4931                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4932                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4933                 /* Set Packet Buffer Size to 16k. */
4934                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4935         }
4936
4937         if (hw->mac.type == e1000_pchlan) {
4938                 /* Save the NVM K1 bit setting */
4939                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4940                 if (ret_val)
4941                         return ret_val;
4942
4943                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4944                         dev_spec->nvm_k1_enabled = true;
4945                 else
4946                         dev_spec->nvm_k1_enabled = false;
4947         }
4948
4949         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4950
4951         if (!hw->phy.ops.check_reset_block(hw)) {
4952                 /* Full-chip reset requires MAC and PHY reset at the same
4953                  * time to make sure the interface between MAC and the
4954                  * external PHY is reset.
4955                  */
4956                 ctrl |= E1000_CTRL_PHY_RST;
4957
4958                 /* Gate automatic PHY configuration by hardware on
4959                  * non-managed 82579
4960                  */
4961                 if ((hw->mac.type == e1000_pch2lan) &&
4962                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4963                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4964         }
4965         ret_val = e1000_acquire_swflag_ich8lan(hw);
4966         DEBUGOUT("Issuing a global reset to ich8lan\n");
4967         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4968         /* cannot issue a flush here because it hangs the hardware */
4969         msec_delay(20);
4970
4971         /* Set Phy Config Counter to 50msec */
4972         if (hw->mac.type == e1000_pch2lan) {
4973                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4974                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4975                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4976                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4977         }
4978
4979         if (!ret_val)
4980                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4981
4982         if (ctrl & E1000_CTRL_PHY_RST) {
4983                 ret_val = hw->phy.ops.get_cfg_done(hw);
4984                 if (ret_val)
4985                         return ret_val;
4986
4987                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4988                 if (ret_val)
4989                         return ret_val;
4990         }
4991
4992         /* For PCH, this write will make sure that any noise
4993          * will be detected as a CRC error and be dropped rather than show up
4994          * as a bad packet to the DMA engine.
4995          */
4996         if (hw->mac.type == e1000_pchlan)
4997                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4998
4999         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5000         E1000_READ_REG(hw, E1000_ICR);
5001
5002         reg = E1000_READ_REG(hw, E1000_KABGTXD);
5003         reg |= E1000_KABGTXD_BGSQLBIAS;
5004         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5005
5006         return E1000_SUCCESS;
5007 }
5008
5009 /**
5010  *  e1000_init_hw_ich8lan - Initialize the hardware
5011  *  @hw: pointer to the HW structure
5012  *
5013  *  Prepares the hardware for transmit and receive by doing the following:
5014  *   - initialize hardware bits
5015  *   - initialize LED identification
5016  *   - setup receive address registers
5017  *   - setup flow control
5018  *   - setup transmit descriptors
5019  *   - clear statistics
5020  **/
5021 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5022 {
5023         struct e1000_mac_info *mac = &hw->mac;
5024         u32 ctrl_ext, txdctl, snoop;
5025         s32 ret_val;
5026         u16 i;
5027
5028         DEBUGFUNC("e1000_init_hw_ich8lan");
5029
5030         e1000_initialize_hw_bits_ich8lan(hw);
5031
5032         /* Initialize identification LED */
5033         ret_val = mac->ops.id_led_init(hw);
5034         /* An error is not fatal and we should not stop init due to this */
5035         if (ret_val)
5036                 DEBUGOUT("Error initializing identification LED\n");
5037
5038         /* Setup the receive address. */
5039         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5040
5041         /* Zero out the Multicast HASH table */
5042         DEBUGOUT("Zeroing the MTA\n");
5043         for (i = 0; i < mac->mta_reg_count; i++)
5044                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5045
5046         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
5047          * the ME.  Disable wakeup by clearing the host wakeup bit.
5048          * Reset the phy after disabling host wakeup to reset the Rx buffer.
5049          */
5050         if (hw->phy.type == e1000_phy_82578) {
5051                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5052                 i &= ~BM_WUC_HOST_WU_BIT;
5053                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5054                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5055                 if (ret_val)
5056                         return ret_val;
5057         }
5058
5059         /* Setup link and flow control */
5060         ret_val = mac->ops.setup_link(hw);
5061
5062         /* Set the transmit descriptor write-back policy for both queues */
5063         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5064         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5065                   E1000_TXDCTL_FULL_TX_DESC_WB);
5066         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5067                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5068         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5069         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5070         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5071                   E1000_TXDCTL_FULL_TX_DESC_WB);
5072         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5073                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5074         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5075
5076         /* ICH8 has opposite polarity of no_snoop bits.
5077          * By default, we should use snoop behavior.
5078          */
5079         if (mac->type == e1000_ich8lan)
5080                 snoop = PCIE_ICH8_SNOOP_ALL;
5081         else
5082                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5083         e1000_set_pcie_no_snoop_generic(hw, snoop);
5084
5085         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5086         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5087         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5088
5089         /* Clear all of the statistics registers (clear on read).  It is
5090          * important that we do this after we have tried to establish link
5091          * because the symbol error count will increment wildly if there
5092          * is no link.
5093          */
5094         e1000_clear_hw_cntrs_ich8lan(hw);
5095
5096         return ret_val;
5097 }
5098
5099 /**
5100  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5101  *  @hw: pointer to the HW structure
5102  *
5103  *  Sets/Clears required hardware bits necessary for correctly setting up the
5104  *  hardware for transmit and receive.
5105  **/
5106 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5107 {
5108         u32 reg;
5109
5110         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5111
5112         /* Extended Device Control */
5113         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5114         reg |= (1 << 22);
5115         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5116         if (hw->mac.type >= e1000_pchlan)
5117                 reg |= E1000_CTRL_EXT_PHYPDEN;
5118         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5119
5120         /* Transmit Descriptor Control 0 */
5121         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5122         reg |= (1 << 22);
5123         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5124
5125         /* Transmit Descriptor Control 1 */
5126         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5127         reg |= (1 << 22);
5128         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5129
5130         /* Transmit Arbitration Control 0 */
5131         reg = E1000_READ_REG(hw, E1000_TARC(0));
5132         if (hw->mac.type == e1000_ich8lan)
5133                 reg |= (1 << 28) | (1 << 29);
5134         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5135         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5136
5137         /* Transmit Arbitration Control 1 */
5138         reg = E1000_READ_REG(hw, E1000_TARC(1));
5139         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5140                 reg &= ~(1 << 28);
5141         else
5142                 reg |= (1 << 28);
5143         reg |= (1 << 24) | (1 << 26) | (1 << 30);
5144         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5145
5146         /* Device Status */
5147         if (hw->mac.type == e1000_ich8lan) {
5148                 reg = E1000_READ_REG(hw, E1000_STATUS);
5149                 reg &= ~(1 << 31);
5150                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5151         }
5152
5153         /* Work around a descriptor data corruption issue during NFSv2 UDP
5154          * traffic by disabling the NFS filtering capability.
5155          */
5156         reg = E1000_READ_REG(hw, E1000_RFCTL);
5157         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5158
5159         /* Disable IPv6 extension header parsing because some malformed
5160          * IPv6 headers can hang the Rx.
5161          */
5162         if (hw->mac.type == e1000_ich8lan)
5163                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5164         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5165
5166         /* Enable ECC on Lynxpoint */
5167         if (hw->mac.type >= e1000_pch_lpt) {
5168                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5169                 reg |= E1000_PBECCSTS_ECC_ENABLE;
5170                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5171
5172                 reg = E1000_READ_REG(hw, E1000_CTRL);
5173                 reg |= E1000_CTRL_MEHE;
5174                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5175         }
5176
5177         return;
5178 }
5179
5180 /**
5181  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5182  *  @hw: pointer to the HW structure
5183  *
5184  *  Determines which flow control settings to use, then configures flow
5185  *  control.  Calls the appropriate media-specific link configuration
5186  *  function.  Assuming the adapter has a valid link partner, a valid link
5187  *  should be established.  Assumes the hardware has previously been reset
5188  *  and the transmitter and receiver are not enabled.
5189  **/
5190 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5191 {
5192         s32 ret_val;
5193
5194         DEBUGFUNC("e1000_setup_link_ich8lan");
5195
5196         if (hw->phy.ops.check_reset_block(hw))
5197                 return E1000_SUCCESS;
5198
5199         /* ICH parts do not have a word in the NVM to determine
5200          * the default flow control setting, so we explicitly
5201          * set it to full.
5202          */
5203         if (hw->fc.requested_mode == e1000_fc_default)
5204                 hw->fc.requested_mode = e1000_fc_full;
5205
5206         /* Save off the requested flow control mode for use later.  Depending
5207          * on the link partner's capabilities, we may or may not use this mode.
5208          */
5209         hw->fc.current_mode = hw->fc.requested_mode;
5210
5211         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5212                 hw->fc.current_mode);
5213
5214         /* Continue to configure the copper link. */
5215         ret_val = hw->mac.ops.setup_physical_interface(hw);
5216         if (ret_val)
5217                 return ret_val;
5218
5219         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5220         if ((hw->phy.type == e1000_phy_82578) ||
5221             (hw->phy.type == e1000_phy_82579) ||
5222             (hw->phy.type == e1000_phy_i217) ||
5223             (hw->phy.type == e1000_phy_82577)) {
5224                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5225
5226                 ret_val = hw->phy.ops.write_reg(hw,
5227                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
5228                                              hw->fc.pause_time);
5229                 if (ret_val)
5230                         return ret_val;
5231         }
5232
5233         return e1000_set_fc_watermarks_generic(hw);
5234 }
5235
5236 /**
5237  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5238  *  @hw: pointer to the HW structure
5239  *
5240  *  Configures the kumeran interface to the PHY to wait the appropriate time
5241  *  when polling the PHY, then call the generic setup_copper_link to finish
5242  *  configuring the copper link.
5243  **/
5244 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5245 {
5246         u32 ctrl;
5247         s32 ret_val;
5248         u16 reg_data;
5249
5250         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5251
5252         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5253         ctrl |= E1000_CTRL_SLU;
5254         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5255         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5256
5257         /* Set the mac to wait the maximum time between each iteration
5258          * and increase the max iterations when polling the phy;
5259          * this fixes erroneous timeouts at 10Mbps.
5260          */
5261         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5262                                                0xFFFF);
5263         if (ret_val)
5264                 return ret_val;
5265         ret_val = e1000_read_kmrn_reg_generic(hw,
5266                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
5267                                               &reg_data);
5268         if (ret_val)
5269                 return ret_val;
5270         reg_data |= 0x3F;
5271         ret_val = e1000_write_kmrn_reg_generic(hw,
5272                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
5273                                                reg_data);
5274         if (ret_val)
5275                 return ret_val;
5276
5277         switch (hw->phy.type) {
5278         case e1000_phy_igp_3:
5279                 ret_val = e1000_copper_link_setup_igp(hw);
5280                 if (ret_val)
5281                         return ret_val;
5282                 break;
5283         case e1000_phy_bm:
5284         case e1000_phy_82578:
5285                 ret_val = e1000_copper_link_setup_m88(hw);
5286                 if (ret_val)
5287                         return ret_val;
5288                 break;
5289         case e1000_phy_82577:
5290         case e1000_phy_82579:
5291                 ret_val = e1000_copper_link_setup_82577(hw);
5292                 if (ret_val)
5293                         return ret_val;
5294                 break;
5295         case e1000_phy_ife:
5296                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5297                                                &reg_data);
5298                 if (ret_val)
5299                         return ret_val;
5300
5301                 reg_data &= ~IFE_PMC_AUTO_MDIX;
5302
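                /* hw->phy.mdix selects the MDI/MDI-X policy (assumed encoding:
                 * 0 = auto-MDIX, 1 = force MDI, 2 = force MDI-X), so case 1
                 * clears and case 2 sets IFE_PMC_FORCE_MDIX below.
                 */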
5303                 switch (hw->phy.mdix) {
5304                 case 1:
5305                         reg_data &= ~IFE_PMC_FORCE_MDIX;
5306                         break;
5307                 case 2:
5308                         reg_data |= IFE_PMC_FORCE_MDIX;
5309                         break;
5310                 case 0:
5311                 default:
5312                         reg_data |= IFE_PMC_AUTO_MDIX;
5313                         break;
5314                 }
5315                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5316                                                 reg_data);
5317                 if (ret_val)
5318                         return ret_val;
5319                 break;
5320         default:
5321                 break;
5322         }
5323
5324         return e1000_setup_copper_link_generic(hw);
5325 }
5326
5327 /**
5328  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5329  *  @hw: pointer to the HW structure
5330  *
5331  *  Calls the PHY specific link setup function and then calls the
5332  *  generic setup_copper_link to finish configuring the link for
5333  *  Lynxpoint PCH devices
5334  **/
5335 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5336 {
5337         u32 ctrl;
5338         s32 ret_val;
5339
5340         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5341
5342         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5343         ctrl |= E1000_CTRL_SLU;
5344         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5345         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5346
5347         ret_val = e1000_copper_link_setup_82577(hw);
5348         if (ret_val)
5349                 return ret_val;
5350
5351         return e1000_setup_copper_link_generic(hw);
5352 }
5353
5354 /**
5355  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5356  *  @hw: pointer to the HW structure
5357  *  @speed: pointer to store current link speed
5358  *  @duplex: pointer to store the current link duplex
5359  *
5360  *  Calls the generic get_speed_and_duplex to retrieve the current link
5361  *  information and then calls the Kumeran lock loss workaround for links at
5362  *  gigabit speeds.
5363  **/
5364 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5365                                           u16 *duplex)
5366 {
5367         s32 ret_val;
5368
5369         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5370
5371         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5372         if (ret_val)
5373                 return ret_val;
5374
5375         if ((hw->mac.type == e1000_ich8lan) &&
5376             (hw->phy.type == e1000_phy_igp_3) &&
5377             (*speed == SPEED_1000)) {
5378                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5379         }
5380
5381         return ret_val;
5382 }
5383
5384 /**
5385  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5386  *  @hw: pointer to the HW structure
5387  *
5388  *  Work-around for 82566 Kumeran PCS lock loss:
5389  *  On a link status change (e.g. PCI reset, speed change), when link is up and
5390  *  speed is gigabit:
5391  *    0) if workaround is optionally disabled do nothing
5392  *    1) wait 1ms for Kumeran link to come up
5393  *    2) check Kumeran Diagnostic register PCS lock loss bit
5394  *    3) if not set the link is locked (all is good), otherwise...
5395  *    4) reset the PHY
5396  *    5) repeat up to 10 times
5397  *  Note: this is only called for IGP3 copper when speed is 1 Gb/s.
5398  **/
5399 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5400 {
5401         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5402         u32 phy_ctrl;
5403         s32 ret_val;
5404         u16 i, data;
5405         bool link;
5406
5407         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5408
5409         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5410                 return E1000_SUCCESS;
5411
5412         /* Make sure link is up before proceeding.  If not, just return.
5413          * Attempting this while the link is negotiating fouls up link
5414          * stability.
5415          */
5416         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5417         if (!link)
5418                 return E1000_SUCCESS;
5419
5420         for (i = 0; i < 10; i++) {
5421                 /* read once to clear */
5422                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5423                 if (ret_val)
5424                         return ret_val;
5425                 /* and again to get new status */
5426                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5427                 if (ret_val)
5428                         return ret_val;
5429
5430                 /* check for PCS lock */
5431                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5432                         return E1000_SUCCESS;
5433
5434                 /* Issue PHY reset */
5435                 hw->phy.ops.reset(hw);
5436                 msec_delay_irq(5);
5437         }
5438         /* Disable GigE link negotiation */
5439         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5440         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5441                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5442         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5443
5444         /* Call gig speed drop workaround on Gig disable before accessing
5445          * any PHY registers
5446          */
5447         e1000_gig_downshift_workaround_ich8lan(hw);
5448
5449         /* unable to acquire PCS lock */
5450         return -E1000_ERR_PHY;
5451 }
5452
5453 /**
5454  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5455  *  @hw: pointer to the HW structure
5456  *  @state: boolean value used to set the current Kumeran workaround state
5457  *
5458  *  If ICH8, set the current Kumeran workaround state (enabled = true,
5459  *  disabled = false).
5460  **/
5461 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5462                                                  bool state)
5463 {
5464         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5465
5466         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5467
5468         if (hw->mac.type != e1000_ich8lan) {
5469                 DEBUGOUT("Workaround applies to ICH8 only.\n");
5470                 return;
5471         }
5472
5473         dev_spec->kmrn_lock_loss_workaround_enabled = state;
5474
5475         return;
5476 }
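
/* A minimal usage sketch for the toggle above (the calling context is
 * assumed, not taken from this file); the function itself ignores parts
 * other than ICH8:
 *
 *      e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 */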
5477
5478 /**
5479  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5480  *  @hw: pointer to the HW structure
5481  *
5482  *  Workaround for 82566 power-down on D3 entry:
5483  *    1) disable gigabit link
5484  *    2) write VR power-down enable
5485  *    3) read it back
5486  *  Continue if successful, else issue LCD reset and repeat
5487  **/
5488 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5489 {
5490         u32 reg;
5491         u16 data;
5492         u8  retry = 0;
5493
5494         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5495
5496         if (hw->phy.type != e1000_phy_igp_3)
5497                 return;
5498
5499         /* Try the workaround twice (if needed) */
5500         do {
5501                 /* Disable link */
5502                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5503                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5504                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5505                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5506
5507                 /* Call gig speed drop workaround on Gig disable before
5508                  * accessing any PHY registers
5509                  */
5510                 if (hw->mac.type == e1000_ich8lan)
5511                         e1000_gig_downshift_workaround_ich8lan(hw);
5512
5513                 /* Write VR power-down enable */
5514                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5515                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5516                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5517                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5518
5519                 /* Read it back and test */
5520                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5521                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5522                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5523                         break;
5524
5525                 /* Issue PHY reset and repeat at most one more time */
5526                 reg = E1000_READ_REG(hw, E1000_CTRL);
5527                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5528                 retry++;
5529         } while (retry);
5530 }
5531
5532 /**
5533  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5534  *  @hw: pointer to the HW structure
5535  *
5536  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5537  *  LPLU, Gig disable, MDIC PHY reset):
5538  *    1) Set Kumeran Near-end loopback
5539  *    2) Clear Kumeran Near-end loopback
5540  *  Should only be called for ICH8[m] devices with any 1G PHY.
5541  **/
5542 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5543 {
5544         s32 ret_val;
5545         u16 reg_data;
5546
5547         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5548
5549         if ((hw->mac.type != e1000_ich8lan) ||
5550             (hw->phy.type == e1000_phy_ife))
5551                 return;
5552
5553         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5554                                               &reg_data);
5555         if (ret_val)
5556                 return;
5557         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5558         ret_val = e1000_write_kmrn_reg_generic(hw,
5559                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
5560                                                reg_data);
5561         if (ret_val)
5562                 return;
5563         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5564         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5565                                      reg_data);
5566 }
5567
5568 /**
5569  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5570  *  @hw: pointer to the HW structure
5571  *
5572  *  During S0 to Sx transition, it is possible the link remains at gig
5573  *  instead of negotiating to a lower speed.  Before going to Sx, set
5574  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5575  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5576  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5577  *  needs to be written.
5578  *  Parts that support (and are linked to a partner which supports) EEE in
5579  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5580  *  than 10Mbps w/o EEE.
5581  **/
5582 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5583 {
5584         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5585         u32 phy_ctrl;
5586         s32 ret_val;
5587
5588         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5589
5590         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5591         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5592
5593         if (hw->phy.type == e1000_phy_i217) {
5594                 u16 phy_reg, device_id = hw->device_id;
5595
5596                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5597                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5598                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5599                     (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5600                     (hw->mac.type >= e1000_pch_spt)) {
5601                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5602
5603                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5604                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5605                 }
5606
5607                 ret_val = hw->phy.ops.acquire(hw);
5608                 if (ret_val)
5609                         goto out;
5610
5611                 if (!dev_spec->eee_disable) {
5612                         u16 eee_advert;
5613
5614                         ret_val =
5615                             e1000_read_emi_reg_locked(hw,
5616                                                       I217_EEE_ADVERTISEMENT,
5617                                                       &eee_advert);
5618                         if (ret_val)
5619                                 goto release;
5620
5621                         /* Disable LPLU if both link partners support 100BaseT
5622                          * EEE and 100Full is advertised on both ends of the
5623                          * link, and enable Auto Enable LPI since there will
5624                          * be no driver to enable LPI while in Sx.
5625                          */
5626                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5627                             (dev_spec->eee_lp_ability &
5628                              I82579_EEE_100_SUPPORTED) &&
5629                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5630                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5631                                               E1000_PHY_CTRL_NOND0A_LPLU);
5632
5633                                 /* Set Auto Enable LPI after link up */
5634                                 hw->phy.ops.read_reg_locked(hw,
5635                                                             I217_LPI_GPIO_CTRL,
5636                                                             &phy_reg);
5637                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5638                                 hw->phy.ops.write_reg_locked(hw,
5639                                                              I217_LPI_GPIO_CTRL,
5640                                                              phy_reg);
5641                         }
5642                 }
5643
5644                 /* For i217 Intel Rapid Start Technology support,
5645                  * when the system is going into Sx and no manageability engine
5646                  * is present, the driver must configure proxy to reset only on
5647                  * power good.  LPI (Low Power Idle) state must also reset only
5648                  * on power good, as well as the MTA (Multicast table array).
5649                  * The SMBus release must also be disabled on LCD reset.
5650                  */
5651                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5652                       E1000_ICH_FWSM_FW_VALID)) {
5653                         /* Enable proxy to reset only on power good. */
5654                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5655                                                     &phy_reg);
5656                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5657                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5658                                                      phy_reg);
5659
5660                         /* Set the bit that enables LPI (EEE) to reset only
5661                          * on power good.
5662                          */
5663                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5664                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5665                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5666
5667                         /* Disable the SMB release on LCD reset. */
5668                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5669                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5670                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5671                 }
5672
5673                 /* Enable MTA to reset for Intel Rapid Start Technology
5674                  * Support
5675                  */
5676                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5677                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5678                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5679
5680 release:
5681                 hw->phy.ops.release(hw);
5682         }
5683 out:
5684         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5685
5686         if (hw->mac.type == e1000_ich8lan)
5687                 e1000_gig_downshift_workaround_ich8lan(hw);
5688
5689         if (hw->mac.type >= e1000_pchlan) {
5690                 e1000_oem_bits_config_ich8lan(hw, false);
5691
5692                 /* Reset PHY to activate OEM bits on 82577/8 */
5693                 if (hw->mac.type == e1000_pchlan)
5694                         e1000_phy_hw_reset_generic(hw);
5695
5696                 ret_val = hw->phy.ops.acquire(hw);
5697                 if (ret_val)
5698                         return;
5699                 e1000_write_smbus_addr(hw);
5700                 hw->phy.ops.release(hw);
5701         }
5702
5703         return;
5704 }
5705
5706 /**
5707  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5708  *  @hw: pointer to the HW structure
5709  *
5710  *  During Sx to S0 transitions on non-managed devices or managed devices
5711  *  on which PHY resets are not blocked, if the PHY registers cannot be
5712  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5713  *  the PHY.
5714  *  On i217, setup Intel Rapid Start Technology.
5715  **/
5716 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5717 {
5718         s32 ret_val;
5719
5720         DEBUGFUNC("e1000_resume_workarounds_pchlan");
5721         if (hw->mac.type < e1000_pch2lan)
5722                 return E1000_SUCCESS;
5723
5724         ret_val = e1000_init_phy_workarounds_pchlan(hw);
5725         if (ret_val) {
5726                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5727                 return ret_val;
5728         }
5729
5730         /* For i217 Intel Rapid Start Technology support when the system
5731          * is transitioning from Sx and no manageability engine is present
5732          * configure SMBus to restore on reset, disable proxy, and enable
5733          * the reset on MTA (Multicast table array).
5734          */
5735         if (hw->phy.type == e1000_phy_i217) {
5736                 u16 phy_reg;
5737
5738                 ret_val = hw->phy.ops.acquire(hw);
5739                 if (ret_val) {
5740                         DEBUGOUT("Failed to setup iRST\n");
5741                         return ret_val;
5742                 }
5743
5744                 /* Clear Auto Enable LPI after link up */
5745                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5746                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5747                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5748
5749                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5750                     E1000_ICH_FWSM_FW_VALID)) {
5751                         /* Restore clear on SMB if no manageability engine
5752                          * is present
5753                          */
5754                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5755                                                               &phy_reg);
5756                         if (ret_val)
5757                                 goto release;
5758                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5759                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5760
5761                         /* Disable Proxy */
5762                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5763                 }
5764                 /* Enable reset on MTA */
5765                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5766                                                       &phy_reg);
5767                 if (ret_val)
5768                         goto release;
5769                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5770                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5771 release:
5772                 if (ret_val)
5773                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5774                 hw->phy.ops.release(hw);
5775                 return ret_val;
5776         }
5777         return E1000_SUCCESS;
5778 }
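
/* A hedged sketch of how a driver's power-management path might pair the
 * Sx entry/exit workarounds above (the hook names shown are assumed, not
 * part of this file):
 *
 *      static void hyp_prepare_suspend(struct e1000_hw *hw)
 *      {
 *              e1000_suspend_workarounds_ich8lan(hw);
 *      }
 *
 *      static s32 hyp_finish_resume(struct e1000_hw *hw)
 *      {
 *              return (s32)e1000_resume_workarounds_pchlan(hw);
 *      }
 */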
5779
5780 /**
5781  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5782  *  @hw: pointer to the HW structure
5783  *
5784  *  Return the LED back to the default configuration.
5785  **/
5786 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5787 {
5788         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5789
5790         if (hw->phy.type == e1000_phy_ife)
5791                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5792                                              0);
5793
5794         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5795         return E1000_SUCCESS;
5796 }
5797
5798 /**
5799  *  e1000_led_on_ich8lan - Turn LEDs on
5800  *  @hw: pointer to the HW structure
5801  *
5802  *  Turn on the LEDs.
5803  **/
5804 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5805 {
5806         DEBUGFUNC("e1000_led_on_ich8lan");
5807
5808         if (hw->phy.type == e1000_phy_ife)
5809                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5810                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5811
5812         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5813         return E1000_SUCCESS;
5814 }
5815
5816 /**
5817  *  e1000_led_off_ich8lan - Turn LEDs off
5818  *  @hw: pointer to the HW structure
5819  *
5820  *  Turn off the LEDs.
5821  **/
5822 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5823 {
5824         DEBUGFUNC("e1000_led_off_ich8lan");
5825
5826         if (hw->phy.type == e1000_phy_ife)
5827                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5828                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5829
5830         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5831         return E1000_SUCCESS;
5832 }
5833
5834 /**
5835  *  e1000_setup_led_pchlan - Configures SW controllable LED
5836  *  @hw: pointer to the HW structure
5837  *
5838  *  This prepares the SW controllable LED for use.
5839  **/
5840 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5841 {
5842         DEBUGFUNC("e1000_setup_led_pchlan");
5843
5844         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5845                                      (u16)hw->mac.ledctl_mode1);
5846 }
5847
5848 /**
5849  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5850  *  @hw: pointer to the HW structure
5851  *
5852  *  Return the LED back to the default configuration.
5853  **/
5854 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5855 {
5856         DEBUGFUNC("e1000_cleanup_led_pchlan");
5857
5858         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5859                                      (u16)hw->mac.ledctl_default);
5860 }
5861
5862 /**
5863  *  e1000_led_on_pchlan - Turn LEDs on
5864  *  @hw: pointer to the HW structure
5865  *
5866  *  Turn on the LEDs.
5867  **/
5868 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5869 {
5870         u16 data = (u16)hw->mac.ledctl_mode2;
5871         u32 i, led;
5872
5873         DEBUGFUNC("e1000_led_on_pchlan");
5874
5875         /* If no link, then turn LED on by setting the invert bit
5876          * for each LED whose mode is "link_up" in ledctl_mode2.
5877          */
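        /* Each per-LED field in HV_LED_CONFIG is assumed to be 5 bits wide
         * (a mode field plus invert/blink bits), which is why the loop below
         * shifts by (i * 5) and masks with E1000_PHY_LED0_MASK.
         */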
5878         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5879                 for (i = 0; i < 3; i++) {
5880                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5881                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5882                             E1000_LEDCTL_MODE_LINK_UP)
5883                                 continue;
5884                         if (led & E1000_PHY_LED0_IVRT)
5885                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5886                         else
5887                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5888                 }
5889         }
5890
5891         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5892 }
5893
5894 /**
5895  *  e1000_led_off_pchlan - Turn LEDs off
5896  *  @hw: pointer to the HW structure
5897  *
5898  *  Turn off the LEDs.
5899  **/
5900 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5901 {
5902         u16 data = (u16)hw->mac.ledctl_mode1;
5903         u32 i, led;
5904
5905         DEBUGFUNC("e1000_led_off_pchlan");
5906
5907         /* If no link, then turn LED off by clearing the invert bit
5908          * for each LED whose mode is "link_up" in ledctl_mode1.
5909          */
5910         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5911                 for (i = 0; i < 3; i++) {
5912                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5913                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5914                             E1000_LEDCTL_MODE_LINK_UP)
5915                                 continue;
5916                         if (led & E1000_PHY_LED0_IVRT)
5917                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5918                         else
5919                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5920                 }
5921         }
5922
5923         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5924 }
5925
5926 /**
5927  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5928  *  @hw: pointer to the HW structure
5929  *
5930  *  Read appropriate register for the config done bit for completion status
5931  *  and configure the PHY through s/w for EEPROM-less parts.
5932  *
5933  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5934  *  config done bit, so only an error is logged and execution continues.  If
5935  *  we were to return with an error, EEPROM-less silicon would not be able to
5936  *  be reset or change link.
5937  **/
5938 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5939 {
5940         s32 ret_val = E1000_SUCCESS;
5941         u32 bank = 0;
5942         u32 status;
5943
5944         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5945
5946         e1000_get_cfg_done_generic(hw);
5947
5948         /* Wait for indication from h/w that it has completed basic config */
5949         if (hw->mac.type >= e1000_ich10lan) {
5950                 e1000_lan_init_done_ich8lan(hw);
5951         } else {
5952                 ret_val = e1000_get_auto_rd_done_generic(hw);
5953                 if (ret_val) {
5954                         /* When auto config read does not complete, do not
5955                          * return with an error. This can happen in situations
5956                          * where there is no eeprom and prevents getting link.
5957                          */
5958                         DEBUGOUT("Auto Read Done did not complete\n");
5959                         ret_val = E1000_SUCCESS;
5960                 }
5961         }
5962
5963         /* Clear PHY Reset Asserted bit */
5964         status = E1000_READ_REG(hw, E1000_STATUS);
5965         if (status & E1000_STATUS_PHYRA)
5966                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5967         else
5968                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5969
5970         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5971         if (hw->mac.type <= e1000_ich9lan) {
5972                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5973                     (hw->phy.type == e1000_phy_igp_3)) {
5974                         e1000_phy_init_script_igp3(hw);
5975                 }
5976         } else {
5977                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5978                         /* Maybe we should do a basic PHY config */
5979                         DEBUGOUT("EEPROM not present\n");
5980                         ret_val = -E1000_ERR_CONFIG;
5981                 }
5982         }
5983
5984         return ret_val;
5985 }
5986
5987 /**
5988  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5989  * @hw: pointer to the HW structure
5990  *
5991 * In the case of a PHY power down to save power, to turn off link during a
5992 * driver unload, or when wake on LAN is not enabled, remove the link.
5993  **/
5994 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5995 {
5996         /* If the management interface is not enabled, then power down */
5997         if (!(hw->mac.ops.check_mng_mode(hw) ||
5998               hw->phy.ops.check_reset_block(hw)))
5999                 e1000_power_down_phy_copper(hw);
6000
6001         return;
6002 }
6003
6004 /**
6005  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6006  *  @hw: pointer to the HW structure
6007  *
6008  *  Clears hardware counters specific to the silicon family and calls
6009  *  clear_hw_cntrs_generic to clear all general purpose counters.
6010  **/
6011 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6012 {
6013         u16 phy_data;
6014         s32 ret_val;
6015
6016         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6017
6018         e1000_clear_hw_cntrs_base_generic(hw);
6019
6020         E1000_READ_REG(hw, E1000_ALGNERRC);
6021         E1000_READ_REG(hw, E1000_RXERRC);
6022         E1000_READ_REG(hw, E1000_TNCRS);
6023         E1000_READ_REG(hw, E1000_CEXTERR);
6024         E1000_READ_REG(hw, E1000_TSCTC);
6025         E1000_READ_REG(hw, E1000_TSCTFC);
6026
6027         E1000_READ_REG(hw, E1000_MGTPRC);
6028         E1000_READ_REG(hw, E1000_MGTPDC);
6029         E1000_READ_REG(hw, E1000_MGTPTC);
6030
6031         E1000_READ_REG(hw, E1000_IAC);
6032         E1000_READ_REG(hw, E1000_ICRXOC);
6033
6034         /* Clear PHY statistics registers */
6035         if ((hw->phy.type == e1000_phy_82578) ||
6036             (hw->phy.type == e1000_phy_82579) ||
6037             (hw->phy.type == e1000_phy_i217) ||
6038             (hw->phy.type == e1000_phy_82577)) {
6039                 ret_val = hw->phy.ops.acquire(hw);
6040                 if (ret_val)
6041                         return;
6042                 ret_val = hw->phy.ops.set_page(hw,
6043                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
6044                 if (ret_val)
6045                         goto release;
6046                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6047                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6048                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6049                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6050                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6051                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6052                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6053                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6054                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6055                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6056                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6057                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6058                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6059                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6060 release:
6061                 hw->phy.ops.release(hw);
6062         }
6063 }
6064
6065 /**
6066  *  e1000_configure_k0s_lpt - Configure K0s power state
6067  *  @hw: pointer to the HW structure
6068  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
6069  *      0 corresponds to 128ns, each value over 0 doubles the duration.
6070  *  @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
6071  *      0 corresponds to 128ns, each value over 0 doubles the duration.
6072  *
6073  *  Configure the K0s power state based on the provided parameters.
6074  *  Assumes semaphore already acquired.
6075  *
6076  *  Success returns 0, Failure returns:
6077  *      -E1000_ERR_PHY (-2) in case of access error
6078  *      -E1000_ERR_PARAM (-4) in case of parameters error
6079  **/
6080 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
6081 {
6082         s32 ret_val;
6083         u16 kmrn_reg = 0;
6084
6085         DEBUGFUNC("e1000_configure_k0s_lpt");
6086
6087         if (entry_latency > 3 || min_time > 4)
6088                 return -E1000_ERR_PARAM;
6089
6090         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6091                                              &kmrn_reg);
6092         if (ret_val)
6093                 return ret_val;
6094
6095         /* for now don't touch the latency */
6096         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
6097         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
6098
6099         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6100                                               kmrn_reg);
6101         if (ret_val)
6102                 return ret_val;
6103
6104         return E1000_SUCCESS;
6105 }
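
/* A hedged usage sketch for e1000_configure_k0s_lpt() (the caller shown is
 * assumed, not taken from this file).  Each encoded value corresponds to
 * 128ns << value, e.g. entry_latency = 2 is 512ns and min_time = 3 is
 * 1024ns; only min_time is applied for now, and the semaphore must already
 * be held, e.g. via hw->phy.ops.acquire():
 *
 *      ret_val = hw->phy.ops.acquire(hw);
 *      if (!ret_val) {
 *              ret_val = e1000_configure_k0s_lpt(hw, 2, 3);
 *              hw->phy.ops.release(hw);
 *      }
 */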