89265d21337217b27cbd4f80be3a12537e5749d1
[dpdk.git] / drivers / net / e1000 / base / e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
98                                u16 *data);
99 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
100                                     u16 words, u16 *data);
101 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
102 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
103 STATIC s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
104 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
105                                             u16 *data);
106 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
107 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
110 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
111 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
112 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
113 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
114                                            u16 *speed, u16 *duplex);
115 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
116 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
117 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
118 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
119 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
120 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
121 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
122 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
123 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
125 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
126 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
127 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
128                                           u32 offset, u8 *data);
129 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
130                                           u8 size, u16 *data);
131 STATIC s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
132                                             u32 *data);
133 STATIC s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134                                            u32 offset, u32 *data);
135 STATIC s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
136                                              u32 offset, u32 data);
137 STATIC s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138                                                   u32 offset, u32 dword);
139 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
140                                           u32 offset, u16 *data);
141 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
142                                                  u32 offset, u8 byte);
143 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
144 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
145 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
146 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
147 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
148 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
149
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval; /* raw 16-bit register image */
};
166
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved (field name is historical;
				  * these bits are reserved, not a lock-down
				  * control) */
	} hsf_ctrl;
	u16 regval; /* raw 16-bit register image */
};
179
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): the bit-fields above describe a 32-bit layout, but
	 * regval is only u16 and therefore aliases just the low half (grra
	 * and grwa).  This matches the long-standing upstream definition —
	 * confirm intent before widening to u32.
	 */
	u16 regval;
};
190
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 *
 *  Returns true if the PHY responded with a usable ID, false otherwise.
 **/
STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	/* Try reading PHY_ID1/PHY_ID2 up to twice; 0xFFFF means the PHY
	 * did not respond (e.g. interconnect still stuck in SMBus mode).
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		/* ID already known (resume path): must match exactly */
		if  (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		/* First access: accept any valid (non-zero) ID */
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		/* Slow-mode helpers take the semaphore themselves, so
		 * release it around the retry and re-acquire afterwards.
		 */
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}
	}

	return true;
}
265
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit: drive the pin low under software
	 * override, hold for 1ms, then release the override so the pin
	 * returns to hardware control.  Flushes force posted writes out
	 * before each delay.
	 */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		/* Poll up to ~100ms (20 * 5ms) for the LAN connected
		 * device (LPCD) indication, then allow 30ms to settle.
		 */
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}
309
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 *
 *  Returns E1000_SUCCESS, an acquire failure code, or -E1000_ERR_PHY when
 *  the PHY cannot be made accessible.
 **/
STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

#ifdef ULP_SUPPORT
	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;

#endif /* ULP_SUPPORT */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 *
	 * The cases below intentionally fall through: each newer MAC type
	 * tries progressively more invasive recovery steps.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		/* On original PCH with valid ME firmware, the interconnect
		 * is already managed; skip the toggle.
		 */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}
444
445 /**
446  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
447  *  @hw: pointer to the HW structure
448  *
449  *  Initialize family-specific PHY parameters and function pointers.
450  **/
451 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
452 {
453         struct e1000_phy_info *phy = &hw->phy;
454         s32 ret_val;
455
456         DEBUGFUNC("e1000_init_phy_params_pchlan");
457
458         phy->addr               = 1;
459         phy->reset_delay_us     = 100;
460
461         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
462         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
463         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
464         phy->ops.set_page       = e1000_set_page_igp;
465         phy->ops.read_reg       = e1000_read_phy_reg_hv;
466         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
467         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
468         phy->ops.release        = e1000_release_swflag_ich8lan;
469         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
470         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
471         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
472         phy->ops.write_reg      = e1000_write_phy_reg_hv;
473         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
474         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
475         phy->ops.power_up       = e1000_power_up_phy_copper;
476         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
477         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
478
479         phy->id = e1000_phy_unknown;
480
481         ret_val = e1000_init_phy_workarounds_pchlan(hw);
482         if (ret_val)
483                 return ret_val;
484
485         if (phy->id == e1000_phy_unknown)
486                 switch (hw->mac.type) {
487                 default:
488                         ret_val = e1000_get_phy_id(hw);
489                         if (ret_val)
490                                 return ret_val;
491                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
492                                 break;
493                         /* fall-through */
494                 case e1000_pch2lan:
495                 case e1000_pch_lpt:
496                 case e1000_pch_spt:
497                         /* In case the PHY needs to be in mdio slow mode,
498                          * set slow mode and try to get the PHY id again.
499                          */
500                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
501                         if (ret_val)
502                                 return ret_val;
503                         ret_val = e1000_get_phy_id(hw);
504                         if (ret_val)
505                                 return ret_val;
506                         break;
507                 }
508         phy->type = e1000_get_phy_type_from_id(phy->id);
509
510         switch (phy->type) {
511         case e1000_phy_82577:
512         case e1000_phy_82579:
513         case e1000_phy_i217:
514                 phy->ops.check_polarity = e1000_check_polarity_82577;
515                 phy->ops.force_speed_duplex =
516                         e1000_phy_force_speed_duplex_82577;
517                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
518                 phy->ops.get_info = e1000_get_phy_info_82577;
519                 phy->ops.commit = e1000_phy_sw_reset_generic;
520                 break;
521         case e1000_phy_82578:
522                 phy->ops.check_polarity = e1000_check_polarity_m88;
523                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
524                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
525                 phy->ops.get_info = e1000_get_phy_info_m88;
526                 break;
527         default:
528                 ret_val = -E1000_ERR_PHY;
529                 break;
530         }
531
532         return ret_val;
533 }
534
535 /**
536  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
537  *  @hw: pointer to the HW structure
538  *
539  *  Initialize family-specific PHY parameters and function pointers.
540  **/
541 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
542 {
543         struct e1000_phy_info *phy = &hw->phy;
544         s32 ret_val;
545         u16 i = 0;
546
547         DEBUGFUNC("e1000_init_phy_params_ich8lan");
548
549         phy->addr               = 1;
550         phy->reset_delay_us     = 100;
551
552         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
553         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
554         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
555         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
556         phy->ops.read_reg       = e1000_read_phy_reg_igp;
557         phy->ops.release        = e1000_release_swflag_ich8lan;
558         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
559         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
560         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
561         phy->ops.write_reg      = e1000_write_phy_reg_igp;
562         phy->ops.power_up       = e1000_power_up_phy_copper;
563         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
564
565         /* We may need to do this twice - once for IGP and if that fails,
566          * we'll set BM func pointers and try again
567          */
568         ret_val = e1000_determine_phy_address(hw);
569         if (ret_val) {
570                 phy->ops.write_reg = e1000_write_phy_reg_bm;
571                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
572                 ret_val = e1000_determine_phy_address(hw);
573                 if (ret_val) {
574                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
575                         return ret_val;
576                 }
577         }
578
579         phy->id = 0;
580         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
581                (i++ < 100)) {
582                 msec_delay(1);
583                 ret_val = e1000_get_phy_id(hw);
584                 if (ret_val)
585                         return ret_val;
586         }
587
588         /* Verify phy id */
589         switch (phy->id) {
590         case IGP03E1000_E_PHY_ID:
591                 phy->type = e1000_phy_igp_3;
592                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
593                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
594                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
595                 phy->ops.get_info = e1000_get_phy_info_igp;
596                 phy->ops.check_polarity = e1000_check_polarity_igp;
597                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
598                 break;
599         case IFE_E_PHY_ID:
600         case IFE_PLUS_E_PHY_ID:
601         case IFE_C_E_PHY_ID:
602                 phy->type = e1000_phy_ife;
603                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
604                 phy->ops.get_info = e1000_get_phy_info_ife;
605                 phy->ops.check_polarity = e1000_check_polarity_ife;
606                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
607                 break;
608         case BME1000_E_PHY_ID:
609                 phy->type = e1000_phy_bm;
610                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
611                 phy->ops.read_reg = e1000_read_phy_reg_bm;
612                 phy->ops.write_reg = e1000_write_phy_reg_bm;
613                 phy->ops.commit = e1000_phy_sw_reset_generic;
614                 phy->ops.get_info = e1000_get_phy_info_m88;
615                 phy->ops.check_polarity = e1000_check_polarity_m88;
616                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
617                 break;
618         default:
619                 return -E1000_ERR_PHY;
620                 break;
621         }
622
623         return E1000_SUCCESS;
624 }
625
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_CONFIG when the flash register
 *  BAR is required but not mapped.
 **/
STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		/* STRAP bits 5:1 encode (flash size / NVM_SIZE_MULTIPLIER)
		 * minus one.
		 */
		nvm_size =
		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
		/* Total size covers two banks */
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
				       << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every word unmodified with an erased
	 * (0xFFFF) value.
	 */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
	nvm->ops.release        = e1000_release_nvm_ich8lan;
	/* SPT and newer use the GbE-memory-space flash access routines */
	if (hw->mac.type >= e1000_pch_spt) {
		nvm->ops.read   = e1000_read_nvm_spt;
		nvm->ops.update = e1000_update_nvm_checksum_spt;
	} else {
		nvm->ops.read   = e1000_read_nvm_ich8lan;
		nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	}
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write          = e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}
718
719 /**
720  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
721  *  @hw: pointer to the HW structure
722  *
723  *  Initialize family-specific MAC parameters and function
724  *  pointers.
725  **/
726 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
727 {
728         struct e1000_mac_info *mac = &hw->mac;
729 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
730         u16 pci_cfg;
731 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
732
733         DEBUGFUNC("e1000_init_mac_params_ich8lan");
734
735         /* Set media type function pointer */
736         hw->phy.media_type = e1000_media_type_copper;
737
738         /* Set mta register count */
739         mac->mta_reg_count = 32;
740         /* Set rar entry count */
741         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
742         if (mac->type == e1000_ich8lan)
743                 mac->rar_entry_count--;
744         /* Set if part includes ASF firmware */
745         mac->asf_firmware_present = true;
746         /* FWSM register */
747         mac->has_fwsm = true;
748         /* ARC subsystem not supported */
749         mac->arc_subsystem_valid = false;
750         /* Adaptive IFS supported */
751         mac->adaptive_ifs = true;
752
753         /* Function pointers */
754
755         /* bus type/speed/width */
756         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
757         /* function id */
758         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
759         /* reset */
760         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
761         /* hw initialization */
762         mac->ops.init_hw = e1000_init_hw_ich8lan;
763         /* link setup */
764         mac->ops.setup_link = e1000_setup_link_ich8lan;
765         /* physical interface setup */
766         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
767         /* check for link */
768         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
769         /* link info */
770         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
771         /* multicast address update */
772         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
773         /* clear hardware counters */
774         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
775
776         /* LED and other operations */
777         switch (mac->type) {
778         case e1000_ich8lan:
779         case e1000_ich9lan:
780         case e1000_ich10lan:
781                 /* check management mode */
782                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
783                 /* ID LED init */
784                 mac->ops.id_led_init = e1000_id_led_init_generic;
785                 /* blink LED */
786                 mac->ops.blink_led = e1000_blink_led_generic;
787                 /* setup LED */
788                 mac->ops.setup_led = e1000_setup_led_generic;
789                 /* cleanup LED */
790                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
791                 /* turn on/off LED */
792                 mac->ops.led_on = e1000_led_on_ich8lan;
793                 mac->ops.led_off = e1000_led_off_ich8lan;
794                 break;
795         case e1000_pch2lan:
796                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
797                 mac->ops.rar_set = e1000_rar_set_pch2lan;
798                 /* fall-through */
799         case e1000_pch_lpt:
800         case e1000_pch_spt:
801 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
802                 /* multicast address update for pch2 */
803                 mac->ops.update_mc_addr_list =
804                         e1000_update_mc_addr_list_pch2lan;
805                 /* fall-through */
806 #endif
807         case e1000_pchlan:
808 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
809                 /* save PCH revision_id */
810                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
811                 /* SPT uses full byte for revision ID,
812                  * as opposed to previous generations
813                  */
814                 if (hw->mac.type >= e1000_pch_spt)
815                         hw->revision_id = (u8)(pci_cfg &= 0x00FF);
816                 else
817                         hw->revision_id = (u8)(pci_cfg &= 0x000F);
818 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
819                 /* check management mode */
820                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
821                 /* ID LED init */
822                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
823                 /* setup LED */
824                 mac->ops.setup_led = e1000_setup_led_pchlan;
825                 /* cleanup LED */
826                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
827                 /* turn on/off LED */
828                 mac->ops.led_on = e1000_led_on_pchlan;
829                 mac->ops.led_off = e1000_led_off_pchlan;
830                 break;
831         default:
832                 break;
833         }
834
835         if (mac->type >= e1000_pch_lpt) {
836                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
837                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
838                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
839         }
840
841         /* Enable PCS Lock-loss workaround for ICH8 */
842         if (mac->type == e1000_ich8lan)
843                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
844
845         return E1000_SUCCESS;
846 }
847
848 /**
849  *  __e1000_access_emi_reg_locked - Read/write EMI register
850  *  @hw: pointer to the HW structure
851  *  @addr: EMI address to program
852  *  @data: pointer to value to read/write from/to the EMI address
853  *  @read: boolean flag to indicate read or write
854  *
855  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
856  **/
857 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
858                                          u16 *data, bool read)
859 {
860         s32 ret_val;
861
862         DEBUGFUNC("__e1000_access_emi_reg_locked");
863
864         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
865         if (ret_val)
866                 return ret_val;
867
868         if (read)
869                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
870                                                       data);
871         else
872                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
873                                                        *data);
874
875         return ret_val;
876 }
877
878 /**
879  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
880  *  @hw: pointer to the HW structure
881  *  @addr: EMI address to program
882  *  @data: value to be read from the EMI address
883  *
884  *  Assumes the SW/FW/HW Semaphore is already acquired.
885  **/
886 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
887 {
888         DEBUGFUNC("e1000_read_emi_reg_locked");
889
890         return __e1000_access_emi_reg_locked(hw, addr, data, true);
891 }
892
893 /**
894  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
895  *  @hw: pointer to the HW structure
896  *  @addr: EMI address to program
897  *  @data: value to be written to the EMI address
898  *
899  *  Assumes the SW/FW/HW Semaphore is already acquired.
900  **/
901 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
902 {
903         DEBUGFUNC("e1000_read_emi_reg_locked");
904
905         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
906 }
907
908 /**
909  *  e1000_set_eee_pchlan - Enable/disable EEE support
910  *  @hw: pointer to the HW structure
911  *
912  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
913  *  the link and the EEE capabilities of the link partner.  The LPI Control
914  *  register bits will remain set only if/when link is up.
915  *
916  *  EEE LPI must not be asserted earlier than one second after link is up.
917  *  On 82579, EEE LPI should not be enabled until such time otherwise there
918  *  can be link issues with some switches.  Other devices can have EEE LPI
919  *  enabled immediately upon link up since they have a timer in hardware which
920  *  prevents LPI from being asserted too early.
921  **/
922 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
923 {
924         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
925         s32 ret_val;
926         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
927
928         DEBUGFUNC("e1000_set_eee_pchlan");
929
930         switch (hw->phy.type) {
931         case e1000_phy_82579:
932                 lpa = I82579_EEE_LP_ABILITY;
933                 pcs_status = I82579_EEE_PCS_STATUS;
934                 adv_addr = I82579_EEE_ADVERTISEMENT;
935                 break;
936         case e1000_phy_i217:
937                 lpa = I217_EEE_LP_ABILITY;
938                 pcs_status = I217_EEE_PCS_STATUS;
939                 adv_addr = I217_EEE_ADVERTISEMENT;
940                 break;
941         default:
942                 return E1000_SUCCESS;
943         }
944
945         ret_val = hw->phy.ops.acquire(hw);
946         if (ret_val)
947                 return ret_val;
948
949         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
950         if (ret_val)
951                 goto release;
952
953         /* Clear bits that enable EEE in various speeds */
954         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
955
956         /* Enable EEE if not disabled by user */
957         if (!dev_spec->eee_disable) {
958                 /* Save off link partner's EEE ability */
959                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
960                                                     &dev_spec->eee_lp_ability);
961                 if (ret_val)
962                         goto release;
963
964                 /* Read EEE advertisement */
965                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
966                 if (ret_val)
967                         goto release;
968
969                 /* Enable EEE only for speeds in which the link partner is
970                  * EEE capable and for which we advertise EEE.
971                  */
972                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
973                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
974
975                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
976                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
977                         if (data & NWAY_LPAR_100TX_FD_CAPS)
978                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
979                         else
980                                 /* EEE is not supported in 100Half, so ignore
981                                  * partner's EEE in 100 ability if full-duplex
982                                  * is not advertised.
983                                  */
984                                 dev_spec->eee_lp_ability &=
985                                     ~I82579_EEE_100_SUPPORTED;
986                 }
987         }
988
989         if (hw->phy.type == e1000_phy_82579) {
990                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
991                                                     &data);
992                 if (ret_val)
993                         goto release;
994
995                 data &= ~I82579_LPI_100_PLL_SHUT;
996                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
997                                                      data);
998         }
999
1000         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
1001         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
1002         if (ret_val)
1003                 goto release;
1004
1005         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
1006 release:
1007         hw->phy.ops.release(hw);
1008
1009         return ret_val;
1010 }
1011
1012 /**
1013  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
1014  *  @hw:   pointer to the HW structure
1015  *  @link: link up bool flag
1016  *
1017  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1018  *  preventing further DMA write requests.  Workaround the issue by disabling
1019  *  the de-assertion of the clock request when in 1Gpbs mode.
1020  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1021  *  speeds in order to avoid Tx hangs.
1022  **/
1023 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1024 {
1025         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1026         u32 status = E1000_READ_REG(hw, E1000_STATUS);
1027         s32 ret_val = E1000_SUCCESS;
1028         u16 reg;
1029
1030         if (link && (status & E1000_STATUS_SPEED_1000)) {
1031                 ret_val = hw->phy.ops.acquire(hw);
1032                 if (ret_val)
1033                         return ret_val;
1034
1035                 ret_val =
1036                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1037                                                &reg);
1038                 if (ret_val)
1039                         goto release;
1040
1041                 ret_val =
1042                     e1000_write_kmrn_reg_locked(hw,
1043                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1044                                                 reg &
1045                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1046                 if (ret_val)
1047                         goto release;
1048
1049                 usec_delay(10);
1050
1051                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1052                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1053
1054                 ret_val =
1055                     e1000_write_kmrn_reg_locked(hw,
1056                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1057                                                 reg);
1058 release:
1059                 hw->phy.ops.release(hw);
1060         } else {
1061                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1062                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1063
1064                 if ((hw->phy.revision > 5) || !link ||
1065                     ((status & E1000_STATUS_SPEED_100) &&
1066                      (status & E1000_STATUS_FD)))
1067                         goto update_fextnvm6;
1068
1069                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1070                 if (ret_val)
1071                         return ret_val;
1072
1073                 /* Clear link status transmit timeout */
1074                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1075
1076                 if (status & E1000_STATUS_SPEED_100) {
1077                         /* Set inband Tx timeout to 5x10us for 100Half */
1078                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1079
1080                         /* Do not extend the K1 entry latency for 100Half */
1081                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1082                 } else {
1083                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1084                         reg |= 50 <<
1085                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1086
1087                         /* Extend the K1 entry latency for 10 Mbps */
1088                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1089                 }
1090
1091                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1092                 if (ret_val)
1093                         return ret_val;
1094
1095 update_fextnvm6:
1096                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1097         }
1098
1099         return ret_val;
1100 }
1101
#ifdef ULP_SUPPORT
/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;
	u16 oem_reg = 0;

	/* Nothing to do for pre-LPT parts, the listed device IDs, or when
	 * ULP is already on.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (!to_sx) {
		int i = 0;
		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;
			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
		/* Cable still connected after the timeout: do not enter ULP */
		if (!(E1000_READ_REG(hw, E1000_FEXT) &
		    E1000_FEXT_PHY_CABLE_DISCONNECTED))
			return 0;
	}

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	/* Software-driven ULP configuration from here on */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* During S0 Idle keep the phy in PCI-E mode */
	if (hw->dev_spec.ich8lan.smbus_disable)
		goto skip_smbus;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
	 * LPLU and disable Gig speed when entering ULP
	 * (original OEM bits are saved in oem_reg for possible restore below)
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);

		if (ret_val)
			goto release;
	}

skip_smbus:
	if (!to_sx) {
		/* Change the 'Link Status Change' interrupt to trigger
		 * on 'Cable Status Change'
		 */
		ret_val = e1000_read_kmrn_reg_locked(hw,
						     E1000_KMRNCTRLSTA_OP_MODES,
						     &phy_reg);
		if (ret_val)
			goto release;
		phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
		e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
					    phy_reg);
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		/* Sx entry: wake on link change only if WoL link-change is
		 * armed; make ULP sticky and disable inband exit.
		 */
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		/* S0 entry: allow inband exit, no sticky ULP, no WoL host */
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	if (!to_sx) {
		/* Disable Tx so that the MAC doesn't send any (buffered)
		 * packets to the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_TCTL);
		mac_reg &= ~E1000_TCTL_EN;
		E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
	}

	/* Restore the saved OEM bits for the i217/rev6 workaround when
	 * entering Sx with link still up.
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}
1272
1273 /**
1274  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1275  *  @hw: pointer to the HW structure
1276  *  @force: boolean indicating whether or not to force disabling ULP
1277  *
1278  *  Un-configure ULP mode when link is up, the system is transitioned from
1279  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1280  *  system, poll for an indication from ME that ULP has been un-configured.
1281  *  If not on an ME enabled system, un-configure the ULP mode by software.
1282  *
1283  *  During nominal operation, this function is called when link is acquired
1284  *  to disable ULP mode (force=false); otherwise, for example when unloading
1285  *  the driver or during Sx->S0 transitions, this is called with force=true
 *  to forcibly disable ULP.
 *
 *  When the cable is plugged in while the device is in D0, a Cable Status
1289  *  Change interrupt is generated which causes this function to be called
1290  *  to partially disable ULP mode and restart autonegotiation.  This function
1291  *  is then called again due to the resulting Link Status Change interrupt
1292  *  to finish cleaning up after the ULP flow.
1293  */
1294 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1295 {
1296         s32 ret_val = E1000_SUCCESS;
1297         u32 mac_reg;
1298         u16 phy_reg;
1299         int i = 0;
1300
1301         if ((hw->mac.type < e1000_pch_lpt) ||
1302             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1303             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1304             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1305             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1306             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1307                 return 0;
1308
1309         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1310                 if (force) {
1311                         /* Request ME un-configure ULP mode in the PHY */
1312                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1313                         mac_reg &= ~E1000_H2ME_ULP;
1314                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1315                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1316                 }
1317
1318                 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1319                 while (E1000_READ_REG(hw, E1000_FWSM) &
1320                        E1000_FWSM_ULP_CFG_DONE) {
1321                         if (i++ == 30) {
1322                                 ret_val = -E1000_ERR_PHY;
1323                                 goto out;
1324                         }
1325
1326                         msec_delay(10);
1327                 }
1328                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1329
1330                 if (force) {
1331                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1332                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1333                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1334                 } else {
1335                         /* Clear H2ME.ULP after ME ULP configuration */
1336                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1337                         mac_reg &= ~E1000_H2ME_ULP;
1338                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1339
1340                         /* Restore link speed advertisements and restart
1341                          * Auto-negotiation
1342                          */
1343                         if (hw->mac.autoneg) {
1344                                 ret_val = e1000_phy_setup_autoneg(hw);
1345                                 if (ret_val)
1346                                         goto out;
1347                         } else {
1348                                 ret_val = e1000_setup_copper_link_generic(hw);
1349                                 if (ret_val)
1350                                         goto out;
1351                         }
1352                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1353                 }
1354
1355                 goto out;
1356         }
1357
1358         ret_val = hw->phy.ops.acquire(hw);
1359         if (ret_val)
1360                 goto out;
1361
1362         /* Revert the change to the 'Link Status Change'
1363          * interrupt to trigger on 'Cable Status Change'
1364          */
1365         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1366                                              &phy_reg);
1367         if (ret_val)
1368                 goto release;
1369         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1370         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1371
1372         if (force)
1373                 /* Toggle LANPHYPC Value bit */
1374                 e1000_toggle_lanphypc_pch_lpt(hw);
1375
1376         /* Unforce SMBus mode in PHY */
1377         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1378         if (ret_val) {
1379                 /* The MAC might be in PCIe mode, so temporarily force to
1380                  * SMBus mode in order to access the PHY.
1381                  */
1382                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1383                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1384                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1385
1386                 msec_delay(50);
1387
1388                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1389                                                        &phy_reg);
1390                 if (ret_val)
1391                         goto release;
1392         }
1393         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1394         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1395
1396         /* Unforce SMBus mode in MAC */
1397         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1398         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1399         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1400
1401         /* When ULP mode was previously entered, K1 was disabled by the
1402          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1403          */
1404         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1405         if (ret_val)
1406                 goto release;
1407         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1408         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1409
1410         /* Clear ULP enabled configuration */
1411         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1412         if (ret_val)
1413                 goto release;
1414         /* CSC interrupt received due to ULP Indication */
1415         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1416                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1417                              I218_ULP_CONFIG1_STICKY_ULP |
1418                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1419                              I218_ULP_CONFIG1_WOL_HOST |
1420                              I218_ULP_CONFIG1_INBAND_EXIT |
1421                              I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1422                              I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1423                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1424                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1425
1426                 /* Commit ULP changes by starting auto ULP configuration */
1427                 phy_reg |= I218_ULP_CONFIG1_START;
1428                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1429
1430                 /* Clear Disable SMBus Release on PERST# in MAC */
1431                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1432                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1433                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1434
1435                 if (!force) {
1436                         hw->phy.ops.release(hw);
1437
1438                         if (hw->mac.autoneg)
1439                                 e1000_phy_setup_autoneg(hw);
1440                         else
1441                                 e1000_setup_copper_link_generic(hw);
1442
1443                         e1000_sw_lcd_config_ich8lan(hw);
1444
1445                         e1000_oem_bits_config_ich8lan(hw, true);
1446
1447                         /* Set ULP state to unknown and return non-zero to
1448                          * indicate no link (yet) and re-enter on the next LSC
1449                          * to finish disabling ULP flow.
1450                          */
1451                         hw->dev_spec.ich8lan.ulp_state =
1452                             e1000_ulp_state_unknown;
1453
1454                         return 1;
1455                 }
1456         }
1457
1458         /* Re-enable Tx */
1459         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1460         mac_reg |= E1000_TCTL_EN;
1461         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1462
1463 release:
1464         hw->phy.ops.release(hw);
1465         if (force) {
1466                 hw->phy.ops.reset(hw);
1467                 msec_delay(50);
1468         }
1469 out:
1470         if (ret_val)
1471                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1472         else
1473                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1474
1475         return ret_val;
1476 }
1477
1478 #endif /* ULP_SUPPORT */
1479
1480
/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 *
 *  Returns E1000_SUCCESS (including when no link is detected), a negative
 *  error code on failure, or -E1000_ERR_CONFIG when speed/duplex are forced.
 **/
STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val, tipg_reg = 0;
	u16 emi_addr, emi_val = 0;
	bool link = false;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
		/* First we want to see if the MII Status Register reports
		 * link.  If so, then we want to get the current speed/duplex
		 * of the PHY.
		 */
		ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
		if (ret_val)
			return ret_val;
	} else {
		/* Check the MAC's STATUS register to determine link state
		 * since the PHY could be inaccessible while in ULP mode.
		 */
		link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
		if (link)
			ret_val = e1000_disable_ulp_lpt_lp(hw, false);
		else
			ret_val = e1000_enable_ulp_lpt_lp(hw, false);
		if (ret_val)
			return ret_val;
	}

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions. To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if ((hw->mac.type >= e1000_pch2lan) && link) {
		u16 speed, duplex;

		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
		tipg_reg &= ~E1000_TIPG_IPGT_MASK;

		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
			/* Maximum IPG to space out transmissions at 10/half */
			tipg_reg |= 0xFF;
			/* Reduce Rx latency in analog PHY */
			emi_val = 0;
		} else if (hw->mac.type >= e1000_pch_spt &&
			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
			tipg_reg |= 0xC;
			emi_val = 1;
		} else {
			/* Roll back the default values */
			tipg_reg |= 0x08;
			emi_val = 1;
		}

		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* The Rx-config EMI register address differs between
		 * 82579 (pch2lan) and the later I217-class parts.
		 */
		if (hw->mac.type == e1000_pch2lan)
			emi_addr = I82579_RX_CONFIG;
		else
			emi_addr = I217_RX_CONFIG;
		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
		/* NOTE(review): ret_val from the EMI write is intentionally
		 * only checked after the PHY semaphore is released below,
		 * so the PLL clock-gate/K1 writes happen under one acquire.
		 */

		if (hw->mac.type >= e1000_pch_lpt) {
			/* NOTE(review): this inner phy_reg shadows the
			 * outer declaration (-Wshadow); the outer one is
			 * only used later in the switch below.
			 */
			u16 phy_reg;

			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
						    &phy_reg);
			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
			/* Slower link speeds get a larger clock-gate value */
			if (speed == SPEED_100 || speed == SPEED_10)
				phy_reg |= 0x3E8;
			else
				phy_reg |= 0xFA;
			hw->phy.ops.write_reg_locked(hw,
						     I217_PLL_CLOCK_GATE_REG,
						     phy_reg);

			if (speed == SPEED_1000) {
				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
							    &phy_reg);

				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;

				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
							     phy_reg);
			}
		}
		hw->phy.ops.release(hw);

		if (ret_val)
			return ret_val;

		if (hw->mac.type >= e1000_pch_spt) {
			u16 data;
			u16 ptr_gap;

			if (speed == SPEED_1000) {
				ret_val = hw->phy.ops.acquire(hw);
				if (ret_val)
					return ret_val;

				ret_val = hw->phy.ops.read_reg_locked(hw,
							      PHY_REG(776, 20),
							      &data);
				if (ret_val) {
					hw->phy.ops.release(hw);
					return ret_val;
				}

				/* Pointer gap field lives in bits 11:2;
				 * enforce a minimum of 0x18 at gigabit.
				 */
				ptr_gap = (data & (0x3FF << 2)) >> 2;
				if (ptr_gap < 0x18) {
					data &= ~(0x3FF << 2);
					data |= (0x18 << 2);
					ret_val =
						hw->phy.ops.write_reg_locked(hw,
							PHY_REG(776, 20), data);
				}
				hw->phy.ops.release(hw);
				if (ret_val)
					return ret_val;
			} else {
				/* Non-gigabit: program the fixed value */
				ret_val = hw->phy.ops.acquire(hw);
				if (ret_val)
					return ret_val;

				ret_val = hw->phy.ops.write_reg_locked(hw,
							     PHY_REG(776, 20),
							     0xC023);
				hw->phy.ops.release(hw);
				if (ret_val)
					return ret_val;

			}
		}
	}

	/* I217 Packet Loss issue:
	 * ensure that FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for I217 to 8 usec
	 */
	if (hw->mac.type >= e1000_pch_lpt) {
		u32 mac_reg;

		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}
	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	/* Configure K0s minimum time */
	if (hw->mac.type >= e1000_pch_lpt) {
		e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
	}

	if (hw->mac.type >= e1000_pch_lpt) {
		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

		if (hw->mac.type == e1000_pch_spt) {
			/* FEXTNVM6 K1-off workaround - for SPT only */
			u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);

			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
			else
				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
		}

		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = false;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		/* Strip one preamble byte only when not at full duplex */
		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}
1765
1766 /**
1767  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1768  *  @hw: pointer to the HW structure
1769  *
1770  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1771  **/
1772 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1773 {
1774         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1775
1776         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1777         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1778         switch (hw->mac.type) {
1779         case e1000_ich8lan:
1780         case e1000_ich9lan:
1781         case e1000_ich10lan:
1782                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1783                 break;
1784         case e1000_pchlan:
1785         case e1000_pch2lan:
1786         case e1000_pch_lpt:
1787         case e1000_pch_spt:
1788                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1789                 break;
1790         default:
1791                 break;
1792         }
1793 }
1794
/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.  Released by
 *  e1000_release_nvm_ich8lan().  Always returns E1000_SUCCESS.
 **/
STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_acquire_nvm_ich8lan");

	/* Blocking lock; cannot fail, hence the unconditional success */
	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return E1000_SUCCESS;
}
1809
/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.  Counterpart
 *  of e1000_acquire_nvm_ich8lan().
 **/
STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_ich8lan");

	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);

	return;
}
1824
1825 /**
1826  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1827  *  @hw: pointer to the HW structure
1828  *
1829  *  Acquires the software control flag for performing PHY and select
1830  *  MAC CSR accesses.
1831  **/
1832 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1833 {
1834         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1835         s32 ret_val = E1000_SUCCESS;
1836
1837         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1838
1839         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1840
1841         while (timeout) {
1842                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1843                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1844                         break;
1845
1846                 msec_delay_irq(1);
1847                 timeout--;
1848         }
1849
1850         if (!timeout) {
1851                 DEBUGOUT("SW has already locked the resource.\n");
1852                 ret_val = -E1000_ERR_CONFIG;
1853                 goto out;
1854         }
1855
1856         timeout = SW_FLAG_TIMEOUT;
1857
1858         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1859         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1860
1861         while (timeout) {
1862                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1863                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1864                         break;
1865
1866                 msec_delay_irq(1);
1867                 timeout--;
1868         }
1869
1870         if (!timeout) {
1871                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1872                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1873                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1874                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1875                 ret_val = -E1000_ERR_CONFIG;
1876                 goto out;
1877         }
1878
1879 out:
1880         if (ret_val)
1881                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1882
1883         return ret_val;
1884 }
1885
1886 /**
1887  *  e1000_release_swflag_ich8lan - Release software control flag
1888  *  @hw: pointer to the HW structure
1889  *
1890  *  Releases the software control flag for performing PHY and select
1891  *  MAC CSR accesses.
1892  **/
1893 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1894 {
1895         u32 extcnf_ctrl;
1896
1897         DEBUGFUNC("e1000_release_swflag_ich8lan");
1898
1899         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1900
1901         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1902                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1903                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1904         } else {
1905                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1906         }
1907
1908         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1909
1910         return;
1911 }
1912
1913 /**
1914  *  e1000_check_mng_mode_ich8lan - Checks management mode
1915  *  @hw: pointer to the HW structure
1916  *
1917  *  This checks if the adapter has any manageability enabled.
1918  *  This is a function pointer entry point only called by read/write
1919  *  routines for the PHY and NVM parts.
1920  **/
1921 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1922 {
1923         u32 fwsm;
1924
1925         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1926
1927         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1928
1929         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1930                ((fwsm & E1000_FWSM_MODE_MASK) ==
1931                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1932 }
1933
1934 /**
1935  *  e1000_check_mng_mode_pchlan - Checks management mode
1936  *  @hw: pointer to the HW structure
1937  *
1938  *  This checks if the adapter has iAMT enabled.
1939  *  This is a function pointer entry point only called by read/write
1940  *  routines for the PHY and NVM parts.
1941  **/
1942 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1943 {
1944         u32 fwsm;
1945
1946         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1947
1948         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1949
1950         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1951                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1952 }
1953
/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_CONFIG when the entry could not
 *  be written (bad index, semaphore failure, or ME holding the register).
 **/
STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] maps straight to RAL/RAH; flush after each half so the
	 * register pair is never observed partially updated.
	 */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count)) {
		s32 ret_val;

		/* SHRA access requires the software control flag */
		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates; ME can silently lock an
		 * SHRA entry, in which case the readback will not match
		 */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return E1000_SUCCESS;

		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
2022
/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr. For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address. SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_CONFIG when the entry could not
 *  be written (bad index, ME lock, semaphore failure, or readback mismatch).
 **/
STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] maps straight to RAL/RAH; flush after each half so the
	 * register pair is never observed partially updated.
	 */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		/* wlock_mac == 0 means nothing is locked; otherwise only
		 * entries up to and including wlock_mac are writable.
		 */
		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
					rar_low);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
					rar_high);
			E1000_WRITE_FLUSH(hw);

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
				return E1000_SUCCESS;
		}
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
2101
2102 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
/**
 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 *
 *  Failures while mirroring into the PHY are silently ignored (best
 *  effort) - the MAC's MTA has already been programmed by then.
 **/
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count)
{
	u16 phy_reg = 0;
	int i;
	s32 ret_val;

	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

	/* Program the MAC's MTA (also fills hw->mac.mta_shadow) first */
	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	/* Switch the PHY to wakeup-register access so BM_MTA is reachable;
	 * phy_reg saves state that the disable call restores below.
	 */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Mirror each 32-bit MTA entry into two 16-bit PHY registers */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(hw->mac.mta_shadow[i] &
						 0xFFFF));
		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
					   (u16)((hw->mac.mta_shadow[i] >> 16) &
						 0xFFFF));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
2146
2147 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2148 /**
2149  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2150  *  @hw: pointer to the HW structure
2151  *
2152  *  Checks if firmware is blocking the reset of the PHY.
2153  *  This is a function pointer entry point only called by
2154  *  reset routines.
2155  **/
2156 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2157 {
2158         u32 fwsm;
2159         bool blocked = false;
2160         int i = 0;
2161
2162         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2163
2164         do {
2165                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2166                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2167                         blocked = true;
2168                         msec_delay(10);
2169                         continue;
2170                 }
2171                 blocked = false;
2172         } while (blocked && (i++ < 30));
2173         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2174 }
2175
/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 *  @hw: pointer to the HW structure
 *
 *  Copies the SMBus address strapping (and, on I217, the SMBus frequency
 *  strapping) from the STRAP register into the PHY's HV_SMB_ADDR register.
 *
 *  Assumes semaphore already acquired.
 *
 **/
STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
		E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Replace the PHY's SMBus address field with the strapped value
	 * and mark the address valid (with PEC enabled).
	 */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		if (freq--) {
			/* A freq strap of 0 means unsupported; otherwise the
			 * PHY field holds (strap value - 1) - note freq was
			 * just post-decremented - split across the low/high
			 * frequency bits.
			 */
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
				HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			DEBUGOUT("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}
2216
2217 /**
2218  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2219  *  @hw:   pointer to the HW structure
2220  *
2221  *  SW should configure the LCD from the NVM extended configuration region
2222  *  as a workaround for certain parts.
2223  **/
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		/* Only ICH8 AMT/C variants use the full SW_CONFIG mask;
		 * every other ICH8 device falls through to the ICH8M mask.
		 */
		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		/* No SW LCD configuration needed on other MAC types */
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Nothing to do unless NVM flags SW configuration */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
			goto release;

	/* Extract the extended-config area length (in words) from NVM */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		/* Mirror the MAC LED configuration into the PHY */
		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry is a (data, address) word pair in NVM */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto release;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		/* Combine saved page with the in-page register offset */
		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2339
2340 /**
2341  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2342  *  @hw:   pointer to the HW structure
2343  *  @link: link up bool flag
2344  *
2345  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2346  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2347  *  If link is down, the function will restore the default K1 setting located
2348  *  in the NVM.
2349  **/
STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default K1 state is whatever was read out of the NVM */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	/* Workaround applies only to PCH (82577/82578) parts */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Keep only link-up/resolved/speed bits so the
			 * compare below detects a resolved 1Gbps link.
			 */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Same 1Gbps detection via the 82577 status reg */
			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	/* Apply the computed K1 state (semaphore still held) */
	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2421
/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k1_ich8lan");

	/* Read-modify-write the K1 enable bit in the KMRN K1 config reg */
	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	usec_delay(20);
	/* Save CTRL/CTRL_EXT so they can be restored below */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

	/* Briefly force speed with speed-bypass so the new K1 setting
	 * takes effect, then restore the original register values.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);

	return E1000_SUCCESS;
}
2475
2476 /**
2477  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2478  *  @hw:       pointer to the HW structure
2479  *  @d0_state: boolean if entering d0 or d3 device state
2480  *
2481  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2482  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2483  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2484  **/
STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	/* OEM bits exist only on PCH and newer MACs */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH, skip SW config when HW owns the OEM bits */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* SW configuration only applies when the NVM SW-config bit is set */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	/* Start from a clean slate for the two OEM bits we manage */
	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	if (d0_state) {
		/* In D0, mirror only the D0a bits from PHY_CTRL */
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		/* Entering D3: either the D0a or non-D0a bit applies */
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
		    E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2546
2547
2548 /**
2549  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2550  *  @hw:   pointer to the HW structure
2551  **/
2552 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2553 {
2554         s32 ret_val;
2555         u16 data;
2556
2557         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2558
2559         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2560         if (ret_val)
2561                 return ret_val;
2562
2563         data |= HV_KMRN_MDIO_SLOW;
2564
2565         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2566
2567         return ret_val;
2568 }
2569
2570 /**
2571  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2572  *  done after every PHY reset.
2573  **/
2574 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2575 {
2576         s32 ret_val = E1000_SUCCESS;
2577         u16 phy_data;
2578
2579         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2580
2581         if (hw->mac.type != e1000_pchlan)
2582                 return E1000_SUCCESS;
2583
2584         /* Set MDIO slow mode before any other MDIO access */
2585         if (hw->phy.type == e1000_phy_82577) {
2586                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2587                 if (ret_val)
2588                         return ret_val;
2589         }
2590
2591         if (((hw->phy.type == e1000_phy_82577) &&
2592              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2593             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2594                 /* Disable generation of early preamble */
2595                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2596                 if (ret_val)
2597                         return ret_val;
2598
2599                 /* Preamble tuning for SSC */
2600                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2601                                                 0xA204);
2602                 if (ret_val)
2603                         return ret_val;
2604         }
2605
2606         if (hw->phy.type == e1000_phy_82578) {
2607                 /* Return registers to default by doing a soft reset then
2608                  * writing 0x3140 to the control register.
2609                  */
2610                 if (hw->phy.revision < 2) {
2611                         e1000_phy_sw_reset_generic(hw);
2612                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2613                                                         0x3140);
2614                 }
2615         }
2616
2617         /* Select page 0 */
2618         ret_val = hw->phy.ops.acquire(hw);
2619         if (ret_val)
2620                 return ret_val;
2621
2622         hw->phy.addr = 1;
2623         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2624         hw->phy.ops.release(hw);
2625         if (ret_val)
2626                 return ret_val;
2627
2628         /* Configure the K1 Si workaround during phy reset assuming there is
2629          * link so that it disables K1 if link is in 1Gbps.
2630          */
2631         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2632         if (ret_val)
2633                 return ret_val;
2634
2635         /* Workaround for link disconnects on a busy hub in half duplex */
2636         ret_val = hw->phy.ops.acquire(hw);
2637         if (ret_val)
2638                 return ret_val;
2639         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2640         if (ret_val)
2641                 goto release;
2642         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2643                                                phy_data & 0x00FF);
2644         if (ret_val)
2645                 goto release;
2646
2647         /* set MSE higher to enable link to stay up when noise is high */
2648         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2649 release:
2650         hw->phy.ops.release(hw);
2651
2652         return ret_val;
2653 }
2654
2655 /**
2656  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2657  *  @hw:   pointer to the HW structure
2658  **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* Open access to the BM wakeup register page before copying */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		/* Low 32 address bits -> two 16-bit PHY registers.
		 * NOTE(review): write_reg_page return values are ignored,
		 * consistent with this function's void contract.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		/* Address-valid bit (bit 31) moved down into bit 15 */
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
2695
2696 #ifndef CRC32_OS_SUPPORT
2697 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2698 {
2699         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2700         u32 i, j, mask, crc;
2701
2702         DEBUGFUNC("e1000_calc_rx_da_crc");
2703
2704         crc = 0xffffffff;
2705         for (i = 0; i < 6; i++) {
2706                 crc = crc ^ mac[i];
2707                 for (j = 8; j > 0; j--) {
2708                         mask = (crc & 1) * (-1);
2709                         crc = (crc >> 1) ^ (poly & mask);
2710                 }
2711         }
2712         return ~crc;
2713 }
2714
2715 #endif /* CRC32_OS_SUPPORT */
2716 /**
2717  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2718  *  with 82579 PHY
2719  *  @hw: pointer to the HW structure
2720  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2721  **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");

	/* Workaround applies to PCH2 (82579) and newer MACs only */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	/* disable Rx path while enabling/disabling workaround */
	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
					phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ADDR_LEN] = {0};
			u32 addr_high, addr_low;

			/* Skip entries whose address-valid bit is clear */
			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
			/* Reassemble the 6-byte MAC from RAL (low 4 bytes)
			 * and RAH (high 2 bytes), little-endian order.
			 */
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

#ifndef CRC32_OS_SUPPORT
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					e1000_calc_rx_da_crc(mac_addr));
#else /* CRC32_OS_SUPPORT */
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					E1000_CRC32(ETH_ADDR_LEN, mac_addr));
#endif /* CRC32_OS_SUPPORT */
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Strip the Ethernet CRC on receive */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		/* Set bit 0 of the KMRN control register */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		/* Program field [11:8] of the KMRN HD control register */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		/* Set the Tx pointer gap for jumbo operation */
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (E1000_TX_PTR_GAP << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		/* Set bit 10 of the PHY power-management control register */
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
						(1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Stop stripping the Ethernet CRC on receive */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		/* Clear bit 0 of the KMRN control register */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
						~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
				     ~(1 << 14));
}
2895
2896 /**
2897  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2898  *  done after every PHY reset.
2899  **/
2900 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2901 {
2902         s32 ret_val = E1000_SUCCESS;
2903
2904         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2905
2906         if (hw->mac.type != e1000_pch2lan)
2907                 return E1000_SUCCESS;
2908
2909         /* Set MDIO slow mode before any other MDIO access */
2910         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2911         if (ret_val)
2912                 return ret_val;
2913
2914         ret_val = hw->phy.ops.acquire(hw);
2915         if (ret_val)
2916                 return ret_val;
2917         /* set MSE higher to enable link to stay up when noise is high */
2918         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2919         if (ret_val)
2920                 goto release;
2921         /* drop link after 5 times MSE threshold was reached */
2922         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2923 release:
2924         hw->phy.ops.release(hw);
2925
2926         return ret_val;
2927 }
2928
/**
 *  e1000_k1_workaround_lv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 *  Disable K1 for 1000 and 100 speeds
 **/
STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;

	DEBUGFUNC("e1000_k1_workaround_lv");

	/* Workaround applies only to PCH2 (82579) parts */
	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Only act when link is up and auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
						       &pm_phy_reg);
			if (ret_val)
				return ret_val;
			/* Disable K1 at 1000/100 Mbps */
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
							pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			/* 10Mbps: extend the K1 beacon duration to 16 us */
			u32 mac_reg;
			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}
2978
2979 /**
2980  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2981  *  @hw:   pointer to the HW structure
2982  *  @gate: boolean set to true to gate, false to ungate
2983  *
2984  *  Gate/ungate the automatic PHY configuration via hardware; perform
2985  *  the configuration via software instead.
2986  **/
2987 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2988 {
2989         u32 extcnf_ctrl;
2990
2991         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2992
2993         if (hw->mac.type < e1000_pch2lan)
2994                 return;
2995
2996         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2997
2998         if (gate)
2999                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3000         else
3001                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3002
3003         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3004 }
3005
3006 /**
3007  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3008  *  @hw: pointer to the HW structure
3009  *
3010  *  Check the appropriate indication the MAC has finished configuring the
3011  *  PHY after a software reset.
3012  **/
3013 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3014 {
3015         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3016
3017         DEBUGFUNC("e1000_lan_init_done_ich8lan");
3018
3019         /* Wait for basic configuration completes before proceeding */
3020         do {
3021                 data = E1000_READ_REG(hw, E1000_STATUS);
3022                 data &= E1000_STATUS_LAN_INIT_DONE;
3023                 usec_delay(100);
3024         } while ((!data) && --loop);
3025
3026         /* If basic configuration is incomplete before the above loop
3027          * count reaches 0, loading the configuration from NVM will
3028          * leave the PHY in a bad state possibly resulting in no link.
3029          */
3030         if (loop == 0)
3031                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3032
3033         /* Clear the Init Done bit for the next init event */
3034         data = E1000_READ_REG(hw, E1000_STATUS);
3035         data &= ~E1000_STATUS_LAN_INIT_DONE;
3036         E1000_WRITE_REG(hw, E1000_STATUS, data);
3037 }
3038
3039 /**
3040  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3041  *  @hw: pointer to the HW structure
3042  **/
3043 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3044 {
3045         s32 ret_val = E1000_SUCCESS;
3046         u16 reg;
3047
3048         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3049
3050         if (hw->phy.ops.check_reset_block(hw))
3051                 return E1000_SUCCESS;
3052
3053         /* Allow time for h/w to get to quiescent state after reset */
3054         msec_delay(10);
3055
3056         /* Perform any necessary post-reset workarounds */
3057         switch (hw->mac.type) {
3058         case e1000_pchlan:
3059                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3060                 if (ret_val)
3061                         return ret_val;
3062                 break;
3063         case e1000_pch2lan:
3064                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3065                 if (ret_val)
3066                         return ret_val;
3067                 break;
3068         default:
3069                 break;
3070         }
3071
3072         /* Clear the host wakeup bit after lcd reset */
3073         if (hw->mac.type >= e1000_pchlan) {
3074                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3075                 reg &= ~BM_WUC_HOST_WU_BIT;
3076                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3077         }
3078
3079         /* Configure the LCD with the extended configuration region in NVM */
3080         ret_val = e1000_sw_lcd_config_ich8lan(hw);
3081         if (ret_val)
3082                 return ret_val;
3083
3084         /* Configure the LCD with the OEM bits in NVM */
3085         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
3086
3087         if (hw->mac.type == e1000_pch2lan) {
3088                 /* Ungate automatic PHY configuration on non-managed 82579 */
3089                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3090                     E1000_ICH_FWSM_FW_VALID)) {
3091                         msec_delay(10);
3092                         e1000_gate_hw_phy_config_ich8lan(hw, false);
3093                 }
3094
3095                 /* Set EEE LPI Update Timer to 200usec */
3096                 ret_val = hw->phy.ops.acquire(hw);
3097                 if (ret_val)
3098                         return ret_val;
3099                 ret_val = e1000_write_emi_reg_locked(hw,
3100                                                      I82579_LPI_UPDATE_TIMER,
3101                                                      0x1387);
3102                 hw->phy.ops.release(hw);
3103         }
3104
3105         return ret_val;
3106 }
3107
3108 /**
3109  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3110  *  @hw: pointer to the HW structure
3111  *
3112  *  Resets the PHY
3113  *  This is a function pointer entry point called by drivers
3114  *  or other shared routines.
3115  **/
3116 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3117 {
3118         s32 ret_val = E1000_SUCCESS;
3119
3120         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3121
3122         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3123         if ((hw->mac.type == e1000_pch2lan) &&
3124             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3125                 e1000_gate_hw_phy_config_ich8lan(hw, true);
3126
3127         ret_val = e1000_phy_hw_reset_generic(hw);
3128         if (ret_val)
3129                 return ret_val;
3130
3131         return e1000_post_phy_reset_ich8lan(hw);
3132 }
3133
3134 /**
3135  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3136  *  @hw: pointer to the HW structure
3137  *  @active: true to enable LPLU, false to disable
3138  *
3139  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3140  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3141  *  the phy speed. This function will manually set the LPLU bit and restart
3142  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3143  *  since it configures the same bit.
3144  **/
3145 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3146 {
3147         s32 ret_val;
3148         u16 oem_reg;
3149
3150         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3151         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3152         if (ret_val)
3153                 return ret_val;
3154
3155         if (active)
3156                 oem_reg |= HV_OEM_BITS_LPLU;
3157         else
3158                 oem_reg &= ~HV_OEM_BITS_LPLU;
3159
3160         if (!hw->phy.ops.check_reset_block(hw))
3161                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3162
3163         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3164 }
3165
3166 /**
3167  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3168  *  @hw: pointer to the HW structure
3169  *  @active: true to enable LPLU, false to disable
3170  *
3171  *  Sets the LPLU D0 state according to the active flag.  When
3172  *  activating LPLU this function also disables smart speed
3173  *  and vice versa.  LPLU will not be activated unless the
3174  *  device autonegotiation advertisement meets standards of
3175  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3176  *  This is a function pointer entry point only called by
3177  *  PHY setup routines.
3178  **/
3179 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3180 {
3181         struct e1000_phy_info *phy = &hw->phy;
3182         u32 phy_ctrl;
3183         s32 ret_val = E1000_SUCCESS;
3184         u16 data;
3185
3186         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3187
3188         if (phy->type == e1000_phy_ife)
3189                 return E1000_SUCCESS;
3190
3191         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3192
3193         if (active) {
3194                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3195                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3196
3197                 if (phy->type != e1000_phy_igp_3)
3198                         return E1000_SUCCESS;
3199
3200                 /* Call gig speed drop workaround on LPLU before accessing
3201                  * any PHY registers
3202                  */
3203                 if (hw->mac.type == e1000_ich8lan)
3204                         e1000_gig_downshift_workaround_ich8lan(hw);
3205
3206                 /* When LPLU is enabled, we should disable SmartSpeed */
3207                 ret_val = phy->ops.read_reg(hw,
3208                                             IGP01E1000_PHY_PORT_CONFIG,
3209                                             &data);
3210                 if (ret_val)
3211                         return ret_val;
3212                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3213                 ret_val = phy->ops.write_reg(hw,
3214                                              IGP01E1000_PHY_PORT_CONFIG,
3215                                              data);
3216                 if (ret_val)
3217                         return ret_val;
3218         } else {
3219                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3220                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3221
3222                 if (phy->type != e1000_phy_igp_3)
3223                         return E1000_SUCCESS;
3224
3225                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3226                  * during Dx states where the power conservation is most
3227                  * important.  During driver activity we should enable
3228                  * SmartSpeed, so performance is maintained.
3229                  */
3230                 if (phy->smart_speed == e1000_smart_speed_on) {
3231                         ret_val = phy->ops.read_reg(hw,
3232                                                     IGP01E1000_PHY_PORT_CONFIG,
3233                                                     &data);
3234                         if (ret_val)
3235                                 return ret_val;
3236
3237                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3238                         ret_val = phy->ops.write_reg(hw,
3239                                                      IGP01E1000_PHY_PORT_CONFIG,
3240                                                      data);
3241                         if (ret_val)
3242                                 return ret_val;
3243                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3244                         ret_val = phy->ops.read_reg(hw,
3245                                                     IGP01E1000_PHY_PORT_CONFIG,
3246                                                     &data);
3247                         if (ret_val)
3248                                 return ret_val;
3249
3250                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3251                         ret_val = phy->ops.write_reg(hw,
3252                                                      IGP01E1000_PHY_PORT_CONFIG,
3253                                                      data);
3254                         if (ret_val)
3255                                 return ret_val;
3256                 }
3257         }
3258
3259         return E1000_SUCCESS;
3260 }
3261
3262 /**
3263  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3264  *  @hw: pointer to the HW structure
3265  *  @active: true to enable LPLU, false to disable
3266  *
3267  *  Sets the LPLU D3 state according to the active flag.  When
3268  *  activating LPLU this function also disables smart speed
3269  *  and vice versa.  LPLU will not be activated unless the
3270  *  device autonegotiation advertisement meets standards of
3271  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3272  *  This is a function pointer entry point only called by
3273  *  PHY setup routines.
3274  **/
3275 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3276 {
3277         struct e1000_phy_info *phy = &hw->phy;
3278         u32 phy_ctrl;
3279         s32 ret_val = E1000_SUCCESS;
3280         u16 data;
3281
3282         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3283
3284         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3285
3286         if (!active) {
3287                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3288                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3289
3290                 if (phy->type != e1000_phy_igp_3)
3291                         return E1000_SUCCESS;
3292
3293                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3294                  * during Dx states where the power conservation is most
3295                  * important.  During driver activity we should enable
3296                  * SmartSpeed, so performance is maintained.
3297                  */
3298                 if (phy->smart_speed == e1000_smart_speed_on) {
3299                         ret_val = phy->ops.read_reg(hw,
3300                                                     IGP01E1000_PHY_PORT_CONFIG,
3301                                                     &data);
3302                         if (ret_val)
3303                                 return ret_val;
3304
3305                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3306                         ret_val = phy->ops.write_reg(hw,
3307                                                      IGP01E1000_PHY_PORT_CONFIG,
3308                                                      data);
3309                         if (ret_val)
3310                                 return ret_val;
3311                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3312                         ret_val = phy->ops.read_reg(hw,
3313                                                     IGP01E1000_PHY_PORT_CONFIG,
3314                                                     &data);
3315                         if (ret_val)
3316                                 return ret_val;
3317
3318                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3319                         ret_val = phy->ops.write_reg(hw,
3320                                                      IGP01E1000_PHY_PORT_CONFIG,
3321                                                      data);
3322                         if (ret_val)
3323                                 return ret_val;
3324                 }
3325         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3326                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3327                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3328                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3329                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3330
3331                 if (phy->type != e1000_phy_igp_3)
3332                         return E1000_SUCCESS;
3333
3334                 /* Call gig speed drop workaround on LPLU before accessing
3335                  * any PHY registers
3336                  */
3337                 if (hw->mac.type == e1000_ich8lan)
3338                         e1000_gig_downshift_workaround_ich8lan(hw);
3339
3340                 /* When LPLU is enabled, we should disable SmartSpeed */
3341                 ret_val = phy->ops.read_reg(hw,
3342                                             IGP01E1000_PHY_PORT_CONFIG,
3343                                             &data);
3344                 if (ret_val)
3345                         return ret_val;
3346
3347                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3348                 ret_val = phy->ops.write_reg(hw,
3349                                              IGP01E1000_PHY_PORT_CONFIG,
3350                                              data);
3351         }
3352
3353         return ret_val;
3354 }
3355
3356 /**
3357  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3358  *  @hw: pointer to the HW structure
3359  *  @bank:  pointer to the variable that returns the active bank
3360  *
3361  *  Reads signature byte from the NVM using the flash access registers.
3362  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3363  **/
3364 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3365 {
3366         u32 eecd;
3367         struct e1000_nvm_info *nvm = &hw->nvm;
3368         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3369         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3370         u32 nvm_dword = 0;
3371         u8 sig_byte = 0;
3372         s32 ret_val;
3373
3374         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3375
3376         switch (hw->mac.type) {
3377         case e1000_pch_spt:
3378                 bank1_offset = nvm->flash_bank_size;
3379                 act_offset = E1000_ICH_NVM_SIG_WORD;
3380
3381                 /* set bank to 0 in case flash read fails */
3382                 *bank = 0;
3383
3384                 /* Check bank 0 */
3385                 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3386                                                          &nvm_dword);
3387                 if (ret_val)
3388                         return ret_val;
3389                 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3390                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3391                     E1000_ICH_NVM_SIG_VALUE) {
3392                         *bank = 0;
3393                         return E1000_SUCCESS;
3394                 }
3395
3396                 /* Check bank 1 */
3397                 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3398                                                          bank1_offset,
3399                                                          &nvm_dword);
3400                 if (ret_val)
3401                         return ret_val;
3402                 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3403                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3404                     E1000_ICH_NVM_SIG_VALUE) {
3405                         *bank = 1;
3406                         return E1000_SUCCESS;
3407                 }
3408
3409                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3410                 return -E1000_ERR_NVM;
3411         case e1000_ich8lan:
3412         case e1000_ich9lan:
3413                 eecd = E1000_READ_REG(hw, E1000_EECD);
3414                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3415                     E1000_EECD_SEC1VAL_VALID_MASK) {
3416                         if (eecd & E1000_EECD_SEC1VAL)
3417                                 *bank = 1;
3418                         else
3419                                 *bank = 0;
3420
3421                         return E1000_SUCCESS;
3422                 }
3423                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3424                 /* fall-thru */
3425         default:
3426                 /* set bank to 0 in case flash read fails */
3427                 *bank = 0;
3428
3429                 /* Check bank 0 */
3430                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3431                                                         &sig_byte);
3432                 if (ret_val)
3433                         return ret_val;
3434                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3435                     E1000_ICH_NVM_SIG_VALUE) {
3436                         *bank = 0;
3437                         return E1000_SUCCESS;
3438                 }
3439
3440                 /* Check bank 1 */
3441                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3442                                                         bank1_offset,
3443                                                         &sig_byte);
3444                 if (ret_val)
3445                         return ret_val;
3446                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3447                     E1000_ICH_NVM_SIG_VALUE) {
3448                         *bank = 1;
3449                         return E1000_SUCCESS;
3450                 }
3451
3452                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3453                 return -E1000_ERR_NVM;
3454         }
3455 }
3456
3457 /**
3458  *  e1000_read_nvm_spt - NVM access for SPT
3459  *  @hw: pointer to the HW structure
3460  *  @offset: The offset (in bytes) of the word(s) to read.
3461  *  @words: Size of data to read in words.
3462  *  @data: pointer to the word(s) to read at offset.
3463  *
3464  *  Reads a word(s) from the NVM
3465  **/
3466 STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3467                               u16 *data)
3468 {
3469         struct e1000_nvm_info *nvm = &hw->nvm;
3470         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3471         u32 act_offset;
3472         s32 ret_val = E1000_SUCCESS;
3473         u32 bank = 0;
3474         u32 dword = 0;
3475         u16 offset_to_read;
3476         u16 i;
3477
3478         DEBUGFUNC("e1000_read_nvm_spt");
3479
3480         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3481             (words == 0)) {
3482                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3483                 ret_val = -E1000_ERR_NVM;
3484                 goto out;
3485         }
3486
3487         nvm->ops.acquire(hw);
3488
3489         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3490         if (ret_val != E1000_SUCCESS) {
3491                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3492                 bank = 0;
3493         }
3494
3495         act_offset = (bank) ? nvm->flash_bank_size : 0;
3496         act_offset += offset;
3497
3498         ret_val = E1000_SUCCESS;
3499
3500         for (i = 0; i < words; i += 2) {
3501                 if (words - i == 1) {
3502                         if (dev_spec->shadow_ram[offset+i].modified) {
3503                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3504                         } else {
3505                                 offset_to_read = act_offset + i -
3506                                                  ((act_offset + i) % 2);
3507                                 ret_val =
3508                                    e1000_read_flash_dword_ich8lan(hw,
3509                                                                  offset_to_read,
3510                                                                  &dword);
3511                                 if (ret_val)
3512                                         break;
3513                                 if ((act_offset + i) % 2 == 0)
3514                                         data[i] = (u16)(dword & 0xFFFF);
3515                                 else
3516                                         data[i] = (u16)((dword >> 16) & 0xFFFF);
3517                         }
3518                 } else {
3519                         offset_to_read = act_offset + i;
3520                         if (!(dev_spec->shadow_ram[offset+i].modified) ||
3521                             !(dev_spec->shadow_ram[offset+i+1].modified)) {
3522                                 ret_val =
3523                                    e1000_read_flash_dword_ich8lan(hw,
3524                                                                  offset_to_read,
3525                                                                  &dword);
3526                                 if (ret_val)
3527                                         break;
3528                         }
3529                         if (dev_spec->shadow_ram[offset+i].modified)
3530                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3531                         else
3532                                 data[i] = (u16) (dword & 0xFFFF);
3533                         if (dev_spec->shadow_ram[offset+i].modified)
3534                                 data[i+1] =
3535                                    dev_spec->shadow_ram[offset+i+1].value;
3536                         else
3537                                 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3538                 }
3539         }
3540
3541         nvm->ops.release(hw);
3542
3543 out:
3544         if (ret_val)
3545                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3546
3547         return ret_val;
3548 }
3549
3550 /**
3551  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3552  *  @hw: pointer to the HW structure
3553  *  @offset: The offset (in bytes) of the word(s) to read.
3554  *  @words: Size of data to read in words
3555  *  @data: Pointer to the word(s) to read at offset.
3556  *
3557  *  Reads a word(s) from the NVM using the flash access registers.
3558  **/
3559 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3560                                   u16 *data)
3561 {
3562         struct e1000_nvm_info *nvm = &hw->nvm;
3563         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3564         u32 act_offset;
3565         s32 ret_val = E1000_SUCCESS;
3566         u32 bank = 0;
3567         u16 i, word;
3568
3569         DEBUGFUNC("e1000_read_nvm_ich8lan");
3570
3571         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3572             (words == 0)) {
3573                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3574                 ret_val = -E1000_ERR_NVM;
3575                 goto out;
3576         }
3577
3578         nvm->ops.acquire(hw);
3579
3580         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3581         if (ret_val != E1000_SUCCESS) {
3582                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3583                 bank = 0;
3584         }
3585
3586         act_offset = (bank) ? nvm->flash_bank_size : 0;
3587         act_offset += offset;
3588
3589         ret_val = E1000_SUCCESS;
3590         for (i = 0; i < words; i++) {
3591                 if (dev_spec->shadow_ram[offset+i].modified) {
3592                         data[i] = dev_spec->shadow_ram[offset+i].value;
3593                 } else {
3594                         ret_val = e1000_read_flash_word_ich8lan(hw,
3595                                                                 act_offset + i,
3596                                                                 &word);
3597                         if (ret_val)
3598                                 break;
3599                         data[i] = word;
3600                 }
3601         }
3602
3603         nvm->ops.release(hw);
3604
3605 out:
3606         if (ret_val)
3607                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3608
3609         return ret_val;
3610 }
3611
3612 /**
3613  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3614  *  @hw: pointer to the HW structure
3615  *
3616  *  This function does initial flash setup so that a new read/write/erase cycle
3617  *  can be started.
3618  **/
3619 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3620 {
3621         union ich8_hws_flash_status hsfsts;
3622         s32 ret_val = -E1000_ERR_NVM;
3623
3624         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3625
3626         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3627
3628         /* Check if the flash descriptor is valid */
3629         if (!hsfsts.hsf_status.fldesvalid) {
3630                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3631                 return -E1000_ERR_NVM;
3632         }
3633
3634         /* Clear FCERR and DAEL in hw status by writing 1 */
3635         hsfsts.hsf_status.flcerr = 1;
3636         hsfsts.hsf_status.dael = 1;
3637         if (hw->mac.type >= e1000_pch_spt)
3638                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3639                                       hsfsts.regval & 0xFFFF);
3640         else
3641                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3642
3643         /* Either we should have a hardware SPI cycle in progress
3644          * bit to check against, in order to start a new cycle or
3645          * FDONE bit should be changed in the hardware so that it
3646          * is 1 after hardware reset, which can then be used as an
3647          * indication whether a cycle is in progress or has been
3648          * completed.
3649          */
3650
3651         if (!hsfsts.hsf_status.flcinprog) {
3652                 /* There is no cycle running at present,
3653                  * so we can start a cycle.
3654                  * Begin by setting Flash Cycle Done.
3655                  */
3656                 hsfsts.hsf_status.flcdone = 1;
3657                 if (hw->mac.type >= e1000_pch_spt)
3658                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3659                                               hsfsts.regval & 0xFFFF);
3660                 else
3661                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3662                                                 hsfsts.regval);
3663                 ret_val = E1000_SUCCESS;
3664         } else {
3665                 s32 i;
3666
3667                 /* Otherwise poll for sometime so the current
3668                  * cycle has a chance to end before giving up.
3669                  */
3670                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3671                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3672                                                               ICH_FLASH_HSFSTS);
3673                         if (!hsfsts.hsf_status.flcinprog) {
3674                                 ret_val = E1000_SUCCESS;
3675                                 break;
3676                         }
3677                         usec_delay(1);
3678                 }
3679                 if (ret_val == E1000_SUCCESS) {
3680                         /* Successful in waiting for previous cycle to timeout,
3681                          * now set the Flash Cycle Done.
3682                          */
3683                         hsfsts.hsf_status.flcdone = 1;
3684                         if (hw->mac.type >= e1000_pch_spt)
3685                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3686                                                       hsfsts.regval & 0xFFFF);
3687                         else
3688                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3689                                                         hsfsts.regval);
3690                 } else {
3691                         DEBUGOUT("Flash controller busy, cannot get access\n");
3692                 }
3693         }
3694
3695         return ret_val;
3696 }
3697
3698 /**
3699  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3700  *  @hw: pointer to the HW structure
3701  *  @timeout: maximum time to wait for completion
3702  *
3703  *  This function starts a flash cycle and waits for its completion.
3704  **/
3705 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3706 {
3707         union ich8_hws_flash_ctrl hsflctl;
3708         union ich8_hws_flash_status hsfsts;
3709         u32 i = 0;
3710
3711         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3712
3713         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3714         if (hw->mac.type >= e1000_pch_spt)
3715                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3716         else
3717                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3718         hsflctl.hsf_ctrl.flcgo = 1;
3719
3720         if (hw->mac.type >= e1000_pch_spt)
3721                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3722                                       hsflctl.regval << 16);
3723         else
3724                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3725
3726         /* wait till FDONE bit is set to 1 */
3727         do {
3728                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3729                 if (hsfsts.hsf_status.flcdone)
3730                         break;
3731                 usec_delay(1);
3732         } while (i++ < timeout);
3733
3734         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3735                 return E1000_SUCCESS;
3736
3737         return -E1000_ERR_NVM;
3738 }
3739
3740 /**
3741  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3742  *  @hw: pointer to the HW structure
3743  *  @offset: offset to data location
3744  *  @data: pointer to the location for storing the data
3745  *
3746  *  Reads the flash dword at offset into data.  Offset is converted
3747  *  to bytes before read.
3748  **/
3749 STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3750                                           u32 *data)
3751 {
3752         DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3753
3754         if (!data)
3755                 return -E1000_ERR_NVM;
3756
3757         /* Must convert word offset into bytes. */
3758         offset <<= 1;
3759
3760         return e1000_read_flash_data32_ich8lan(hw, offset, data);
3761 }
3762
3763 /**
3764  *  e1000_read_flash_word_ich8lan - Read word from flash
3765  *  @hw: pointer to the HW structure
3766  *  @offset: offset to data location
3767  *  @data: pointer to the location for storing the data
3768  *
3769  *  Reads the flash word at offset into data.  Offset is converted
3770  *  to bytes before read.
3771  **/
3772 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3773                                          u16 *data)
3774 {
3775         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3776
3777         if (!data)
3778                 return -E1000_ERR_NVM;
3779
3780         /* Must convert offset into bytes. */
3781         offset <<= 1;
3782
3783         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3784 }
3785
3786 /**
3787  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3788  *  @hw: pointer to the HW structure
3789  *  @offset: The offset of the byte to read.
3790  *  @data: Pointer to a byte to store the value read.
3791  *
3792  *  Reads a single byte from the NVM using the flash access registers.
3793  **/
3794 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3795                                          u8 *data)
3796 {
3797         s32 ret_val;
3798         u16 word = 0;
3799
3800         /* In SPT, only 32 bits access is supported,
3801          * so this function should not be called.
3802          */
3803         if (hw->mac.type >= e1000_pch_spt)
3804                 return -E1000_ERR_NVM;
3805         else
3806                 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3807
3808         if (ret_val)
3809                 return ret_val;
3810
3811         *data = (u8)word;
3812
3813         return E1000_SUCCESS;
3814 }
3815
/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte or word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: Pointer to the word to store the value read.
 *
 *  Reads a byte or word from the NVM using the flash access registers.
 *  On a flash error the whole init/cycle sequence is retried up to
 *  ICH_FLASH_CYCLE_REPEAT_COUNT times before giving up.
 **/
STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	DEBUGFUNC("e1000_read_flash_data_ich8lan");

	/* Only 1- or 2-byte reads are supported, and the offset must fall
	 * within the linear flash address space.
	 */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == E1000_SUCCESS) {
			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3891
3892 /**
3893  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3894  *  @hw: pointer to the HW structure
3895  *  @offset: The offset (in bytes) of the dword to read.
3896  *  @data: Pointer to the dword to store the value read.
3897  *
3898  *  Reads a byte or word from the NVM using the flash access registers.
3899  **/
3900 STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3901                                            u32 *data)
3902 {
3903         union ich8_hws_flash_status hsfsts;
3904         union ich8_hws_flash_ctrl hsflctl;
3905         u32 flash_linear_addr;
3906         s32 ret_val = -E1000_ERR_NVM;
3907         u8 count = 0;
3908
3909         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3910
3911                 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3912                     hw->mac.type < e1000_pch_spt)
3913                         return -E1000_ERR_NVM;
3914         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3915                              hw->nvm.flash_base_addr);
3916
3917         do {
3918                 usec_delay(1);
3919                 /* Steps */
3920                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3921                 if (ret_val != E1000_SUCCESS)
3922                         break;
3923                 /* In SPT, This register is in Lan memory space, not flash.
3924                  * Therefore, only 32 bit access is supported
3925                  */
3926                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3927
3928                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3929                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3930                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3931                 /* In SPT, This register is in Lan memory space, not flash.
3932                  * Therefore, only 32 bit access is supported
3933                  */
3934                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3935                                       (u32)hsflctl.regval << 16);
3936                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3937
3938                 ret_val = e1000_flash_cycle_ich8lan(hw,
3939                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3940
3941                 /* Check if FCERR is set to 1, if set to 1, clear it
3942                  * and try the whole sequence a few more times, else
3943                  * read in (shift in) the Flash Data0, the order is
3944                  * least significant byte first msb to lsb
3945                  */
3946                 if (ret_val == E1000_SUCCESS) {
3947                         *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3948                         break;
3949                 } else {
3950                         /* If we've gotten here, then things are probably
3951                          * completely hosed, but if the error condition is
3952                          * detected, it won't hurt to give it another try...
3953                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3954                          */
3955                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3956                                                               ICH_FLASH_HSFSTS);
3957                         if (hsfsts.hsf_status.flcerr) {
3958                                 /* Repeat for some time before giving up. */
3959                                 continue;
3960                         } else if (!hsfsts.hsf_status.flcdone) {
3961                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3962                                 break;
3963                         }
3964                 }
3965         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3966
3967         return ret_val;
3968 }
3969
3970 /**
3971  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3972  *  @hw: pointer to the HW structure
3973  *  @offset: The offset (in bytes) of the word(s) to write.
3974  *  @words: Size of data to write in words
3975  *  @data: Pointer to the word(s) to write at offset.
3976  *
3977  *  Writes a byte or word to the NVM using the flash access registers.
3978  **/
3979 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3980                                    u16 *data)
3981 {
3982         struct e1000_nvm_info *nvm = &hw->nvm;
3983         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3984         u16 i;
3985
3986         DEBUGFUNC("e1000_write_nvm_ich8lan");
3987
3988         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3989             (words == 0)) {
3990                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3991                 return -E1000_ERR_NVM;
3992         }
3993
3994         nvm->ops.acquire(hw);
3995
3996         for (i = 0; i < words; i++) {
3997                 dev_spec->shadow_ram[offset+i].modified = true;
3998                 dev_spec->shadow_ram[offset+i].value = data[i];
3999         }
4000
4001         nvm->ops.release(hw);
4002
4003         return E1000_SUCCESS;
4004 }
4005
4006 /**
4007  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4008  *  @hw: pointer to the HW structure
4009  *
4010  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4011  *  which writes the checksum to the shadow ram.  The changes in the shadow
4012  *  ram are then committed to the EEPROM by processing each bank at a time
4013  *  checking for the modified bit and writing only the pending changes.
4014  *  After a successful commit, the shadow ram is cleared and is ready for
4015  *  future writes.
4016  **/
4017 STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4018 {
4019         struct e1000_nvm_info *nvm = &hw->nvm;
4020         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4021         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4022         s32 ret_val;
4023         u32 dword = 0;
4024
4025         DEBUGFUNC("e1000_update_nvm_checksum_spt");
4026
4027         ret_val = e1000_update_nvm_checksum_generic(hw);
4028         if (ret_val)
4029                 goto out;
4030
4031         if (nvm->type != e1000_nvm_flash_sw)
4032                 goto out;
4033
4034         nvm->ops.acquire(hw);
4035
4036         /* We're writing to the opposite bank so if we're on bank 1,
4037          * write to bank 0 etc.  We also need to erase the segment that
4038          * is going to be written
4039          */
4040         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4041         if (ret_val != E1000_SUCCESS) {
4042                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4043                 bank = 0;
4044         }
4045
4046         if (bank == 0) {
4047                 new_bank_offset = nvm->flash_bank_size;
4048                 old_bank_offset = 0;
4049                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4050                 if (ret_val)
4051                         goto release;
4052         } else {
4053                 old_bank_offset = nvm->flash_bank_size;
4054                 new_bank_offset = 0;
4055                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4056                 if (ret_val)
4057                         goto release;
4058         }
4059         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4060                 /* Determine whether to write the value stored
4061                  * in the other NVM bank or a modified value stored
4062                  * in the shadow RAM
4063                  */
4064                 ret_val = e1000_read_flash_dword_ich8lan(hw,
4065                                                          i + old_bank_offset,
4066                                                          &dword);
4067
4068                 if (dev_spec->shadow_ram[i].modified) {
4069                         dword &= 0xffff0000;
4070                         dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4071                 }
4072                 if (dev_spec->shadow_ram[i + 1].modified) {
4073                         dword &= 0x0000ffff;
4074                         dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4075                                   << 16);
4076                 }
4077                 if (ret_val)
4078                         break;
4079
4080                 /* If the word is 0x13, then make sure the signature bits
4081                  * (15:14) are 11b until the commit has completed.
4082                  * This will allow us to write 10b which indicates the
4083                  * signature is valid.  We want to do this after the write
4084                  * has completed so that we don't mark the segment valid
4085                  * while the write is still in progress
4086                  */
4087                 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4088                         dword |= E1000_ICH_NVM_SIG_MASK << 16;
4089
4090                 /* Convert offset to bytes. */
4091                 act_offset = (i + new_bank_offset) << 1;
4092
4093                 usec_delay(100);
4094
4095                 /* Write the data to the new bank. Offset in words*/
4096                 act_offset = i + new_bank_offset;
4097                 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4098                                                                 dword);
4099                 if (ret_val)
4100                         break;
4101          }
4102
4103         /* Don't bother writing the segment valid bits if sector
4104          * programming failed.
4105          */
4106         if (ret_val) {
4107                 DEBUGOUT("Flash commit failed.\n");
4108                 goto release;
4109         }
4110
4111         /* Finally validate the new segment by setting bit 15:14
4112          * to 10b in word 0x13 , this can be done without an
4113          * erase as well since these bits are 11 to start with
4114          * and we need to change bit 14 to 0b
4115          */
4116         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4117
4118         /*offset in words but we read dword*/
4119         --act_offset;
4120         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4121
4122         if (ret_val)
4123                 goto release;
4124
4125         dword &= 0xBFFFFFFF;
4126         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4127
4128         if (ret_val)
4129                 goto release;
4130
4131         /* And invalidate the previously valid segment by setting
4132          * its signature word (0x13) high_byte to 0b. This can be
4133          * done without an erase because flash erase sets all bits
4134          * to 1's. We can write 1's to 0's without an erase
4135          */
4136         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4137
4138         /* offset in words but we read dword*/
4139         act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4140         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4141
4142         if (ret_val)
4143                 goto release;
4144
4145         dword &= 0x00FFFFFF;
4146         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4147
4148         if (ret_val)
4149                 goto release;
4150
4151         /* Great!  Everything worked, we can now clear the cached entries. */
4152         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4153                 dev_spec->shadow_ram[i].modified = false;
4154                 dev_spec->shadow_ram[i].value = 0xFFFF;
4155         }
4156
4157 release:
4158         nvm->ops.release(hw);
4159
4160         /* Reload the EEPROM, or else modifications will not appear
4161          * until after the next adapter reset.
4162          */
4163         if (!ret_val) {
4164                 nvm->ops.reload(hw);
4165                 msec_delay(10);
4166         }
4167
4168 out:
4169         if (ret_val)
4170                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4171
4172         return ret_val;
4173 }
4174
/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
 *  After a successful commit, the shadow ram is cleared and is ready for
 *  future writes.
 **/
STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data = 0;

	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");

	ret_val = e1000_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* Only flash-backed (software shadowed) NVM needs a commit. */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}
	/* Copy every word into the new bank: a pending shadow-RAM value if
	 * one exists, otherwise the current value read from the old bank.
	 */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}
		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usec_delay(100);

		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usec_delay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							  act_offset + 1,
							  (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		DEBUGOUT("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	data &= 0xBFFF;
	/* Only the high byte (which holds bits 15:14) needs rewriting. */
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's. We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;

	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);

	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		msec_delay(10);
	}

out:
	if (ret_val)
		DEBUGOUT1("NVM update error: %d\n", ret_val);

	return ret_val;
}
4327
4328 /**
4329  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4330  *  @hw: pointer to the HW structure
4331  *
4332  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4333  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4334  *  calculated, in which case we need to calculate the checksum and set bit 6.
4335  **/
4336 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4337 {
4338         s32 ret_val;
4339         u16 data;
4340         u16 word;
4341         u16 valid_csum_mask;
4342
4343         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4344
4345         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4346          * the checksum needs to be fixed.  This bit is an indication that
4347          * the NVM was prepared by OEM software and did not calculate
4348          * the checksum...a likely scenario.
4349          */
4350         switch (hw->mac.type) {
4351         case e1000_pch_lpt:
4352         case e1000_pch_spt:
4353                 word = NVM_COMPAT;
4354                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4355                 break;
4356         default:
4357                 word = NVM_FUTURE_INIT_WORD1;
4358                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4359                 break;
4360         }
4361
4362         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4363         if (ret_val)
4364                 return ret_val;
4365
4366         if (!(data & valid_csum_mask)) {
4367                 data |= valid_csum_mask;
4368                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4369                 if (ret_val)
4370                         return ret_val;
4371                 ret_val = hw->nvm.ops.update(hw);
4372                 if (ret_val)
4373                         return ret_val;
4374         }
4375
4376         return e1000_validate_nvm_checksum_generic(hw);
4377 }
4378
/**
 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte/word to write.
 *  @size: Size of data to write: 1=byte, 2=word (must be 4 on SPT and newer)
 *  @data: The byte(s) to write to the NVM.
 *
 *  Writes one/two bytes to the NVM using the flash access registers.
 *  On a flash error the sequence is retried up to
 *  ICH_FLASH_CYCLE_REPEAT_COUNT times before giving up.
 **/
STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	DEBUGFUNC("e1000_write_ich8_data");

	/* SPT and newer only allow 32-bit flash accesses; older parts
	 * accept byte or word writes.  In all cases the offset must fall
	 * within the linear flash address space.
	 */
	if (hw->mac.type >= e1000_pch_spt) {
		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	} else {
		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	}

	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		/* In SPT, This register is in Lan memory space, not
		 * flash.  Therefore, only 32 bit access is supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			hsflctl.regval =
			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
		else
			hsflctl.regval =
			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		/* In SPT, This register is in Lan memory space,
		 * not flash.  Therefore, only 32 bit access is
		 * supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsflctl.regval << 16);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
						hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
4476
/**
*  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
*  @hw: pointer to the HW structure
*  @offset: The offset (in bytes) of the dword to write.
*  @data: The 4 bytes to write to the NVM.
*
*  Writes a single dword to the NVM using the flash access registers.
*  On a flash-cycle error the whole sequence is retried up to
*  ICH_FLASH_CYCLE_REPEAT_COUNT times before giving up.
**/
STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					    u32 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	s32 ret_val;
	u8 count = 0;	/* flash-cycle retry counter */

	DEBUGFUNC("e1000_write_flash_data32_ich8lan");

	/* On SPT and newer the byte offset must fit within the FADDR mask */
	if (hw->mac.type >= e1000_pch_spt) {
		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	}
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);
	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;

		/* In SPT, This register is in Lan memory space, not
		 * flash.  Therefore, only 32 bit access is supported.
		 * HSFCTL lives in the upper 16 bits of the combined
		 * 32-bit HSFSTS register, hence the >> 16.
		 */
		if (hw->mac.type >= e1000_pch_spt)
			hsflctl.regval = E1000_READ_FLASH_REG(hw,
							      ICH_FLASH_HSFSTS)
					 >> 16;
		else
			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFCTL);

		/* fldbcount encodes (byte count - 1); always a full dword */
		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;

		/* In SPT, This register is in Lan memory space,
		 * not flash.  Therefore, only 32 bit access is
		 * supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsflctl.regval << 16);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
						hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val = e1000_flash_cycle_ich8lan(hw,
					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);

		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
4565
4566 /**
4567  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4568  *  @hw: pointer to the HW structure
4569  *  @offset: The index of the byte to read.
4570  *  @data: The byte to write to the NVM.
4571  *
4572  *  Writes a single byte to the NVM using the flash access registers.
4573  **/
4574 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4575                                           u8 data)
4576 {
4577         u16 word = (u16)data;
4578
4579         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4580
4581         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4582 }
4583
/**
*  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
*  @hw: pointer to the HW structure
*  @offset: The offset (in 16-bit words) of the dword to write.
*  @dword: The dword to write to the NVM.
*
*  Writes a single dword to the NVM using the flash access registers.
*  Goes through a retry algorithm (up to 100 attempts, 100us apart)
*  before giving up.
**/
STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
						 u32 offset, u32 dword)
{
	s32 ret_val;
	u16 program_retries;

	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");

	/* Must convert word offset into bytes. */
	offset <<= 1;

	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);

	if (!ret_val)
		return ret_val;
	/* Initial write failed; retry with a short pause between attempts. */
	for (program_retries = 0; program_retries < 100; program_retries++) {
		/* NOTE(review): message says "Byte" although a dword is
		 * being retried - kept as-is to preserve log output.
		 */
		DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
		usec_delay(100);
		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
		if (ret_val == E1000_SUCCESS)
			break;
	}
	/* All 100 retries failed */
	if (program_retries == 100)
		return -E1000_ERR_NVM;

	return E1000_SUCCESS;
}
4620
4621 /**
4622  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4623  *  @hw: pointer to the HW structure
4624  *  @offset: The offset of the byte to write.
4625  *  @byte: The byte to write to the NVM.
4626  *
4627  *  Writes a single byte to the NVM using the flash access registers.
4628  *  Goes through a retry algorithm before giving up.
4629  **/
4630 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4631                                                 u32 offset, u8 byte)
4632 {
4633         s32 ret_val;
4634         u16 program_retries;
4635
4636         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4637
4638         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4639         if (!ret_val)
4640                 return ret_val;
4641
4642         for (program_retries = 0; program_retries < 100; program_retries++) {
4643                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4644                 usec_delay(100);
4645                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4646                 if (ret_val == E1000_SUCCESS)
4647                         break;
4648         }
4649         if (program_retries == 100)
4650                 return -E1000_ERR_NVM;
4651
4652         return E1000_SUCCESS;
4653 }
4654
/**
 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
 *  @hw: pointer to the HW structure
 *  @bank: 0 for first bank, 1 for second bank, etc.
 *
 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
 *  bank N is 4096 * N + flash_reg_addr.
 **/
STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	/* bank size is in 16bit words - adjust to bytes */
	u32 flash_bank_size = nvm->flash_bank_size * 2;
	s32 ret_val;
	/* flash-cycle retry counter; note it is shared across all sectors
	 * (not reset per j iteration)
	 */
	s32 count = 0;
	s32 j, iteration, sector_size;

	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Determine HW Sector size: Read BERASE bits of hw flash status
	 * register
	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
	 *     consecutive sectors.  The start index for the nth Hw sector
	 *     can be calculated as = bank * 4096 + n * 256
	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
	 *     The start index for the nth Hw sector can be calculated
	 *     as = bank * 4096
	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
	 *     (ich9 only, otherwise error condition)
	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
	 */
	switch (hsfsts.hsf_status.berasesz) {
	case 0:
		/* Hw sector size 256 */
		sector_size = ICH_FLASH_SEG_SIZE_256;
		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
		break;
	case 1:
		sector_size = ICH_FLASH_SEG_SIZE_4K;
		iteration = 1;
		break;
	case 2:
		sector_size = ICH_FLASH_SEG_SIZE_8K;
		iteration = 1;
		break;
	case 3:
		sector_size = ICH_FLASH_SEG_SIZE_64K;
		iteration = 1;
		break;
	default:
		return -E1000_ERR_NVM;
	}

	/* Start with the base address, then add the sector offset. */
	flash_linear_addr = hw->nvm.flash_base_addr;
	flash_linear_addr += (bank) ? flash_bank_size : 0;

	for (j = 0; j < iteration; j++) {
		do {
			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;

			/* Steps */
			ret_val = e1000_flash_cycle_init_ich8lan(hw);
			if (ret_val)
				return ret_val;

			/* Write a value 11 (block Erase) in Flash
			 * Cycle field in hw flash control
			 */
			if (hw->mac.type >= e1000_pch_spt)
				hsflctl.regval =
				    E1000_READ_FLASH_REG(hw,
							 ICH_FLASH_HSFSTS)>>16;
			else
				hsflctl.regval =
				    E1000_READ_FLASH_REG16(hw,
							   ICH_FLASH_HSFCTL);

			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
			if (hw->mac.type >= e1000_pch_spt)
				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
						      hsflctl.regval << 16);
			else
				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
							hsflctl.regval);

			/* Write the last 24 bits of an index within the
			 * block into Flash Linear address field in Flash
			 * Address.
			 */
			/* NOTE(review): this += re-executes on every retry
			 * of the enclosing do/while, so for j > 0 a retried
			 * cycle targets a different address than the failed
			 * one - verify against the upstream driver.
			 */
			flash_linear_addr += (j * sector_size);
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
					      flash_linear_addr);

			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
			if (ret_val == E1000_SUCCESS)
				break;

			/* Check if FCERR is set to 1.  If 1,
			 * clear it and try the whole sequence
			 * a few more times else Done
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
						      ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr)
				/* repeat for some time before giving up */
				continue;
			else if (!hsfsts.hsf_status.flcdone)
				return ret_val;
		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
	}

	return E1000_SUCCESS;
}
4774
4775 /**
4776  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4777  *  @hw: pointer to the HW structure
4778  *  @data: Pointer to the LED settings
4779  *
4780  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4781  *  settings is all 0's or F's, set the LED default to a valid LED default
4782  *  setting.
4783  **/
4784 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4785 {
4786         s32 ret_val;
4787
4788         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4789
4790         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4791         if (ret_val) {
4792                 DEBUGOUT("NVM Read Error\n");
4793                 return ret_val;
4794         }
4795
4796         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4797                 *data = ID_LED_DEFAULT_ICH8LAN;
4798
4799         return E1000_SUCCESS;
4800 }
4801
/**
 *  e1000_id_led_init_pchlan - store LED configurations
 *  @hw: pointer to the HW structure
 *
 *  PCH does not control LEDs via the LEDCTL register, rather it uses
 *  the PHY LED configuration register.
 *
 *  PCH also does not have an "always on" or "always off" mode which
 *  complicates the ID feature.  Instead of using the "on" mode to indicate
 *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
 *  use "link_up" mode.  The LEDs will still ID on request if there is no
 *  link based on logic in e1000_led_[on|off]_pchlan().
 **/
STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	/* "on" is approximated with link-up mode; "off" is link-up with
	 * the LED polarity inverted (IVRT bit)
	 */
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	DEBUGFUNC("e1000_id_led_init_pchlan");

	/* Get default ID LED modes */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* Each LED's mode occupies a 4-bit nibble in the NVM word */
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		/* Each LED's field is 5 bits wide in the PHY LED config
		 * register as masked by E1000_PHY_LED0_MASK below
		 */
		shift = (i * 5);
		/* mode1 handles the "LED1" half of each nibble encoding */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		/* mode2 handles the "LED2" half of each nibble encoding */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return E1000_SUCCESS;
}
4875
4876 /**
4877  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4878  *  @hw: pointer to the HW structure
4879  *
4880  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4881  *  register, so the the bus width is hard coded.
4882  **/
4883 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4884 {
4885         struct e1000_bus_info *bus = &hw->bus;
4886         s32 ret_val;
4887
4888         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4889
4890         ret_val = e1000_get_bus_info_pcie_generic(hw);
4891
4892         /* ICH devices are "PCI Express"-ish.  They have
4893          * a configuration space, but do not contain
4894          * PCI Express Capability registers, so bus width
4895          * must be hardcoded.
4896          */
4897         if (bus->width == e1000_bus_width_unknown)
4898                 bus->width = e1000_bus_width_pcie_x1;
4899
4900         return ret_val;
4901 }
4902
/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.  The statement order below is deliberate: Tx/Rx are quiesced before
 *  the global reset, and the PHY is reset together with the MAC when not
 *  blocked.
 **/
STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting - restored after the reset
		 * by the post-PHY-reset path
		 */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	DEBUGOUT("Issuing a global reset to ich8lan\n");
	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msec_delay(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
	}

	/* Release the software flag only if it was successfully acquired */
	if (!ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	/* If the PHY was reset along with the MAC, wait for PHY config
	 * to finish and run the post-reset workarounds
	 */
	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);

	/* Re-mask interrupts and clear any that are pending */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	reg = E1000_READ_REG(hw, E1000_KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);

	return E1000_SUCCESS;
}
5018
/**
 *  e1000_init_hw_ich8lan - Initialize the hardware
 *  @hw: pointer to the HW structure
 *
 *  Prepares the hardware for transmit and receive by doing the following:
 *   - initialize hardware bits
 *   - initialize LED identification
 *   - setup receive address registers
 *   - setup flow control
 *   - setup transmit descriptors
 *   - clear statistics
 *
 *  Returns the result of setup_link(); earlier non-fatal failures (LED
 *  init) are logged but do not abort initialization.
 **/
STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_init_hw_ich8lan");

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		DEBUGOUT("Error initializing identification LED\n");

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);

	/* ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000_set_pcie_no_snoop_generic(hw, snoop);

	/* Disable relaxed ordering */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}
5108
5109 /**
5110  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5111  *  @hw: pointer to the HW structure
5112  *
5113  *  Sets/Clears required hardware bits necessary for correctly setting up the
5114  *  hardware for transmit and receive.
5115  **/
5116 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5117 {
5118         u32 reg;
5119
5120         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5121
5122         /* Extended Device Control */
5123         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5124         reg |= (1 << 22);
5125         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5126         if (hw->mac.type >= e1000_pchlan)
5127                 reg |= E1000_CTRL_EXT_PHYPDEN;
5128         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5129
5130         /* Transmit Descriptor Control 0 */
5131         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5132         reg |= (1 << 22);
5133         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5134
5135         /* Transmit Descriptor Control 1 */
5136         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5137         reg |= (1 << 22);
5138         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5139
5140         /* Transmit Arbitration Control 0 */
5141         reg = E1000_READ_REG(hw, E1000_TARC(0));
5142         if (hw->mac.type == e1000_ich8lan)
5143                 reg |= (1 << 28) | (1 << 29);
5144         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5145         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5146
5147         /* Transmit Arbitration Control 1 */
5148         reg = E1000_READ_REG(hw, E1000_TARC(1));
5149         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5150                 reg &= ~(1 << 28);
5151         else
5152                 reg |= (1 << 28);
5153         reg |= (1 << 24) | (1 << 26) | (1 << 30);
5154         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5155
5156         /* Device Status */
5157         if (hw->mac.type == e1000_ich8lan) {
5158                 reg = E1000_READ_REG(hw, E1000_STATUS);
5159                 reg &= ~(1 << 31);
5160                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5161         }
5162
5163         /* work-around descriptor data corruption issue during nfs v2 udp
5164          * traffic, just disable the nfs filtering capability
5165          */
5166         reg = E1000_READ_REG(hw, E1000_RFCTL);
5167         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5168
5169         /* Disable IPv6 extension header parsing because some malformed
5170          * IPv6 headers can hang the Rx.
5171          */
5172         if (hw->mac.type == e1000_ich8lan)
5173                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5174         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5175
5176         /* Enable ECC on Lynxpoint */
5177         if (hw->mac.type >= e1000_pch_lpt) {
5178                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5179                 reg |= E1000_PBECCSTS_ECC_ENABLE;
5180                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5181
5182                 reg = E1000_READ_REG(hw, E1000_CTRL);
5183                 reg |= E1000_CTRL_MEHE;
5184                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5185         }
5186
5187         return;
5188 }
5189
/**
 *  e1000_setup_link_ich8lan - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_setup_link_ich8lan");

	/* Nothing to do if PHY access is blocked (e.g. by manageability) */
	if (hw->phy.ops.check_reset_block(hw))
		return E1000_SUCCESS;

	/* ICH parts do not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->fc.requested_mode == e1000_fc_default)
		hw->fc.requested_mode = e1000_fc_full;

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
		hw->fc.current_mode);

	/* Continue to configure the copper link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
	/* PCH-class PHYs also need refresh time and pause time programmed
	 * on the PHY side
	 */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);

		ret_val = hw->phy.ops.write_reg(hw,
					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
					     hw->fc.pause_time);
		if (ret_val)
			return ret_val;
	}

	return e1000_set_fc_watermarks_generic(hw);
}
5245
5246 /**
5247  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5248  *  @hw: pointer to the HW structure
5249  *
5250  *  Configures the kumeran interface to the PHY to wait the appropriate time
5251  *  when polling the PHY, then call the generic setup_copper_link to finish
5252  *  configuring the copper link.
5253  **/
5254 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5255 {
5256         u32 ctrl;
5257         s32 ret_val;
5258         u16 reg_data;
5259
5260         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5261
5262         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5263         ctrl |= E1000_CTRL_SLU;
5264         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5265         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5266
5267         /* Set the mac to wait the maximum time between each iteration
5268          * and increase the max iterations when polling the phy;
5269          * this fixes erroneous timeouts at 10Mbps.
5270          */
5271         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5272                                                0xFFFF);
5273         if (ret_val)
5274                 return ret_val;
5275         ret_val = e1000_read_kmrn_reg_generic(hw,
5276                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
5277                                               &reg_data);
5278         if (ret_val)
5279                 return ret_val;
5280         reg_data |= 0x3F;
5281         ret_val = e1000_write_kmrn_reg_generic(hw,
5282                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
5283                                                reg_data);
5284         if (ret_val)
5285                 return ret_val;
5286
5287         switch (hw->phy.type) {
5288         case e1000_phy_igp_3:
5289                 ret_val = e1000_copper_link_setup_igp(hw);
5290                 if (ret_val)
5291                         return ret_val;
5292                 break;
5293         case e1000_phy_bm:
5294         case e1000_phy_82578:
5295                 ret_val = e1000_copper_link_setup_m88(hw);
5296                 if (ret_val)
5297                         return ret_val;
5298                 break;
5299         case e1000_phy_82577:
5300         case e1000_phy_82579:
5301                 ret_val = e1000_copper_link_setup_82577(hw);
5302                 if (ret_val)
5303                         return ret_val;
5304                 break;
5305         case e1000_phy_ife:
5306                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5307                                                &reg_data);
5308                 if (ret_val)
5309                         return ret_val;
5310
5311                 reg_data &= ~IFE_PMC_AUTO_MDIX;
5312
5313                 switch (hw->phy.mdix) {
5314                 case 1:
5315                         reg_data &= ~IFE_PMC_FORCE_MDIX;
5316                         break;
5317                 case 2:
5318                         reg_data |= IFE_PMC_FORCE_MDIX;
5319                         break;
5320                 case 0:
5321                 default:
5322                         reg_data |= IFE_PMC_AUTO_MDIX;
5323                         break;
5324                 }
5325                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5326                                                 reg_data);
5327                 if (ret_val)
5328                         return ret_val;
5329                 break;
5330         default:
5331                 break;
5332         }
5333
5334         return e1000_setup_copper_link_generic(hw);
5335 }
5336
5337 /**
5338  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5339  *  @hw: pointer to the HW structure
5340  *
5341  *  Calls the PHY specific link setup function and then calls the
5342  *  generic setup_copper_link to finish configuring the link for
5343  *  Lynxpoint PCH devices
5344  **/
5345 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5346 {
5347         u32 ctrl;
5348         s32 ret_val;
5349
5350         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5351
5352         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5353         ctrl |= E1000_CTRL_SLU;
5354         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5355         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5356
5357         ret_val = e1000_copper_link_setup_82577(hw);
5358         if (ret_val)
5359                 return ret_val;
5360
5361         return e1000_setup_copper_link_generic(hw);
5362 }
5363
5364 /**
5365  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5366  *  @hw: pointer to the HW structure
5367  *  @speed: pointer to store current link speed
5368  *  @duplex: pointer to store the current link duplex
5369  *
5370  *  Calls the generic get_speed_and_duplex to retrieve the current link
5371  *  information and then calls the Kumeran lock loss workaround for links at
5372  *  gigabit speeds.
5373  **/
5374 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5375                                           u16 *duplex)
5376 {
5377         s32 ret_val;
5378
5379         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5380
5381         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5382         if (ret_val)
5383                 return ret_val;
5384
5385         if ((hw->mac.type == e1000_ich8lan) &&
5386             (hw->phy.type == e1000_phy_igp_3) &&
5387             (*speed == SPEED_1000)) {
5388                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5389         }
5390
5391         return ret_val;
5392 }
5393
5394 /**
5395  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5396  *  @hw: pointer to the HW structure
5397  *
5398  *  Work-around for 82566 Kumeran PCS lock loss:
5399  *  On link status change (i.e. PCI reset, speed change) and link is up and
5400  *  speed is gigabit-
5401  *    0) if workaround is optionally disabled do nothing
5402  *    1) wait 1ms for Kumeran link to come up
5403  *    2) check Kumeran Diagnostic register PCS lock loss bit
5404  *    3) if not set the link is locked (all is good), otherwise...
5405  *    4) reset the PHY
5406  *    5) repeat up to 10 times
5407  *  Note: this is only called for IGP3 copper when speed is 1gb.
5408  **/
5409 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5410 {
5411         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5412         u32 phy_ctrl;
5413         s32 ret_val;
5414         u16 i, data;
5415         bool link;
5416
5417         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5418
5419         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5420                 return E1000_SUCCESS;
5421
5422         /* Make sure link is up before proceeding.  If not just return.
5423          * Attempting this while link is negotiating fouled up link
5424          * stability
5425          */
5426         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5427         if (!link)
5428                 return E1000_SUCCESS;
5429
5430         for (i = 0; i < 10; i++) {
5431                 /* read once to clear */
5432                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5433                 if (ret_val)
5434                         return ret_val;
5435                 /* and again to get new status */
5436                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5437                 if (ret_val)
5438                         return ret_val;
5439
5440                 /* check for PCS lock */
5441                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5442                         return E1000_SUCCESS;
5443
5444                 /* Issue PHY reset */
5445                 hw->phy.ops.reset(hw);
5446                 msec_delay_irq(5);
5447         }
5448         /* Disable GigE link negotiation */
5449         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5450         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5451                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5452         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5453
5454         /* Call gig speed drop workaround on Gig disable before accessing
5455          * any PHY registers
5456          */
5457         e1000_gig_downshift_workaround_ich8lan(hw);
5458
5459         /* unable to acquire PCS lock */
5460         return -E1000_ERR_PHY;
5461 }
5462
5463 /**
5464  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5465  *  @hw: pointer to the HW structure
5466  *  @state: boolean value used to set the current Kumeran workaround state
5467  *
5468  *  If ICH8, set the current Kumeran workaround state (enabled - true
5469  *  /disabled - false).
5470  **/
5471 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5472                                                  bool state)
5473 {
5474         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5475
5476         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5477
5478         if (hw->mac.type != e1000_ich8lan) {
5479                 DEBUGOUT("Workaround applies to ICH8 only.\n");
5480                 return;
5481         }
5482
5483         dev_spec->kmrn_lock_loss_workaround_enabled = state;
5484
5485         return;
5486 }
5487
5488 /**
5489  *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5490  *  @hw: pointer to the HW structure
5491  *
5492  *  Workaround for 82566 power-down on D3 entry:
5493  *    1) disable gigabit link
5494  *    2) write VR power-down enable
5495  *    3) read it back
5496  *  Continue if successful, else issue LCD reset and repeat
5497  **/
5498 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5499 {
5500         u32 reg;
5501         u16 data;
5502         u8  retry = 0;
5503
5504         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5505
5506         if (hw->phy.type != e1000_phy_igp_3)
5507                 return;
5508
5509         /* Try the workaround twice (if needed) */
5510         do {
5511                 /* Disable link */
5512                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5513                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5514                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5515                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5516
5517                 /* Call gig speed drop workaround on Gig disable before
5518                  * accessing any PHY registers
5519                  */
5520                 if (hw->mac.type == e1000_ich8lan)
5521                         e1000_gig_downshift_workaround_ich8lan(hw);
5522
5523                 /* Write VR power-down enable */
5524                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5525                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5526                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5527                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5528
5529                 /* Read it back and test */
5530                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5531                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5532                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5533                         break;
5534
5535                 /* Issue PHY reset and repeat at most one more time */
5536                 reg = E1000_READ_REG(hw, E1000_CTRL);
5537                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5538                 retry++;
5539         } while (retry);
5540 }
5541
5542 /**
5543  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5544  *  @hw: pointer to the HW structure
5545  *
5546  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5547  *  LPLU, Gig disable, MDIC PHY reset):
5548  *    1) Set Kumeran Near-end loopback
5549  *    2) Clear Kumeran Near-end loopback
5550  *  Should only be called for ICH8[m] devices with any 1G Phy.
5551  **/
5552 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5553 {
5554         s32 ret_val;
5555         u16 reg_data;
5556
5557         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5558
5559         if ((hw->mac.type != e1000_ich8lan) ||
5560             (hw->phy.type == e1000_phy_ife))
5561                 return;
5562
5563         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5564                                               &reg_data);
5565         if (ret_val)
5566                 return;
5567         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5568         ret_val = e1000_write_kmrn_reg_generic(hw,
5569                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
5570                                                reg_data);
5571         if (ret_val)
5572                 return;
5573         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5574         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5575                                      reg_data);
5576 }
5577
5578 /**
5579  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5580  *  @hw: pointer to the HW structure
5581  *
5582  *  During S0 to Sx transition, it is possible the link remains at gig
5583  *  instead of negotiating to a lower speed.  Before going to Sx, set
5584  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5585  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5586  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5587  *  needs to be written.
5588  *  Parts that support (and are linked to a partner which support) EEE in
5589  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5590  *  than 10Mbps w/o EEE.
5591  **/
5592 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5593 {
5594         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5595         u32 phy_ctrl;
5596         s32 ret_val;
5597
5598         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5599
5600         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5601         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5602
5603         if (hw->phy.type == e1000_phy_i217) {
5604                 u16 phy_reg, device_id = hw->device_id;
5605
5606                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5607                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5608                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5609                     (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5610                     (hw->mac.type >= e1000_pch_spt)) {
5611                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5612
5613                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5614                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5615                 }
5616
5617                 ret_val = hw->phy.ops.acquire(hw);
5618                 if (ret_val)
5619                         goto out;
5620
5621                 if (!dev_spec->eee_disable) {
5622                         u16 eee_advert;
5623
5624                         ret_val =
5625                             e1000_read_emi_reg_locked(hw,
5626                                                       I217_EEE_ADVERTISEMENT,
5627                                                       &eee_advert);
5628                         if (ret_val)
5629                                 goto release;
5630
5631                         /* Disable LPLU if both link partners support 100BaseT
5632                          * EEE and 100Full is advertised on both ends of the
5633                          * link, and enable Auto Enable LPI since there will
5634                          * be no driver to enable LPI while in Sx.
5635                          */
5636                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5637                             (dev_spec->eee_lp_ability &
5638                              I82579_EEE_100_SUPPORTED) &&
5639                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5640                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5641                                               E1000_PHY_CTRL_NOND0A_LPLU);
5642
5643                                 /* Set Auto Enable LPI after link up */
5644                                 hw->phy.ops.read_reg_locked(hw,
5645                                                             I217_LPI_GPIO_CTRL,
5646                                                             &phy_reg);
5647                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5648                                 hw->phy.ops.write_reg_locked(hw,
5649                                                              I217_LPI_GPIO_CTRL,
5650                                                              phy_reg);
5651                         }
5652                 }
5653
5654                 /* For i217 Intel Rapid Start Technology support,
5655                  * when the system is going into Sx and no manageability engine
5656                  * is present, the driver must configure proxy to reset only on
5657                  * power good.  LPI (Low Power Idle) state must also reset only
5658                  * on power good, as well as the MTA (Multicast table array).
5659                  * The SMBus release must also be disabled on LCD reset.
5660                  */
5661                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5662                       E1000_ICH_FWSM_FW_VALID)) {
5663                         /* Enable proxy to reset only on power good. */
5664                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5665                                                     &phy_reg);
5666                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5667                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5668                                                      phy_reg);
5669
5670                         /* Set bit enable LPI (EEE) to reset only on
5671                          * power good.
5672                         */
5673                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5674                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5675                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5676
5677                         /* Disable the SMB release on LCD reset. */
5678                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5679                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5680                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5681                 }
5682
5683                 /* Enable MTA to reset for Intel Rapid Start Technology
5684                  * Support
5685                  */
5686                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5687                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5688                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5689
5690 release:
5691                 hw->phy.ops.release(hw);
5692         }
5693 out:
5694         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5695
5696         if (hw->mac.type == e1000_ich8lan)
5697                 e1000_gig_downshift_workaround_ich8lan(hw);
5698
5699         if (hw->mac.type >= e1000_pchlan) {
5700                 e1000_oem_bits_config_ich8lan(hw, false);
5701
5702                 /* Reset PHY to activate OEM bits on 82577/8 */
5703                 if (hw->mac.type == e1000_pchlan)
5704                         e1000_phy_hw_reset_generic(hw);
5705
5706                 ret_val = hw->phy.ops.acquire(hw);
5707                 if (ret_val)
5708                         return;
5709                 e1000_write_smbus_addr(hw);
5710                 hw->phy.ops.release(hw);
5711         }
5712
5713         return;
5714 }
5715
5716 /**
5717  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5718  *  @hw: pointer to the HW structure
5719  *
5720  *  During Sx to S0 transitions on non-managed devices or managed devices
5721  *  on which PHY resets are not blocked, if the PHY registers cannot be
5722  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5723  *  the PHY.
5724  *  On i217, setup Intel Rapid Start Technology.
5725  **/
5726 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5727 {
5728         s32 ret_val;
5729
5730         DEBUGFUNC("e1000_resume_workarounds_pchlan");
5731         if (hw->mac.type < e1000_pch2lan)
5732                 return E1000_SUCCESS;
5733
5734         ret_val = e1000_init_phy_workarounds_pchlan(hw);
5735         if (ret_val) {
5736                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5737                 return ret_val;
5738         }
5739
5740         /* For i217 Intel Rapid Start Technology support when the system
5741          * is transitioning from Sx and no manageability engine is present
5742          * configure SMBus to restore on reset, disable proxy, and enable
5743          * the reset on MTA (Multicast table array).
5744          */
5745         if (hw->phy.type == e1000_phy_i217) {
5746                 u16 phy_reg;
5747
5748                 ret_val = hw->phy.ops.acquire(hw);
5749                 if (ret_val) {
5750                         DEBUGOUT("Failed to setup iRST\n");
5751                         return ret_val;
5752                 }
5753
5754                 /* Clear Auto Enable LPI after link up */
5755                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5756                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5757                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5758
5759                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5760                     E1000_ICH_FWSM_FW_VALID)) {
5761                         /* Restore clear on SMB if no manageability engine
5762                          * is present
5763                          */
5764                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5765                                                               &phy_reg);
5766                         if (ret_val)
5767                                 goto release;
5768                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5769                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5770
5771                         /* Disable Proxy */
5772                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5773                 }
5774                 /* Enable reset on MTA */
5775                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5776                                                       &phy_reg);
5777                 if (ret_val)
5778                         goto release;
5779                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5780                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5781 release:
5782                 if (ret_val)
5783                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5784                 hw->phy.ops.release(hw);
5785                 return ret_val;
5786         }
5787         return E1000_SUCCESS;
5788 }
5789
5790 /**
5791  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5792  *  @hw: pointer to the HW structure
5793  *
5794  *  Return the LED back to the default configuration.
5795  **/
5796 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5797 {
5798         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5799
5800         if (hw->phy.type == e1000_phy_ife)
5801                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5802                                              0);
5803
5804         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5805         return E1000_SUCCESS;
5806 }
5807
5808 /**
5809  *  e1000_led_on_ich8lan - Turn LEDs on
5810  *  @hw: pointer to the HW structure
5811  *
5812  *  Turn on the LEDs.
5813  **/
5814 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5815 {
5816         DEBUGFUNC("e1000_led_on_ich8lan");
5817
5818         if (hw->phy.type == e1000_phy_ife)
5819                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5820                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5821
5822         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5823         return E1000_SUCCESS;
5824 }
5825
5826 /**
5827  *  e1000_led_off_ich8lan - Turn LEDs off
5828  *  @hw: pointer to the HW structure
5829  *
5830  *  Turn off the LEDs.
5831  **/
5832 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5833 {
5834         DEBUGFUNC("e1000_led_off_ich8lan");
5835
5836         if (hw->phy.type == e1000_phy_ife)
5837                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5838                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5839
5840         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5841         return E1000_SUCCESS;
5842 }
5843
5844 /**
5845  *  e1000_setup_led_pchlan - Configures SW controllable LED
5846  *  @hw: pointer to the HW structure
5847  *
5848  *  This prepares the SW controllable LED for use.
5849  **/
5850 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5851 {
5852         DEBUGFUNC("e1000_setup_led_pchlan");
5853
5854         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5855                                      (u16)hw->mac.ledctl_mode1);
5856 }
5857
5858 /**
5859  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5860  *  @hw: pointer to the HW structure
5861  *
5862  *  Return the LED back to the default configuration.
5863  **/
5864 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5865 {
5866         DEBUGFUNC("e1000_cleanup_led_pchlan");
5867
5868         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5869                                      (u16)hw->mac.ledctl_default);
5870 }
5871
5872 /**
5873  *  e1000_led_on_pchlan - Turn LEDs on
5874  *  @hw: pointer to the HW structure
5875  *
5876  *  Turn on the LEDs.
5877  **/
5878 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5879 {
5880         u16 data = (u16)hw->mac.ledctl_mode2;
5881         u32 i, led;
5882
5883         DEBUGFUNC("e1000_led_on_pchlan");
5884
5885         /* If no link, then turn LED on by setting the invert bit
5886          * for each LED that's mode is "link_up" in ledctl_mode2.
5887          */
5888         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5889                 for (i = 0; i < 3; i++) {
5890                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5891                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5892                             E1000_LEDCTL_MODE_LINK_UP)
5893                                 continue;
5894                         if (led & E1000_PHY_LED0_IVRT)
5895                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5896                         else
5897                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5898                 }
5899         }
5900
5901         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5902 }
5903
5904 /**
5905  *  e1000_led_off_pchlan - Turn LEDs off
5906  *  @hw: pointer to the HW structure
5907  *
5908  *  Turn off the LEDs.
5909  **/
5910 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5911 {
5912         u16 data = (u16)hw->mac.ledctl_mode1;
5913         u32 i, led;
5914
5915         DEBUGFUNC("e1000_led_off_pchlan");
5916
5917         /* If no link, then turn LED off by clearing the invert bit
5918          * for each LED that's mode is "link_up" in ledctl_mode1.
5919          */
5920         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5921                 for (i = 0; i < 3; i++) {
5922                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5923                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5924                             E1000_LEDCTL_MODE_LINK_UP)
5925                                 continue;
5926                         if (led & E1000_PHY_LED0_IVRT)
5927                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5928                         else
5929                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5930                 }
5931         }
5932
5933         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5934 }
5935
5936 /**
5937  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5938  *  @hw: pointer to the HW structure
5939  *
5940  *  Read appropriate register for the config done bit for completion status
5941  *  and configure the PHY through s/w for EEPROM-less parts.
5942  *
5943  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5944  *  config done bit, so only an error is logged and continues.  If we were
5945  *  to return with error, EEPROM-less silicon would not be able to be reset
5946  *  or change link.
5947  **/
5948 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5949 {
5950         s32 ret_val = E1000_SUCCESS;
5951         u32 bank = 0;
5952         u32 status;
5953
5954         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5955
5956         e1000_get_cfg_done_generic(hw);
5957
5958         /* Wait for indication from h/w that it has completed basic config */
5959         if (hw->mac.type >= e1000_ich10lan) {
5960                 e1000_lan_init_done_ich8lan(hw);
5961         } else {
5962                 ret_val = e1000_get_auto_rd_done_generic(hw);
5963                 if (ret_val) {
5964                         /* When auto config read does not complete, do not
5965                          * return with an error. This can happen in situations
5966                          * where there is no eeprom and prevents getting link.
5967                          */
5968                         DEBUGOUT("Auto Read Done did not complete\n");
5969                         ret_val = E1000_SUCCESS;
5970                 }
5971         }
5972
5973         /* Clear PHY Reset Asserted bit */
5974         status = E1000_READ_REG(hw, E1000_STATUS);
5975         if (status & E1000_STATUS_PHYRA)
5976                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5977         else
5978                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5979
5980         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5981         if (hw->mac.type <= e1000_ich9lan) {
5982                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5983                     (hw->phy.type == e1000_phy_igp_3)) {
5984                         e1000_phy_init_script_igp3(hw);
5985                 }
5986         } else {
5987                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5988                         /* Maybe we should do a basic PHY config */
5989                         DEBUGOUT("EEPROM not present\n");
5990                         ret_val = -E1000_ERR_CONFIG;
5991                 }
5992         }
5993
5994         return ret_val;
5995 }
5996
5997 /**
5998  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5999  * @hw: pointer to the HW structure
6000  *
6001  * In the case of a PHY power down to save power, or to turn off link during a
6002  * driver unload, or wake on lan is not enabled, remove the link.
6003  **/
6004 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6005 {
6006         /* If the management interface is not enabled, then power down */
6007         if (!(hw->mac.ops.check_mng_mode(hw) ||
6008               hw->phy.ops.check_reset_block(hw)))
6009                 e1000_power_down_phy_copper(hw);
6010
6011         return;
6012 }
6013
/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;	/* scratch; every read value is discarded */
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	/* Read each family-specific MAC statistics register and discard
	 * the value — presumably these are clear-on-read, consistent with
	 * the function's purpose; confirm against the MAC datasheet.
	 */
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers — only these PHY types keep
	 * statistics on the HV stats page.
	 */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		/* PHY register access requires holding the semaphore */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		/* Upper then lower halves of each counter are read;
		 * results are ignored (see note above on clear-on-read).
		 */
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		/* Always drop the semaphore, even on set_page failure */
		hw->phy.ops.release(hw);
	}
}
6074
6075 /**
6076  *  e1000_configure_k0s_lpt - Configure K0s power state
6077  *  @hw: pointer to the HW structure
6078  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
6079  *      0 corresponds to 128ns, each value over 0 doubles the duration.
6080  *  @min_time: Minimum Tx idle period allowed  - valid values are 0 to 4.
6081  *      0 corresponds to 128ns, each value over 0 doubles the duration.
6082  *
6083  *  Configure the K1 power state based on the provided parameter.
6084  *  Assumes semaphore already acquired.
6085  *
6086  *  Success returns 0, Failure returns:
6087  *      -E1000_ERR_PHY (-2) in case of access error
6088  *      -E1000_ERR_PARAM (-4) in case of parameters error
6089  **/
6090 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
6091 {
6092         s32 ret_val;
6093         u16 kmrn_reg = 0;
6094
6095         DEBUGFUNC("e1000_configure_k0s_lpt");
6096
6097         if (entry_latency > 3 || min_time > 4)
6098                 return -E1000_ERR_PARAM;
6099
6100         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6101                                              &kmrn_reg);
6102         if (ret_val)
6103                 return ret_val;
6104
6105         /* for now don't touch the latency */
6106         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
6107         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
6108
6109         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6110                                               kmrn_reg);
6111         if (ret_val)
6112                 return ret_val;
6113
6114         return E1000_SUCCESS;
6115 }