enic: add device ids
diff --git a/lib/librte_pmd_e1000/e1000/e1000_ich8lan.c b/lib/librte_pmd_e1000/e1000/e1000_ich8lan.c
index 6e98a26..3b1627b 100644
--- a/lib/librte_pmd_e1000/e1000/e1000_ich8lan.c
+++ b/lib/librte_pmd_e1000/e1000/e1000_ich8lan.c
@@ -1,6 +1,6 @@
 /*******************************************************************************
 
-Copyright (c) 2001-2012, Intel Corporation
+Copyright (c) 2001-2014, Intel Corporation
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -58,14 +58,15 @@ POSSIBILITY OF SUCH DAMAGE.
  * 82578DC Gigabit Network Connection
  * 82579LM Gigabit Network Connection
  * 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
  */
 
 #include "e1000_api.h"
 
-STATIC s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
-STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
-STATIC s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
-STATIC s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
@@ -73,6 +74,13 @@ STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
+                                             u8 *mc_addr_list,
+                                             u32 mc_addr_count);
+#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
@@ -94,6 +102,7 @@ STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
                                           u16 *speed, u16 *duplex);
 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
@@ -106,27 +115,19 @@ STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
-static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
-static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
-static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
-static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u8 *data);
-static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16 *data);
 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u16 *data);
-static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                 u32 offset, u8 byte);
-STATIC s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
-                                          u32 offset, u8 data);
-static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
-                                          u8 size, u16 data);
 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
-static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
-STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
-STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
@@ -182,12 +183,13 @@ union ich8_hws_flash_regacc {
  *
  *  Assumes the sw/fw/hw semaphore is already acquired.
  **/
-static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
+STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
        u16 phy_reg = 0;
        u32 phy_id = 0;
-       s32 ret_val;
+       s32 ret_val = 0;
        u16 retry_count;
+       u32 mac_reg = 0;
 
        for (retry_count = 0; retry_count < 2; retry_count++) {
                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
@@ -206,24 +208,84 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 
        if (hw->phy.id) {
                if  (hw->phy.id == phy_id)
-                       return true;
+                       goto out;
        } else if (phy_id) {
                hw->phy.id = phy_id;
                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
-               return true;
+               goto out;
        }
 
-       /*
-        * In case the PHY needs to be in mdio slow mode,
+       /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         */
-       hw->phy.ops.release(hw);
-       ret_val = e1000_set_mdio_slow_mode_hv(hw);
-       if (!ret_val)
-               ret_val = e1000_get_phy_id(hw);
-       hw->phy.ops.acquire(hw);
+       if (hw->mac.type < e1000_pch_lpt) {
+               hw->phy.ops.release(hw);
+               ret_val = e1000_set_mdio_slow_mode_hv(hw);
+               if (!ret_val)
+                       ret_val = e1000_get_phy_id(hw);
+               hw->phy.ops.acquire(hw);
+       }
+
+       if (ret_val)
+               return false;
+out:
+       if (hw->mac.type == e1000_pch_lpt) {
+               /* Unforce SMBus mode in PHY */
+               hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
+               phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+               hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
+
+               /* Unforce SMBus mode in MAC */
+               mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+       }
 
-       return !ret_val;
+       return true;
+}
+
+/**
+ *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ *  @hw: pointer to the HW structure
+ *
+ *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ *  used to reset the PHY to a quiescent state when necessary.
+ **/
+STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+       u32 mac_reg;
+
+       DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
+
+       /* Set Phy Config Counter to 50msec */
+       mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
+       mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+       mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+       E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
+
+       /* Toggle LANPHYPC Value bit */
+       mac_reg = E1000_READ_REG(hw, E1000_CTRL);
+       mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+       mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+       E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+       E1000_WRITE_FLUSH(hw);
+       usec_delay(10);
+       mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+       E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+       E1000_WRITE_FLUSH(hw);
+
+       if (hw->mac.type < e1000_pch_lpt) {
+               msec_delay(50);
+       } else {
+               u16 count = 20;
+
+               do {
+                       msec_delay(5);
+               } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
+                          E1000_CTRL_EXT_LPCD) && count--);
+
+               msec_delay(30);
+       }
 }
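
A minimal usage sketch (assuming the SW/FW/HW semaphore is already held, as
e1000_phy_is_accessible_pchlan() requires): the toggle is only performed once
the PHY has proven inaccessible, and accessibility is re-checked afterwards,
mirroring the flow in e1000_init_phy_workarounds_pchlan() below.

        if (!e1000_phy_is_accessible_pchlan(hw)) {
                e1000_toggle_lanphypc_pch_lpt(hw);
                if (!e1000_phy_is_accessible_pchlan(hw))
                        ret_val = -E1000_ERR_PHY;
        }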
 
 /**
@@ -233,37 +295,57 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
  *  Workarounds/flow necessary for PHY initialization during driver load
  *  and resume paths.
  **/
-static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 {
        u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
        s32 ret_val;
 
        DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
 
+       /* Gate automatic PHY configuration by hardware on managed and
+        * non-managed 82579 and newer adapters.
+        */
+       e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+       /* It is not possible to be certain of the current state of ULP
+        * so forcibly disable it.
+        */
+       hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                DEBUGOUT("Failed to initialize PHY flow\n");
-               return ret_val;
+               goto out;
        }
 
-       /*
-        * The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
+       /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
         * inaccessible and resetting the PHY is not blocked, toggle the
         * LANPHYPC Value bit to force the interconnect to PCIe mode.
         */
        switch (hw->mac.type) {
-       case e1000_pch2lan:
-               /*
-                * Gate automatic PHY configuration by hardware on
-                * non-managed 82579
+       case e1000_pch_lpt:
+               if (e1000_phy_is_accessible_pchlan(hw))
+                       break;
+
+               /* Before toggling LANPHYPC, see if PHY is accessible by
+                * forcing MAC to SMBus mode first.
                 */
-               if ((hw->mac.type == e1000_pch2lan) &&
-                   !(fwsm & E1000_ICH_FWSM_FW_VALID))
-                       e1000_gate_hw_phy_config_ich8lan(hw, true);
+               mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+               /* Wait 50 milliseconds for MAC to finish any retries
+                * that it might be trying to perform from previous
+                * attempts to acknowledge any phy read requests.
+                */
+               msec_delay(50);
 
-               if (e1000_phy_is_accessible_pchlan(hw)) {
+               /* fall-through */
+       case e1000_pch2lan:
+               if (e1000_phy_is_accessible_pchlan(hw))
                        break;
-               }
 
                /* fall-through */
        case e1000_pchlan:
@@ -273,43 +355,63 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 
                if (hw->phy.ops.check_reset_block(hw)) {
                        DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
+                       ret_val = -E1000_ERR_PHY;
                        break;
                }
 
-               DEBUGOUT("Toggling LANPHYPC\n");
+               /* Toggle LANPHYPC Value bit */
+               e1000_toggle_lanphypc_pch_lpt(hw);
+               if (hw->mac.type >= e1000_pch_lpt) {
+                       if (e1000_phy_is_accessible_pchlan(hw))
+                               break;
 
-               /* Set Phy Config Counter to 50msec */
-               mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
-               mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
-               mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
-               E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
+                       /* Toggling LANPHYPC brings the PHY out of SMBus mode
+                        * so ensure that the MAC is also out of SMBus mode
+                        */
+                       mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+                       mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+                       E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
 
-               /* Toggle LANPHYPC Value bit */
-               mac_reg = E1000_READ_REG(hw, E1000_CTRL);
-               mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
-               mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
-               E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
-               E1000_WRITE_FLUSH(hw);
-               usec_delay(10);
-               mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
-               E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
-               E1000_WRITE_FLUSH(hw);
-               msec_delay(50);
+                       if (e1000_phy_is_accessible_pchlan(hw))
+                               break;
+
+                       ret_val = -E1000_ERR_PHY;
+               }
                break;
        default:
                break;
        }
 
        hw->phy.ops.release(hw);
+       if (!ret_val) {
 
-       /*
-        * Reset the PHY before any access to it.  Doing so, ensures
-        * that the PHY is in a known good state before we read/write
-        * PHY registers.  The generic reset is sufficient here,
-        * because we haven't determined the PHY type yet.
-        */
-       ret_val = e1000_phy_hw_reset_generic(hw);
+               /* Check to see if able to reset PHY.  Print error if not */
+               if (hw->phy.ops.check_reset_block(hw)) {
+                       ERROR_REPORT("Reset blocked by ME\n");
+                       goto out;
+               }
 
+               /* Reset the PHY before any access to it.  Doing so, ensures
+                * that the PHY is in a known good state before we read/write
+                * PHY registers.  The generic reset is sufficient here,
+                * because we haven't determined the PHY type yet.
+                */
+               ret_val = e1000_phy_hw_reset_generic(hw);
+               if (ret_val)
+                       goto out;
+
+               /* On a successful reset, possibly need to wait for the PHY
+                * to quiesce to an accessible state before returning control
+                * to the calling function.  If the PHY does not quiesce, then
+                * return E1000_BLK_PHY_RESET, as this is the condition that
+                * the PHY is in.
+                */
+               ret_val = hw->phy.ops.check_reset_block(hw);
+               if (ret_val)
+                       ERROR_REPORT("ME blocked access to PHY after reset\n");
+       }
+
+out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
        if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
@@ -329,7 +431,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 {
        struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val;
 
        DEBUGFUNC("e1000_init_phy_params_pchlan");
 
@@ -370,8 +472,8 @@ STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                                break;
                        /* fall-through */
                case e1000_pch2lan:
-                       /*
-                        * In case the PHY needs to be in mdio slow mode,
+               case e1000_pch_lpt:
+                       /* In case the PHY needs to be in mdio slow mode,
                         * set slow mode and try to get the PHY id again.
                         */
                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -517,13 +619,12 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_init_nvm_params_ich8lan");
 
        /* Can't read flash registers if the register set isn't mapped. */
+       nvm->type = e1000_nvm_flash_sw;
        if (!hw->flash_address) {
                DEBUGOUT("ERROR: Flash registers not mapped\n");
                return -E1000_ERR_CONFIG;
        }
 
-       nvm->type = e1000_nvm_flash_sw;
-
        gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
 
        /* sector_X_addr is a "sector"-aligned address (4096 bytes)
@@ -539,8 +640,8 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
        /* find total size of the NVM, then cut in half since the total
         * size represents two separate NVM banks.
         */
-       nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
-                               << FLASH_SECTOR_ADDR_SHIFT;
+       nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+                               << FLASH_SECTOR_ADDR_SHIFT);
        nvm->flash_bank_size /= 2;
        /* Adjust to word count */
        nvm->flash_bank_size /= sizeof(u16);
@@ -578,6 +679,9 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+       u16 pci_cfg;
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
 
        DEBUGFUNC("e1000_init_mac_params_ich8lan");
 
@@ -645,7 +749,18 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch2lan;
                /* fall-through */
+       case e1000_pch_lpt:
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+               /* multicast address update for pch2 */
+               mac->ops.update_mc_addr_list =
+                       e1000_update_mc_addr_list_pch2lan;
+#endif
        case e1000_pchlan:
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+               /* save PCH revision_id */
+               e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
+               hw->revision_id = (u8)(pci_cfg &= 0x000F);
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
                /* ID LED init */
@@ -662,15 +777,16 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
                break;
        }
 
+       if (mac->type == e1000_pch_lpt) {
+               mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+               mac->ops.rar_set = e1000_rar_set_pch_lpt;
+               mac->ops.setup_physical_interface =
+                                       e1000_setup_copper_link_pch_lpt;
+       }
+
        /* Enable PCS Lock-loss workaround for ICH8 */
        if (mac->type == e1000_ich8lan)
                e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 
-       /* Gate automatic PHY configuration by hardware on managed 82579 */
-       if ((mac->type == e1000_pch2lan) &&
-           (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
-               e1000_gate_hw_phy_config_ich8lan(hw, true);
-
        return E1000_SUCCESS;
 }
 
@@ -686,7 +802,7 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
                                         u16 *data, bool read)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val;
 
        DEBUGFUNC("__e1000_access_emi_reg_locked");
 
@@ -727,7 +843,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
  *
  *  Assumes the SW/FW/HW Semaphore is already acquired.
  **/
-STATIC s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
 {
        DEBUGFUNC("e1000_read_emi_reg_locked");
 
@@ -741,18 +857,35 @@ STATIC s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
  *  the link and the EEE capabilities of the link partner.  The LPI Control
  *  register bits will remain set only if/when link is up.
+ *
+ *  EEE LPI must not be asserted earlier than one second after link is up.
+ *  On 82579, EEE LPI should not be enabled until such time; otherwise, there
+ *  can be link issues with some switches.  Other devices can have EEE LPI
+ *  enabled immediately upon link up since they have a timer in hardware which
+ *  prevents LPI from being asserted too early.
  **/
-STATIC s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 {
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        s32 ret_val;
-       u16 lpi_ctrl;
+       u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
 
        DEBUGFUNC("e1000_set_eee_pchlan");
 
-       if ((hw->phy.type != e1000_phy_82579) &&
-           (hw->phy.type != e1000_phy_i217))
+       switch (hw->phy.type) {
+       case e1000_phy_82579:
+               lpa = I82579_EEE_LP_ABILITY;
+               pcs_status = I82579_EEE_PCS_STATUS;
+               adv_addr = I82579_EEE_ADVERTISEMENT;
+               break;
+       case e1000_phy_i217:
+               lpa = I217_EEE_LP_ABILITY;
+               pcs_status = I217_EEE_PCS_STATUS;
+               adv_addr = I217_EEE_ADVERTISEMENT;
+               break;
+       default:
                return E1000_SUCCESS;
+       }
 
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
@@ -767,61 +900,467 @@ STATIC s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 
        /* Enable EEE if not disabled by user */
        if (!dev_spec->eee_disable) {
-               u16 lpa, pcs_status, data;
-
                /* Save off link partner's EEE ability */
-               switch (hw->phy.type) {
-               case e1000_phy_82579:
-                       lpa = I82579_EEE_LP_ABILITY;
-                       pcs_status = I82579_EEE_PCS_STATUS;
-                       break;
-               case e1000_phy_i217:
-                       lpa = I217_EEE_LP_ABILITY;
-                       pcs_status = I217_EEE_PCS_STATUS;
-                       break;
-               default:
-                       ret_val = -E1000_ERR_PHY;
-                       goto release;
-               }
                ret_val = e1000_read_emi_reg_locked(hw, lpa,
                                                    &dev_spec->eee_lp_ability);
                if (ret_val)
                        goto release;
 
-               /*
-                * Enable EEE only for speeds in which the link partner is
-                * EEE capable.
+               /* Read EEE advertisement */
+               ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+               if (ret_val)
+                       goto release;
+
+               /* Enable EEE only for speeds in which the link partner is
+                * EEE capable and for which we advertise EEE.
                 */
-               if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+               if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
                        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
 
-               if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+               if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
                        hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
                        if (data & NWAY_LPAR_100TX_FD_CAPS)
                                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
                        else
-                               /*
-                                * EEE is not supported in 100Half, so ignore
+                               /* EEE is not supported in 100Half, so ignore
                                 * partner's EEE in 100 ability if full-duplex
                                 * is not advertised.
                                 */
                                dev_spec->eee_lp_ability &=
                                    ~I82579_EEE_100_SUPPORTED;
                }
+       }
+
+       /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+       ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+       if (ret_val)
+               goto release;
+
+       ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+       hw->phy.ops.release(hw);
+
+       return ret_val;
+}
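
A short sketch of the gating above, using hypothetical register contents (both
masks are assumptions for illustration; the macros are the ones the function
uses): the 1000/100 LPI enable bits are set only where the local EEE
advertisement AND the link partner's EEE ability agree.

        u16 adv = I82579_EEE_1000_SUPPORTED | I82579_EEE_100_SUPPORTED;
        u16 lpa = I82579_EEE_100_SUPPORTED;     /* partner offers 100M EEE only */
        u16 lpi_ctrl = 0;

        if (adv & lpa & I82579_EEE_1000_SUPPORTED)      /* not taken */
                lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
        if (adv & lpa & I82579_EEE_100_SUPPORTED)       /* taken */
                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;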
+
+/**
+ *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ *  preventing further DMA write requests.  Workaround the issue by disabling
+ *  the de-assertion of the clock request when in 1Gbps mode.
+ *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ *  speeds in order to avoid Tx hangs.
+ **/
+STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+       u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+       u32 status = E1000_READ_REG(hw, E1000_STATUS);
+       s32 ret_val = E1000_SUCCESS;
+       u16 reg;
+
+       if (link && (status & E1000_STATUS_SPEED_1000)) {
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       return ret_val;
+
+               ret_val =
+                   e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+                                              &reg);
+               if (ret_val)
+                       goto release;
+
+               ret_val =
+                   e1000_write_kmrn_reg_locked(hw,
+                                               E1000_KMRNCTRLSTA_K1_CONFIG,
+                                               reg &
+                                               ~E1000_KMRNCTRLSTA_K1_ENABLE);
+               if (ret_val)
+                       goto release;
+
+               usec_delay(10);
+
+               E1000_WRITE_REG(hw, E1000_FEXTNVM6,
+                               fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+               ret_val =
+                   e1000_write_kmrn_reg_locked(hw,
+                                               E1000_KMRNCTRLSTA_K1_CONFIG,
+                                               reg);
+release:
+               hw->phy.ops.release(hw);
+       } else {
+               /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+               fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+               if (!link || ((status & E1000_STATUS_SPEED_100) &&
+                             (status & E1000_STATUS_FD)))
+                       goto update_fextnvm6;
+
+               ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
+               if (ret_val)
+                       return ret_val;
+
+               /* Clear link status transmit timeout */
+               reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+               if (status & E1000_STATUS_SPEED_100) {
+                       /* Set inband Tx timeout to 5x10us for 100Half */
+                       reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+                       /* Do not extend the K1 entry latency for 100Half */
+                       fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+               } else {
+                       /* Set inband Tx timeout to 50x10us for 10Full/Half */
+                       reg |= 50 <<
+                              I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+                       /* Extend the K1 entry latency for 10 Mbps */
+                       fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+               }
 
-               /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
-               ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+               ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
+               if (ret_val)
+                       return ret_val;
+
+update_fextnvm6:
+               E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
+       }
+
+       return ret_val;
+}
+
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+/**
+ *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ *  @hw: pointer to the HW structure
+ *  @to_sx: boolean indicating a system power state transition to Sx
+ *
+ *  When link is down, configure ULP mode to significantly reduce the power
+ *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
+ *  ME firmware to start the ULP configuration.  If not on an ME enabled
+ *  system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+       u32 mac_reg;
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_reg;
+
+       if ((hw->mac.type < e1000_pch_lpt) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+           (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+               return 0;
+
+       if (!to_sx) {
+               int i = 0;
+
+               /* Poll up to 5 seconds for Cable Disconnected indication */
+               while (!(E1000_READ_REG(hw, E1000_FEXT) &
+                        E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+                       /* Bail if link is re-acquired */
+                       if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
+                               return -E1000_ERR_PHY;
+
+                       if (i++ == 100)
+                               break;
+
+                       msec_delay(50);
+               }
+               DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
+                         (E1000_READ_REG(hw, E1000_FEXT) &
+                          E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
+                         i * 50);
+       }
+
+       if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+               /* Request ME configure ULP mode in the PHY */
+               mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+               mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+               E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       /* During S0 Idle keep the phy in PCI-E mode */
+       if (hw->dev_spec.ich8lan.smbus_disable)
+               goto skip_smbus;
+
+       /* Force SMBus mode in PHY */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+       /* Force SMBus mode in MAC */
+       mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+skip_smbus:
+       if (!to_sx) {
+               /* Change the 'Link Status Change' interrupt to trigger
+                * on 'Cable Status Change'
+                */
+               ret_val = e1000_read_kmrn_reg_locked(hw,
+                                                    E1000_KMRNCTRLSTA_OP_MODES,
+                                                    &phy_reg);
                if (ret_val)
                        goto release;
+               phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
+               e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
+                                           phy_reg);
        }
 
-       ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+       /* Set Inband ULP Exit, Reset to SMBus mode and
+        * Disable SMBus Release on PERST# in PHY
+        */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+                   I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+       if (to_sx) {
+               if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
+                       phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+               phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+       } else {
+               phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+       }
+       e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+       /* Set Disable SMBus Release on PERST# in MAC */
+       mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+       mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+       E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+       /* Commit ULP changes in PHY by starting auto ULP configuration */
+       phy_reg |= I218_ULP_CONFIG1_START;
+       e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+       if (!to_sx) {
+               /* Disable Tx so that the MAC doesn't send any (buffered)
+                * packets to the PHY.
+                */
+               mac_reg = E1000_READ_REG(hw, E1000_TCTL);
+               mac_reg &= ~E1000_TCTL_EN;
+               E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
+       }
+release:
+       hw->phy.ops.release(hw);
+out:
+       if (ret_val)
+               DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
+       else
+               hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ *  @hw: pointer to the HW structure
+ *  @force: boolean indicating whether or not to force disabling ULP
+ *
+ *  Un-configure ULP mode when link is up, the system is transitioned from
+ *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
+ *  system, poll for an indication from ME that ULP has been un-configured.
+ *  If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ *  During nominal operation, this function is called when link is acquired
+ *  to disable ULP mode (force=false); otherwise, for example when unloading
+ *  the driver or during Sx->S0 transitions, this is called with force=true
+ *  to forcibly disable ULP.
+ *
+ *  When the cable is plugged in while the device is in D0, a Cable Status
+ *  Change interrupt is generated which causes this function to be called
+ *  to partially disable ULP mode and restart autonegotiation.  This function
+ *  is then called again due to the resulting Link Status Change interrupt
+ *  to finish cleaning up after the ULP flow.
+ */
+s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u32 mac_reg;
+       u16 phy_reg;
+       int i = 0;
+
+       if ((hw->mac.type < e1000_pch_lpt) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+           (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+               return 0;
+
+       if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+               if (force) {
+                       /* Request ME un-configure ULP mode in the PHY */
+                       mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+                       mac_reg &= ~E1000_H2ME_ULP;
+                       mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+                       E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+               }
+
+               /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+               while (E1000_READ_REG(hw, E1000_FWSM) &
+                      E1000_FWSM_ULP_CFG_DONE) {
+                       if (i++ == 10) {
+                               ret_val = -E1000_ERR_PHY;
+                               goto out;
+                       }
+
+                       msec_delay(10);
+               }
+               DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+               if (force) {
+                       mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+                       mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+                       E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+               } else {
+                       /* Clear H2ME.ULP after ME ULP configuration */
+                       mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+                       mac_reg &= ~E1000_H2ME_ULP;
+                       E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+                       /* Restore link speed advertisements and restart
+                        * Auto-negotiation
+                        */
+                       ret_val = e1000_phy_setup_autoneg(hw);
+                       if (ret_val)
+                               goto out;
+
+                       ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+               }
+
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       /* Revert the change to the 'Link Status Change'
+        * interrupt to trigger on 'Cable Status Change'
+        */
+       ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
+                                            &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
+       e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
+
+       if (force)
+               /* Toggle LANPHYPC Value bit */
+               e1000_toggle_lanphypc_pch_lpt(hw);
+
+       /* Unforce SMBus mode in PHY */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+       if (ret_val) {
+               /* The MAC might be in PCIe mode, so temporarily force to
+                * SMBus mode in order to access the PHY.
+                */
+               mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+               msec_delay(50);
+
+               ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+                                                      &phy_reg);
+               if (ret_val)
+                       goto release;
+       }
+       phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+       /* Unforce SMBus mode in MAC */
+       mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+       /* When ULP mode was previously entered, K1 was disabled by the
+        * hardware.  Re-enable K1 in the PHY when exiting ULP.
+        */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= HV_PM_CTRL_K1_ENABLE;
+       e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+       /* Clear ULP enabled configuration */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+       if (ret_val)
+               goto release;
+       /* CSC interrupt received due to ULP Indication */
+       if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
+               phy_reg &= ~(I218_ULP_CONFIG1_IND |
+                            I218_ULP_CONFIG1_STICKY_ULP |
+                            I218_ULP_CONFIG1_RESET_TO_SMBUS |
+                            I218_ULP_CONFIG1_WOL_HOST |
+                            I218_ULP_CONFIG1_INBAND_EXIT |
+                            I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+               e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+               /* Commit ULP changes by starting auto ULP configuration */
+               phy_reg |= I218_ULP_CONFIG1_START;
+               e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+               /* Clear Disable SMBus Release on PERST# in MAC */
+               mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+               mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+               E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+               if (!force) {
+                       hw->phy.ops.release(hw);
+
+                       if (hw->mac.autoneg)
+                               e1000_phy_setup_autoneg(hw);
+
+                       e1000_sw_lcd_config_ich8lan(hw);
+
+                       e1000_oem_bits_config_ich8lan(hw, true);
+
+                       /* Set ULP state to unknown and return non-zero to
+                        * indicate no link (yet) and re-enter on the next LSC
+                        * to finish disabling ULP flow.
+                        */
+                       hw->dev_spec.ich8lan.ulp_state =
+                           e1000_ulp_state_unknown;
+
+                       return 1;
+               }
+       }
+
+       /* Re-enable Tx */
+       mac_reg = E1000_READ_REG(hw, E1000_TCTL);
+       mac_reg |= E1000_TCTL_EN;
+       E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
+
 release:
        hw->phy.ops.release(hw);
+       if (force) {
+               hw->phy.ops.reset(hw);
+               msec_delay(50);
+       }
+out:
+       if (ret_val)
+               DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
+       else
+               hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
 
        return ret_val;
 }
 
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
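
A minimal usage sketch (nominal D0 operation): the link-check path keys ULP
entry/exit off the MAC's STATUS.LU bit, as the e1000_check_for_copper_link_ich8lan()
changes below do.

        bool link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);

        if (link)
                ret_val = e1000_disable_ulp_lpt_lp(hw, false);
        else
                ret_val = e1000_enable_ulp_lpt_lp(hw, false);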
 /**
  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  *  @hw: pointer to the HW structure
@@ -830,11 +1369,11 @@ release:
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
  **/
-static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
        s32 ret_val;
-       bool link;
+       bool link = false;
        u16 phy_reg;
 
        DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
@@ -847,14 +1386,29 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        if (!mac->get_link_status)
                return E1000_SUCCESS;
 
-       /*
-        * First we want to see if the MII Status Register reports
-        * link.  If so, then we want to get the current speed/duplex
-        * of the PHY.
-        */
-       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               return ret_val;
+       if ((hw->mac.type < e1000_pch_lpt) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
+               /* First we want to see if the MII Status Register reports
+                * link.  If so, then we want to get the current speed/duplex
+                * of the PHY.
+                */
+               ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+               if (ret_val)
+                       return ret_val;
+       } else {
+               /* Check the MAC's STATUS register to determine link state
+                * since the PHY could be inaccessible while in ULP mode.
+                */
+               link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
+               if (link)
+                       ret_val = e1000_disable_ulp_lpt_lp(hw, false);
+               else
+                       ret_val = e1000_enable_ulp_lpt_lp(hw, false);
+
+               if (ret_val)
+                       return ret_val;
+       }
 
        if (hw->mac.type == e1000_pchlan) {
                ret_val = e1000_k1_gig_workaround_hv(hw, link);
@@ -862,6 +1416,48 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        return ret_val;
        }
 
+       /* When connected at 10Mbps half-duplex, some parts are excessively
+        * aggressive resulting in many collisions. To avoid this, increase
+        * the IPG and reduce Rx latency in the PHY.
+        */
+       if (((hw->mac.type == e1000_pch2lan) ||
+            (hw->mac.type == e1000_pch_lpt)) && link) {
+               u32 reg;
+               reg = E1000_READ_REG(hw, E1000_STATUS);
+               if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+                       u16 emi_addr;
+
+                       reg = E1000_READ_REG(hw, E1000_TIPG);
+                       reg &= ~E1000_TIPG_IPGT_MASK;
+                       reg |= 0xFF;
+                       E1000_WRITE_REG(hw, E1000_TIPG, reg);
+
+                       /* Reduce Rx latency in analog PHY */
+                       ret_val = hw->phy.ops.acquire(hw);
+                       if (ret_val)
+                               return ret_val;
+
+                       if (hw->mac.type == e1000_pch2lan)
+                               emi_addr = I82579_RX_CONFIG;
+                       else
+                               emi_addr = I217_RX_CONFIG;
+                       ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+
+                       hw->phy.ops.release(hw);
+
+                       if (ret_val)
+                               return ret_val;
+               }
+       }
+
+       /* Work-around I218 hang issue */
+       if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+           (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+               ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Clear link partner's EEE ability */
        hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
@@ -907,9 +1503,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        e1000_check_downshift_generic(hw);
 
        /* Enable/Disable EEE after link up */
-       ret_val = e1000_set_eee_pchlan(hw);
-       if (ret_val)
-               return ret_val;
+       if (hw->phy.type > e1000_phy_82579) {
+               ret_val = e1000_set_eee_pchlan(hw);
+               if (ret_val)
+                       return ret_val;
+       }
 
        /* If we are forcing speed/duplex, then we simply return since
         * we have already determined whether we have link or not.
@@ -955,6 +1553,7 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
                break;
        case e1000_pchlan:
        case e1000_pch2lan:
+       case e1000_pch_lpt:
                hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
                break;
        default:
@@ -1159,7 +1758,10 @@ STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
                return;
        }
 
-       if (index < hw->mac.rar_entry_count) {
+       /* RAR[1-6] are owned by manageability.  Skip those and program the
+        * next address into the SHRA register array.
+        */
+       if (index < (u32) (hw->mac.rar_entry_count)) {
                s32 ret_val;
 
                ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1186,6 +1788,130 @@ out:
        DEBUGOUT1("Failed to write receive address at index %d\n", index);
 }
 
+/**
+ *  e1000_rar_set_pch_lpt - Set receive address registers
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address register array at index to the address passed
+ *  in by addr. For LPT, RAR[0] is the base address register that is to
+ *  contain the MAC address. SHRA[0-10] are the shared receive address
+ *  registers that are shared between the Host and manageability engine (ME).
+ **/
+STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       u32 rar_low, rar_high;
+       u32 wlock_mac;
+
+       DEBUGFUNC("e1000_rar_set_pch_lpt");
+
+       /* HW expects these in little endian so we reverse the byte order
+        * from network order (big endian) to little endian
+        */
+       rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+       /* If MAC address zero, no need to set the AV bit */
+       if (rar_low || rar_high)
+               rar_high |= E1000_RAH_AV;
+
+       if (index == 0) {
+               E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+               E1000_WRITE_FLUSH(hw);
+               E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+               E1000_WRITE_FLUSH(hw);
+               return;
+       }
+
+       /* The manageability engine (ME) can lock certain SHRAR registers that
+        * it is using - those registers are unavailable for use.
+        */
+       if (index < hw->mac.rar_entry_count) {
+               wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
+                           E1000_FWSM_WLOCK_MAC_MASK;
+               wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+               /* Check if all SHRAR registers are locked */
+               if (wlock_mac == 1)
+                       goto out;
+
+               if ((wlock_mac == 0) || (index <= wlock_mac)) {
+                       s32 ret_val;
+
+                       ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+                       if (ret_val)
+                               goto out;
+
+                       E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
+                                       rar_low);
+                       E1000_WRITE_FLUSH(hw);
+                       E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
+                                       rar_high);
+                       E1000_WRITE_FLUSH(hw);
+
+                       e1000_release_swflag_ich8lan(hw);
+
+                       /* verify the register updates */
+                       if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+                           (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
+                               return;
+               }
+       }
+
+out:
+       DEBUGOUT1("Failed to write receive address at index %d\n", index);
+}
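
A worked example of the little-endian packing above, for a hypothetical
address 00:1B:21:AA:BB:CC (addr[0]..addr[5]):

        rar_low  = 0xAA211B00;  /* 0x00 | 0x1B<<8 | 0x21<<16 | 0xAA<<24 */
        rar_high = 0x0000CCBB;  /* 0xBB | 0xCC<<8 */
        /* the address is non-zero, so E1000_RAH_AV is OR'd into rar_high
         * before RAL/RAH(0) or SHRAL/SHRAH(index - 1) are written
         */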
+
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+/**
+ *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
+                                             u8 *mc_addr_list,
+                                             u32 mc_addr_count)
+{
+       u16 phy_reg = 0;
+       int i;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
+
+       e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               return;
+
+       ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+       if (ret_val)
+               goto release;
+
+       for (i = 0; i < hw->mac.mta_reg_count; i++) {
+               hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+                                          (u16)(hw->mac.mta_shadow[i] &
+                                                0xFFFF));
+               hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
+                                          (u16)((hw->mac.mta_shadow[i] >> 16) &
+                                                0xFFFF));
+       }
+
+       e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+       hw->phy.ops.release(hw);
+}
+
+#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
 /**
  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
  *  @hw: pointer to the HW structure
@@ -1197,13 +1923,21 @@ out:
 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
 {
        u32 fwsm;
+       bool blocked = false;
+       int i = 0;
 
        DEBUGFUNC("e1000_check_reset_block_ich8lan");
 
-       fwsm = E1000_READ_REG(hw, E1000_FWSM);
-
-       return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
-                                               : E1000_BLK_PHY_RESET;
+       do {
+               fwsm = E1000_READ_REG(hw, E1000_FWSM);
+               if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
+                       blocked = true;
+                       msec_delay(10);
+                       continue;
+               }
+               blocked = false;
+       } while (blocked && (i++ < 10));
+       return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
 }
 
 /**
@@ -1219,7 +1953,7 @@ STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
        u32 strap = E1000_READ_REG(hw, E1000_STRAP);
        u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
                E1000_STRAP_SMT_FREQ_SHIFT;
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val;
 
        strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
 
@@ -1282,6 +2016,7 @@ STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
                /* Fall-thru */
        case e1000_pchlan:
        case e1000_pch2lan:
+       case e1000_pch_lpt:
                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
                break;
        default:
@@ -1402,9 +2137,9 @@ STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
                        if (ret_val)
                                goto release;
 
-                       status_reg &= BM_CS_STATUS_LINK_UP |
-                                     BM_CS_STATUS_RESOLVED |
-                                     BM_CS_STATUS_SPEED_MASK;
+                       status_reg &= (BM_CS_STATUS_LINK_UP |
+                                      BM_CS_STATUS_RESOLVED |
+                                      BM_CS_STATUS_SPEED_MASK);
 
                        if (status_reg == (BM_CS_STATUS_LINK_UP |
                                           BM_CS_STATUS_RESOLVED |
@@ -1418,9 +2153,9 @@ STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
                        if (ret_val)
                                goto release;
 
-                       status_reg &= HV_M_STATUS_LINK_UP |
-                                     HV_M_STATUS_AUTONEG_COMPLETE |
-                                     HV_M_STATUS_SPEED_MASK;
+                       status_reg &= (HV_M_STATUS_LINK_UP |
+                                      HV_M_STATUS_AUTONEG_COMPLETE |
+                                      HV_M_STATUS_SPEED_MASK);
 
                        if (status_reg == (HV_M_STATUS_LINK_UP |
                                           HV_M_STATUS_AUTONEG_COMPLETE |
@@ -1462,7 +2197,7 @@ release:
  **/
 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val;
        u32 ctrl_reg = 0;
        u32 ctrl_ext = 0;
        u32 reg = 0;
@@ -1513,7 +2248,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
  **/
-static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
 {
        s32 ret_val = 0;
        u32 mac_reg;
@@ -1702,8 +2437,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
        if (ret_val)
                goto release;
 
-       /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
-       for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+       /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+       for (i = 0; i < (hw->mac.rar_entry_count); i++) {
                mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
                hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
                                           (u16)(mac_reg & 0xFFFF));
@@ -1724,7 +2459,8 @@ release:
        hw->phy.ops.release(hw);
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
+#ifndef CRC32_OS_SUPPORT
+STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
 {
        u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
        u32 i, j, mask, crc;
@@ -1742,6 +2478,7 @@ static u32 e1000_calc_rx_da_crc(u8 mac[])
        return ~crc;
 }
 
+#endif /* CRC32_OS_SUPPORT */
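
The body of e1000_calc_rx_da_crc() falls outside this hunk; per the polynomial
comment, it computes a bit-reflected 802.3 CRC-32 over the 6-byte destination
address.  A self-contained sketch of that computation (an assumption about the
elided body, not a copy of it):

        static u32 rx_da_crc_sketch(const u8 mac[ETH_ADDR_LEN])
        {
                u32 poly = 0xEDB88320;  /* reflected 802.3 polynomial */
                u32 crc = 0xFFFFFFFF;
                u32 i, j, mask;

                for (i = 0; i < ETH_ADDR_LEN; i++) {
                        crc ^= mac[i];
                        for (j = 0; j < 8; j++) {
                                mask = -(crc & 1);      /* 0 or 0xFFFFFFFF */
                                crc = (crc >> 1) ^ (poly & mask);
                        }
                }
                return ~crc;
        }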
 /**
  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  *  with 82579 PHY
@@ -1757,7 +2494,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 
        DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
 
-       if (hw->mac.type != e1000_pch2lan)
+       if (hw->mac.type < e1000_pch2lan)
                return E1000_SUCCESS;
 
        /* disable Rx path while enabling/disabling workaround */
@@ -1771,7 +2508,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                /* Write Rx addresses (rar_entry_count for RAL/H, and
                 * SHRAL/H) and initial CRC values to the MAC
                 */
-               for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+               for (i = 0; i < hw->mac.rar_entry_count; i++) {
                        u8 mac_addr[ETH_ADDR_LEN] = {0};
                        u32 addr_high, addr_low;
 
@@ -1786,8 +2523,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        mac_addr[4] = (addr_high & 0xFF);
                        mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
+#ifndef CRC32_OS_SUPPORT
                        E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
                                        e1000_calc_rx_da_crc(mac_addr));
+#else /* CRC32_OS_SUPPORT */
+                       E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
+                                       E1000_CRC32(ETH_ADDR_LEN, mac_addr));
+#endif /* CRC32_OS_SUPPORT */
                }
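
The stores to mac_addr[0] through mac_addr[3] sit outside this hunk; a hypothetical helper showing the usual RAL/RAH byte layout (bytes 0-3 from the low register, bytes 4-5 from the high register, matching the two assignments shown above) is sketched below. The helper name and standard C types are illustrative only.

	#include <stdint.h>

	/* Hypothetical helper: unpack one RAL/RAH receive-address pair into 6 bytes. */
	static void rar_to_mac_sketch(uint32_t addr_low, uint32_t addr_high,
				      uint8_t mac[6])
	{
		mac[0] = (uint8_t)(addr_low & 0xFF);
		mac[1] = (uint8_t)((addr_low >> 8) & 0xFF);
		mac[2] = (uint8_t)((addr_low >> 16) & 0xFF);
		mac[3] = (uint8_t)((addr_low >> 24) & 0xFF);
		mac[4] = (uint8_t)(addr_high & 0xFF);
		mac[5] = (uint8_t)((addr_high >> 8) & 0xFF);
	}
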
 
                /* Write Rx addresses to the PHY */
@@ -1954,55 +2696,47 @@ release:
  *  e1000_k1_gig_workaround_lv - K1 Si workaround
  *  @hw:   pointer to the HW structure
  *
- *  Workaround to set the K1 beacon duration for 82579 parts
+ *  Workaround to set the K1 beacon duration for 82579 parts when linked at
+ *  10Mbps, and to disable K1 for the 1000 and 100 Mbps speeds
  **/
 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
 {
        s32 ret_val = E1000_SUCCESS;
        u16 status_reg = 0;
-       u32 mac_reg;
-       u16 phy_reg;
 
        DEBUGFUNC("e1000_k1_workaround_lv");
 
        if (hw->mac.type != e1000_pch2lan)
                return E1000_SUCCESS;
 
-       /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+       /* Set K1 beacon duration based on 10Mbps speed */
        ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
        if (ret_val)
                return ret_val;
 
        if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
            == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
-               mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
-               mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
-               ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
-               if (ret_val)
-                       return ret_val;
-
-               if (status_reg & HV_M_STATUS_SPEED_1000) {
+               if (status_reg &
+                   (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
                        u16 pm_phy_reg;
 
-                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
-                       phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
-                       /* LV 1G Packet drop issue wa  */
+                       /* LV 1G/100 packet drop issue workaround */
                        ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
                                                       &pm_phy_reg);
                        if (ret_val)
                                return ret_val;
-                       pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+                       pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
                        ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
                                                        pm_phy_reg);
                        if (ret_val)
                                return ret_val;
                } else {
+                       u32 mac_reg;
+                       mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+                       mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
                        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-                       phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+                       E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
                }
-               E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
-               ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
        }
 
        return ret_val;
@@ -2022,7 +2756,7 @@ STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
 
        DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
 
-       if (hw->mac.type != e1000_pch2lan)
+       if (hw->mac.type < e1000_pch2lan)
                return;
 
        extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
@@ -2176,7 +2910,7 @@ STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
  **/
 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val;
        u16 oem_reg;
 
        DEBUGFUNC("e1000_set_lplu_state_pchlan");
@@ -2240,6 +2974,8 @@ STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
                ret_val = phy->ops.read_reg(hw,
                                            IGP01E1000_PHY_PORT_CONFIG,
                                            &data);
+               if (ret_val)
+                       return ret_val;
                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
                ret_val = phy->ops.write_reg(hw,
                                             IGP01E1000_PHY_PORT_CONFIG,
@@ -2519,7 +3255,7 @@ out:
  *  This function does initial flash setup so that a new read/write/erase cycle
  *  can be started.
  **/
-static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 {
        union ich8_hws_flash_status hsfsts;
        s32 ret_val = -E1000_ERR_NVM;
@@ -2537,7 +3273,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
        /* Clear FCERR and DAEL in hw status by writing 1 */
        hsfsts.hsf_status.flcerr = 1;
        hsfsts.hsf_status.dael = 1;
-
        E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 
        /* Either we should have a hardware SPI cycle in progress
@@ -2593,7 +3328,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
  *
  *  This function starts a flash cycle and waits for its completion.
  **/
-static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
 {
        union ich8_hws_flash_ctrl hsflctl;
        union ich8_hws_flash_status hsfsts;
@@ -2604,6 +3339,7 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
        /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
        hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
        hsflctl.hsf_ctrl.flcgo = 1;
+
        E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 
        /* wait till FDONE bit is set to 1 */
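
The wait loop itself lies outside this hunk; a minimal sketch of the poll-until-done-or-timeout pattern the comment describes follows. The completion predicate, the usec_delay() prototype, and the return convention are assumptions, not the driver's exact code.

	#include <stdbool.h>
	#include <stdint.h>

	void usec_delay(uint32_t usecs);	/* driver delay primitive (assumed) */

	/* Sketch: poll a completion predicate once per microsecond until it
	 * reports done or the timeout (in microseconds) expires.
	 */
	static int wait_for_flash_cycle_sketch(bool (*cycle_done)(void),
					       uint32_t timeout)
	{
		uint32_t i;

		for (i = 0; i < timeout; i++) {
			if (cycle_done())
				return 0;	/* FDONE observed */
			usec_delay(1);
		}
		return -1;	/* timed out; caller checks FCERR and may retry */
	}
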
@@ -2658,6 +3394,7 @@ STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
        u16 word = 0;
 
        ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
        if (ret_val)
                return ret_val;
 
@@ -2675,7 +3412,7 @@ STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
  *
  *  Reads a byte or word from the NVM using the flash access registers.
  **/
-static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u8 size, u16 *data)
 {
        union ich8_hws_flash_status hsfsts;
@@ -2687,11 +3424,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
        DEBUGFUNC("e1000_read_flash_data_ich8lan");
 
-       if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+       if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
                return -E1000_ERR_NVM;
-
-       flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
-                           hw->nvm.flash_base_addr;
+       flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+                            hw->nvm.flash_base_addr);
 
        do {
                usec_delay(1);
@@ -2699,8 +3435,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                ret_val = e1000_flash_cycle_init_ich8lan(hw);
                if (ret_val != E1000_SUCCESS)
                        break;
-
                hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
                hsflctl.hsf_ctrl.fldbcount = size - 1;
                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
@@ -2708,8 +3444,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
                E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
 
-               ret_val = e1000_flash_cycle_ich8lan(hw,
-                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
+               ret_val =
+                   e1000_flash_cycle_ich8lan(hw,
+                                             ICH_FLASH_READ_COMMAND_TIMEOUT);
 
                /* Check if FCERR is set to 1, if set to 1, clear it
                 * and try the whole sequence a few more times, else
@@ -2960,6 +3697,10 @@ STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
         * the checksum...a likely scenario.
         */
        switch (hw->mac.type) {
+       case e1000_pch_lpt:
+               word = NVM_COMPAT;
+               valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+               break;
        default:
                word = NVM_FUTURE_INIT_WORD1;
                valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
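
What happens with the selected word and mask falls outside this hunk; the usual pattern is to read that word, and if the valid-checksum bit is clear, set it, write the word back, and regenerate the checksum before validating. A sketch with placeholder accessors (nvm_read_word, nvm_write_word, nvm_update_checksum are stand-ins, not driver APIs) follows.

	#include <stdint.h>

	/* Placeholder NVM accessors -- stand-ins for the driver's nvm.ops callbacks. */
	int nvm_read_word(uint32_t offset, uint16_t *data);
	int nvm_write_word(uint32_t offset, uint16_t data);
	int nvm_update_checksum(void);

	/* Sketch: ensure the "checksum valid" bit is set before validating,
	 * updating the stored checksum if the bit had to be turned on.
	 */
	static int ensure_valid_csum_bit_sketch(uint32_t word, uint16_t valid_csum_mask)
	{
		uint16_t data;
		int ret = nvm_read_word(word, &data);

		if (ret)
			return ret;
		if (!(data & valid_csum_mask)) {
			data |= valid_csum_mask;
			ret = nvm_write_word(word, data);
			if (!ret)
				ret = nvm_update_checksum();
		}
		return ret;
	}
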
@@ -2992,7 +3733,7 @@ STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
  *
  *  Writes one/two bytes to the NVM using the flash access registers.
  **/
-static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16 data)
 {
        union ich8_hws_flash_status hsfsts;
@@ -3004,12 +3745,11 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
        DEBUGFUNC("e1000_write_ich8_data");
 
-       if (size < 1 || size > 2 || data > size * 0xff ||
-           offset > ICH_FLASH_LINEAR_ADDR_MASK)
+       if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
                return -E1000_ERR_NVM;
 
-       flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
-                           hw->nvm.flash_base_addr;
+       flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+                            hw->nvm.flash_base_addr);
 
        do {
                usec_delay(1);
@@ -3017,8 +3757,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                ret_val = e1000_flash_cycle_init_ich8lan(hw);
                if (ret_val != E1000_SUCCESS)
                        break;
-
                hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
                hsflctl.hsf_ctrl.fldbcount = size - 1;
                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
@@ -3036,8 +3776,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                /* check if FCERR is set to 1 , if set to 1, clear it
                 * and try the whole sequence a few more times else done
                 */
-               ret_val = e1000_flash_cycle_ich8lan(hw,
-                                              ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+               ret_val =
+                   e1000_flash_cycle_ich8lan(hw,
+                                             ICH_FLASH_WRITE_COMMAND_TIMEOUT);
                if (ret_val == E1000_SUCCESS)
                        break;
 
@@ -3086,7 +3827,7 @@ STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
  *  Writes a single byte to the NVM using the flash access registers.
  *  Goes through a retry algorithm before giving up.
  **/
-static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                u32 offset, u8 byte)
 {
        s32 ret_val;
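
The retry loop is outside this hunk; a minimal sketch of the bounded-retry pattern the function comment describes (attempt the write, pause briefly, try again up to a fixed budget) follows. The retry count, the delay, and both helper prototypes are assumptions, not the driver's actual values.

	#include <stdint.h>

	int write_flash_byte_once(uint32_t offset, uint8_t byte);	/* assumed helper */
	void usec_delay(uint32_t usecs);				/* assumed delay */

	#define RETRY_LIMIT 100						/* assumed budget */

	/* Sketch: retry a single-byte flash write a bounded number of times. */
	static int retry_write_flash_byte_sketch(uint32_t offset, uint8_t byte)
	{
		uint32_t attempt;

		for (attempt = 0; attempt <= RETRY_LIMIT; attempt++) {
			if (write_flash_byte_once(offset, byte) == 0)
				return 0;		/* success */
			usec_delay(100);		/* brief pause before retrying */
		}
		return -1;	/* give up once the retry budget is exhausted */
	}
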
@@ -3173,8 +3914,10 @@ STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
        flash_linear_addr = hw->nvm.flash_base_addr;
        flash_linear_addr += (bank) ? flash_bank_size : 0;
 
-       for (j = 0; j < iteration ; j++) {
+       for (j = 0; j < iteration; j++) {
                do {
+                       u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
                        /* Steps */
                        ret_val = e1000_flash_cycle_init_ich8lan(hw);
                        if (ret_val)
@@ -3183,8 +3926,9 @@ STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
                        /* Write a value 11 (block Erase) in Flash
                         * Cycle field in hw flash control
                         */
-                       hsflctl.regval = E1000_READ_FLASH_REG16(hw,
-                                                             ICH_FLASH_HSFCTL);
+                       hsflctl.regval =
+                           E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
                        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
                        E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
                                                hsflctl.regval);
@@ -3197,8 +3941,7 @@ STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
                        E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
                                              flash_linear_addr);
 
-                       ret_val = e1000_flash_cycle_ich8lan(hw,
-                                              ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+                       ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
                        if (ret_val == E1000_SUCCESS)
                                break;
 
@@ -3488,9 +4231,9 @@ STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 
        /* Initialize identification LED */
        ret_val = mac->ops.id_led_init(hw);
+       /* An error is not fatal and we should not stop init due to this */
        if (ret_val)
                DEBUGOUT("Error initializing identification LED\n");
-               /* This is not fatal and we should not stop init due to this */
 
        /* Setup the receive address. */
        e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
@@ -3518,16 +4261,16 @@ STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 
        /* Set the transmit descriptor write-back policy for both queues */
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
-       txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
-                E1000_TXDCTL_FULL_TX_DESC_WB;
-       txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
-                E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+       txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+                 E1000_TXDCTL_FULL_TX_DESC_WB);
+       txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+                 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
        E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
-       txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
-                E1000_TXDCTL_FULL_TX_DESC_WB;
-       txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
-                E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+       txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+                 E1000_TXDCTL_FULL_TX_DESC_WB);
+       txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+                 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
        E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
 
        /* ICH8 has opposite polarity of no_snoop bits.
@@ -3560,7 +4303,7 @@ STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
  *  Sets/Clears required hardware bits necessary for correctly setting up the
  *  hardware for transmit and receive.
  **/
-static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 {
        u32 reg;
 
@@ -3620,6 +4363,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
        E1000_WRITE_REG(hw, E1000_RFCTL, reg);
 
+       /* Enable ECC on Lynxpoint */
+       if (hw->mac.type == e1000_pch_lpt) {
+               reg = E1000_READ_REG(hw, E1000_PBECCSTS);
+               reg |= E1000_PBECCSTS_ECC_ENABLE;
+               E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
+
+               reg = E1000_READ_REG(hw, E1000_CTRL);
+               reg |= E1000_CTRL_MEHE;
+               E1000_WRITE_REG(hw, E1000_CTRL, reg);
+       }
+
        return;
 }
 
@@ -3734,7 +4488,6 @@ STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
                break;
        case e1000_phy_82577:
        case e1000_phy_82579:
-       case e1000_phy_i217:
                ret_val = e1000_copper_link_setup_82577(hw);
                if (ret_val)
                        return ret_val;
@@ -3771,6 +4524,33 @@ STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
        return e1000_setup_copper_link_generic(hw);
 }
 
+/**
+ *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY specific link setup function and then calls the
+ *  generic setup_copper_link to finish configuring the link for
+ *  Lynxpoint PCH devices
+ **/
+STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl |= E1000_CTRL_SLU;
+       ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       ret_val = e1000_copper_link_setup_82577(hw);
+       if (ret_val)
+               return ret_val;
+
+       return e1000_setup_copper_link_generic(hw);
+}
+
 /**
  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
  *  @hw: pointer to the HW structure
@@ -3816,7 +4596,7 @@ STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
  *    5) repeat up to 10 times
  *  Note: this is only called for IGP3 copper when speed is 1gb.
  **/
-static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
 {
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 phy_ctrl;
@@ -3981,9 +4761,8 @@ void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
        if (ret_val)
                return;
        reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
-       ret_val = e1000_write_kmrn_reg_generic(hw,
-                                              E1000_KMRNCTRLSTA_DIAG_OFFSET,
-                                              reg_data);
+       e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+                                    reg_data);
 }
 
 /**
@@ -4010,8 +4789,17 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 
        phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
        if (hw->phy.type == e1000_phy_i217) {
-               u16 phy_reg;
+               u16 phy_reg, device_id = hw->device_id;
+
+               if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+                   (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+                       u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+
+                       E1000_WRITE_REG(hw, E1000_FEXTNVM6,
+                                       fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+               }
 
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
@@ -4029,14 +4817,25 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 
                        /* Disable LPLU if both link partners support 100BaseT
                         * EEE and 100Full is advertised on both ends of the
-                        * link.
+                        * link, and enable Auto Enable LPI since there will
+                        * be no driver to enable LPI while in Sx.
                         */
                        if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
                            (dev_spec->eee_lp_ability &
                             I82579_EEE_100_SUPPORTED) &&
-                           (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+                           (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
                                              E1000_PHY_CTRL_NOND0A_LPLU);
+
+                               /* Set Auto Enable LPI after link up */
+                               hw->phy.ops.read_reg_locked(hw,
+                                                           I217_LPI_GPIO_CTRL,
+                                                           &phy_reg);
+                               phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+                               hw->phy.ops.write_reg_locked(hw,
+                                                            I217_LPI_GPIO_CTRL,
+                                                            phy_reg);
+                       }
                }
 
                /* For i217 Intel Rapid Start Technology support,
@@ -4047,8 +4846,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                 * The SMBus release must also be disabled on LCD reset.
                 */
                if (!(E1000_READ_REG(hw, E1000_FWSM) &
-                       E1000_ICH_FWSM_FW_VALID)) {
-
+                     E1000_ICH_FWSM_FW_VALID)) {
                        /* Enable proxy to reset only on power good. */
                        hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
                                                    &phy_reg);
@@ -4141,6 +4939,11 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                        return;
                }
 
+               /* Clear Auto Enable LPI after link up */
+               hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+               phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+               hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
                if (!(E1000_READ_REG(hw, E1000_FWSM) &
                    E1000_ICH_FWSM_FW_VALID)) {
                        /* Restore clear on SMB if no manageability engine