igb: rename pmd driver as e1000
author Intel <intel.com>
Wed, 19 Dec 2012 23:00:00 +0000 (00:00 +0100)
committer Thomas Monjalon <thomas.monjalon@6wind.com>
Thu, 25 Jul 2013 13:23:28 +0000 (15:23 +0200)
Signed-off-by: Intel
65 files changed:
config/defconfig_i686-default-linuxapp-gcc
config/defconfig_i686-default-linuxapp-icc
config/defconfig_x86_64-default-linuxapp-gcc
config/defconfig_x86_64-default-linuxapp-icc
lib/Makefile
lib/librte_pmd_e1000/Makefile [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/README [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_82575.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_82575.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_api.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_api.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_defines.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_hw.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_mac.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_mac.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_manage.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_manage.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_mbx.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_mbx.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_nvm.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_nvm.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_osdep.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_osdep.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_phy.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_phy.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_regs.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_vf.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/e1000_vf.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/if_igb.c [new file with mode: 0644]
lib/librte_pmd_e1000/e1000/if_igb.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000_ethdev.h [new file with mode: 0644]
lib/librte_pmd_e1000/e1000_logs.h [new file with mode: 0644]
lib/librte_pmd_e1000/igb_ethdev.c [new file with mode: 0644]
lib/librte_pmd_e1000/igb_rxtx.c [new file with mode: 0644]
lib/librte_pmd_igb/Makefile [deleted file]
lib/librte_pmd_igb/e1000_ethdev.c [deleted file]
lib/librte_pmd_igb/e1000_ethdev.h [deleted file]
lib/librte_pmd_igb/e1000_logs.h [deleted file]
lib/librte_pmd_igb/e1000_rxtx.c [deleted file]
lib/librte_pmd_igb/igb/README [deleted file]
lib/librte_pmd_igb/igb/e1000_82575.c [deleted file]
lib/librte_pmd_igb/igb/e1000_82575.h [deleted file]
lib/librte_pmd_igb/igb/e1000_api.c [deleted file]
lib/librte_pmd_igb/igb/e1000_api.h [deleted file]
lib/librte_pmd_igb/igb/e1000_defines.h [deleted file]
lib/librte_pmd_igb/igb/e1000_hw.h [deleted file]
lib/librte_pmd_igb/igb/e1000_mac.c [deleted file]
lib/librte_pmd_igb/igb/e1000_mac.h [deleted file]
lib/librte_pmd_igb/igb/e1000_manage.c [deleted file]
lib/librte_pmd_igb/igb/e1000_manage.h [deleted file]
lib/librte_pmd_igb/igb/e1000_mbx.c [deleted file]
lib/librte_pmd_igb/igb/e1000_mbx.h [deleted file]
lib/librte_pmd_igb/igb/e1000_nvm.c [deleted file]
lib/librte_pmd_igb/igb/e1000_nvm.h [deleted file]
lib/librte_pmd_igb/igb/e1000_osdep.c [deleted file]
lib/librte_pmd_igb/igb/e1000_osdep.h [deleted file]
lib/librte_pmd_igb/igb/e1000_phy.c [deleted file]
lib/librte_pmd_igb/igb/e1000_phy.h [deleted file]
lib/librte_pmd_igb/igb/e1000_regs.h [deleted file]
lib/librte_pmd_igb/igb/e1000_vf.c [deleted file]
lib/librte_pmd_igb/igb/e1000_vf.h [deleted file]
lib/librte_pmd_igb/igb/if_igb.c [deleted file]
lib/librte_pmd_igb/igb/if_igb.h [deleted file]
mk/rte.app.mk
mk/rte.vars.mk

diff --git a/config/defconfig_i686-default-linuxapp-gcc b/config/defconfig_i686-default-linuxapp-gcc
index d6c7c28..e076042 100644
--- a/config/defconfig_i686-default-linuxapp-gcc
+++ b/config/defconfig_i686-default-linuxapp-gcc
@@ -133,11 +133,11 @@ CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 # Compile burst-oriented IGB PMD driver
 #
 CONFIG_RTE_LIBRTE_IGB_PMD=y
-CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n
 
 #
 # Compile burst-oriented IXGBE PMD driver
diff --git a/config/defconfig_i686-default-linuxapp-icc b/config/defconfig_i686-default-linuxapp-icc
index a981dc5..591e0e3 100644
--- a/config/defconfig_i686-default-linuxapp-icc
+++ b/config/defconfig_i686-default-linuxapp-icc
@@ -133,11 +133,11 @@ CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 # Compile burst-oriented IGB PMD driver
 #
 CONFIG_RTE_LIBRTE_IGB_PMD=y
-CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n
 
 #
 # Compile burst-oriented IXGBE PMD driver
diff --git a/config/defconfig_x86_64-default-linuxapp-gcc b/config/defconfig_x86_64-default-linuxapp-gcc
index 3ddc39d..5da27c8 100644
--- a/config/defconfig_x86_64-default-linuxapp-gcc
+++ b/config/defconfig_x86_64-default-linuxapp-gcc
@@ -133,11 +133,11 @@ CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 # Compile burst-oriented IGB PMD driver
 #
 CONFIG_RTE_LIBRTE_IGB_PMD=y
-CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n
 
 #
 # Compile burst-oriented IXGBE PMD driver
diff --git a/config/defconfig_x86_64-default-linuxapp-icc b/config/defconfig_x86_64-default-linuxapp-icc
index 63e3aa6..401f911 100644
--- a/config/defconfig_x86_64-default-linuxapp-icc
+++ b/config/defconfig_x86_64-default-linuxapp-icc
@@ -133,11 +133,11 @@ CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 # Compile burst-oriented IGB PMD driver
 #
 CONFIG_RTE_LIBRTE_IGB_PMD=y
-CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
-CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n
 
 #
 # Compile burst-oriented IXGBE PMD driver
diff --git a/lib/Makefile b/lib/Makefile
index 7cffb84..06da89e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,7 +41,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
 DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
 DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
 DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
-DIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += librte_pmd_igb
+DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += librte_pmd_e1000
 DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += librte_pmd_ixgbe
 DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
 DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm
diff --git a/lib/librte_pmd_e1000/Makefile b/lib/librte_pmd_e1000/Makefile
new file mode 100644
index 0000000..a3c88e8
--- /dev/null
+++ b/lib/librte_pmd_e1000/Makefile
@@ -0,0 +1,63 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_e1000.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_osdep.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pmd_e1000/e1000/README b/lib/librte_pmd_e1000/e1000/README
new file mode 100644
index 0000000..c511b6e
--- /dev/null
+++ b/lib/librte_pmd_e1000/e1000/README
@@ -0,0 +1,73 @@
+..
+  BSD LICENSE
+
+  Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions 
+  are met:
+
+    * Redistributions of source code must retain the above copyright 
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in 
+      the documentation and/or other materials provided with the 
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived 
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Intel® IGB driver
+=================
+
+This directory contains code from the Intel® Network Adapter Driver for 82575/6
+and 82580-based Gigabit Network Connections under FreeBSD, version 2.2.3,
+dated 04/25/2011. This code is available from
+`http://downloadmirror.intel.com/15815/eng/igb-2.2.3.tar.gz`
+
+This driver is valid for the product(s) listed below
+
+* Intel® 82575EB Gigabit Ethernet Controller
+* Intel® 82576 Gigabit Ethernet Controller
+* Intel® 82580EB Gigabit Ethernet Controller
+* Intel® Ethernet Controller I350
+* Intel® Ethernet Server Adapter I340-F4
+* Intel® Ethernet Server Adapter I340-T4
+* Intel® Ethernet Server Adapter I350-F2
+* Intel® Ethernet Server Adapter I350-F4
+* Intel® Ethernet Server Adapter I350-T2
+* Intel® Ethernet Server Adapter I350-T4
+* Intel® Gigabit EF Dual Port Server Adapter
+* Intel® Gigabit ET Dual Port Server Adapter
+* Intel® Gigabit ET Quad Port Server Adapter
+* Intel® Gigabit ET2 Quad Port Server Adapter
+* Intel® Gigabit VT Quad Port Server Adapter
+
+
+Updating driver
+===============
+
+The following modifications have been made to this code to integrate it with the
+Intel® DPDK:
+
+
+e1000_osdep.h and e1000_osdep.c
+-------------------------------
+
+The OS dependency layer has been extensively modified to support the drivers in
+the Intel® DPDK environment. It is expected that these files will not need to be
+changed on updating the driver.
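
For orientation, the shared code in this patch reaches the hardware only through a small set of accessors that e1000_osdep.h must provide (E1000_READ_REG, E1000_WRITE_REG, E1000_WRITE_FLUSH, msec_delay, DEBUGOUT and friends). The sketch below is purely illustrative and is not the e1000_osdep.h added by this commit; the struct name, the hw_addr field, and the use of the status register at offset 0x08 for the posting read are assumptions about how such macros can map onto a memory-mapped PCI BAR in a userspace driver::

    /*
     * Illustrative sketch only -- not the osdep layer shipped by this patch.
     * One possible mapping of the shared-code register accessors onto a
     * PCI BAR that has already been mapped into the process address space.
     */
    #include <stdint.h>

    struct e1000_osdep_sketch {
            volatile uint8_t *hw_addr;      /* assumed: pointer to mapped BAR0 */
    };

    #define E1000_PCI_REG(addr)     (*(volatile uint32_t *)(addr))

    /* 32-bit register read/write at byte offset 'reg' from the BAR base */
    #define E1000_READ_REG_SKETCH(hw, reg) \
            E1000_PCI_REG((hw)->hw_addr + (reg))
    #define E1000_WRITE_REG_SKETCH(hw, reg, value) \
            (E1000_PCI_REG((hw)->hw_addr + (reg)) = (value))

    /* posting read of the status register (offset 0x08) to flush prior writes */
    #define E1000_WRITE_FLUSH_SKETCH(hw) \
            ((void)E1000_READ_REG_SKETCH(hw, 0x08))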
diff --git a/lib/librte_pmd_e1000/e1000/e1000_82575.c b/lib/librte_pmd_e1000/e1000/e1000_82575.c
new file mode 100644
index 0000000..b2f1fca
--- /dev/null
+++ b/lib/librte_pmd_e1000/e1000/e1000_82575.c
@@ -0,0 +1,2429 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
+ */
+
+#include "e1000_api.h"
+
+static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
+static void e1000_release_phy_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
+static void e1000_release_nvm_82575(struct e1000_hw *hw);
+static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+                                         u16 *duplex);
+static s32  e1000_init_hw_82575(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                           u16 *data);
+static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw,
+                                    u32 offset, u16 *data);
+static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw,
+                                     u32 offset, u16 data);
+static s32  e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
+static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+                                            u32 offset, u16 data);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+                                                 u16 *speed, u16 *duplex);
+static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
+static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+                                               u16 offset);
+static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+                                               u16 offset);
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+
+static const u16 e1000_82580_rxpbs_table[] =
+       { 36, 72, 144, 1, 2, 4, 8, 16,
+         35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+       (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
+
+/**
+ *  e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ *  @hw: pointer to the HW structure
+ *
+ *  Called to determine if the I2C pins are being used for I2C or as an
+ *  external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+       u32 reg = 0;
+       bool ext_mdio = FALSE;
+
+       DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
+       switch (hw->mac.type) {
+       case e1000_82575:
+       case e1000_82576:
+               reg = E1000_READ_REG(hw, E1000_MDIC);
+               ext_mdio = !!(reg & E1000_MDIC_DEST);
+               break;
+       case e1000_82580:
+       case e1000_i350:
+               reg = E1000_READ_REG(hw, E1000_MDICNFG);
+               ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+               break;
+       default:
+               break;
+       }
+       return ext_mdio;
+}
+
+/**
+ *  e1000_init_phy_params_82575 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u32 ctrl_ext;
+
+       DEBUGFUNC("e1000_init_phy_params_82575");
+
+       if (hw->phy.media_type != e1000_media_type_copper) {
+               phy->type = e1000_phy_none;
+               goto out;
+       }
+
+       phy->ops.power_up   = e1000_power_up_phy_copper;
+       phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
+       phy->autoneg_mask           = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+       phy->reset_delay_us         = 100;
+
+       phy->ops.acquire            = e1000_acquire_phy_82575;
+       phy->ops.check_reset_block  = e1000_check_reset_block_generic;
+       phy->ops.commit             = e1000_phy_sw_reset_generic;
+       phy->ops.get_cfg_done       = e1000_get_cfg_done_82575;
+       phy->ops.release            = e1000_release_phy_82575;
+
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+       if (e1000_sgmii_active_82575(hw)) {
+               phy->ops.reset      = e1000_phy_hw_reset_sgmii_82575;
+               ctrl_ext |= E1000_CTRL_I2C_ENA;
+       } else {
+               phy->ops.reset      = e1000_phy_hw_reset_generic;
+               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+       }
+
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+       e1000_reset_mdicnfg_82580(hw);
+
+       if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+               phy->ops.read_reg   = e1000_read_phy_reg_sgmii_82575;
+               phy->ops.write_reg  = e1000_write_phy_reg_sgmii_82575;
+       } else if (hw->mac.type >= e1000_82580) {
+               phy->ops.read_reg   = e1000_read_phy_reg_82580;
+               phy->ops.write_reg  = e1000_write_phy_reg_82580;
+       } else {
+               phy->ops.read_reg   = e1000_read_phy_reg_igp;
+               phy->ops.write_reg  = e1000_write_phy_reg_igp;
+       }
+
+       /* Set phy->phy_addr and phy->id. */
+       ret_val = e1000_get_phy_id_82575(hw);
+
+       /* Verify phy id and set remaining function pointers */
+       switch (phy->id) {
+       case I347AT4_E_PHY_ID:
+       case M88E1112_E_PHY_ID:
+       case M88E1340M_E_PHY_ID:
+       case M88E1111_I_PHY_ID:
+               phy->type                   = e1000_phy_m88;
+               phy->ops.check_polarity     = e1000_check_polarity_m88;
+               phy->ops.get_info           = e1000_get_phy_info_m88;
+               if (phy->id == I347AT4_E_PHY_ID ||
+                   phy->id == M88E1112_E_PHY_ID ||
+                   phy->id == M88E1340M_E_PHY_ID)
+                       phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+               else
+                       phy->ops.get_cable_length = e1000_get_cable_length_m88;
+               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+               break;
+       case IGP03E1000_E_PHY_ID:
+       case IGP04E1000_E_PHY_ID:
+               phy->type                   = e1000_phy_igp_3;
+               phy->ops.check_polarity     = e1000_check_polarity_igp;
+               phy->ops.get_info           = e1000_get_phy_info_igp;
+               phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
+               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
+               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
+               break;
+       case I82580_I_PHY_ID:
+       case I350_I_PHY_ID:
+               phy->type                   = e1000_phy_82580;
+               phy->ops.check_polarity     = e1000_check_polarity_82577;
+               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
+               phy->ops.get_cable_length   = e1000_get_cable_length_82577;
+               phy->ops.get_info           = e1000_get_phy_info_82577;
+               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82580;
+               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_82580;
+               break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       u16 size;
+
+       DEBUGFUNC("e1000_init_nvm_params_82575");
+
+       size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                    E1000_EECD_SIZE_EX_SHIFT);
+       /*
+        * Added to a constant, "size" becomes the left-shift value
+        * for setting word_size.
+        */
+       size += NVM_WORD_SIZE_BASE_SHIFT;
+
+       nvm->word_size = 1 << size;
+       nvm->opcode_bits        = 8;
+       nvm->delay_usec         = 1;
+       switch (nvm->override) {
+       case e1000_nvm_override_spi_large:
+               nvm->page_size    = 32;
+               nvm->address_bits = 16;
+               break;
+       case e1000_nvm_override_spi_small:
+               nvm->page_size    = 8;
+               nvm->address_bits = 8;
+               break;
+       default:
+               nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+               nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+               break;
+       }
+
+       nvm->type = e1000_nvm_eeprom_spi;
+
+       if (nvm->word_size == (1 << 15))
+               nvm->page_size = 128;
+
+       /* Function Pointers */
+       nvm->ops.acquire    = e1000_acquire_nvm_82575;
+       nvm->ops.release    = e1000_release_nvm_82575;
+       if (nvm->word_size < (1 << 15))
+               nvm->ops.read    = e1000_read_nvm_eerd;
+       else
+               nvm->ops.read    = e1000_read_nvm_spi;
+
+       nvm->ops.write              = e1000_write_nvm_spi;
+       nvm->ops.validate           = e1000_validate_nvm_checksum_generic;
+       nvm->ops.update             = e1000_update_nvm_checksum_generic;
+       nvm->ops.valid_led_default  = e1000_valid_led_default_82575;
+
+       /* override generic family function pointers for specific descendants */
+       switch (hw->mac.type) {
+       case e1000_82580:
+               nvm->ops.validate = e1000_validate_nvm_checksum_82580;
+               nvm->ops.update = e1000_update_nvm_checksum_82580;
+               break;
+       case e1000_i350:
+               nvm->ops.validate = e1000_validate_nvm_checksum_i350;
+               nvm->ops.update = e1000_update_nvm_checksum_i350;
+               break;
+       default:
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82575 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+       u32 ctrl_ext = 0;
+
+       DEBUGFUNC("e1000_init_mac_params_82575");
+
+       /* Set media type */
+       /*
+        * The 82575 uses bits 22:23 for link mode. The mode can be changed
+        * based on the EEPROM. We cannot rely upon device ID. There
+        * is no distinguishable difference between fiber and internal
+        * SerDes mode on the 82575. There can be an external PHY attached
+        * on the SGMII interface. For this, we'll set sgmii_active to TRUE.
+        */
+       hw->phy.media_type = e1000_media_type_copper;
+       dev_spec->sgmii_active = FALSE;
+
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+       case E1000_CTRL_EXT_LINK_MODE_SGMII:
+               dev_spec->sgmii_active = TRUE;
+               break;
+       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+               hw->phy.media_type = e1000_media_type_internal_serdes;
+               break;
+       default:
+               break;
+       }
+
+       /* Set mta register count */
+       mac->mta_reg_count = 128;
+       /* Set uta register count */
+       mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
+       /* Set rar entry count */
+       mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+       if (mac->type == e1000_82576)
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+       if (mac->type == e1000_82580)
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+       if (mac->type == e1000_i350) {
+               mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+               /* Enable EEE default settings for i350 */
+               dev_spec->eee_disable = FALSE;
+       }
+
+       /* Set if part includes ASF firmware */
+       mac->asf_firmware_present = TRUE;
+       /* FWSM register */
+       mac->has_fwsm = TRUE;
+       /* ARC supported; valid only if manageability features are enabled. */
+       mac->arc_subsystem_valid =
+               (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+                       ? TRUE : FALSE;
+
+       /* Function pointers */
+
+       /* bus type/speed/width */
+       mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+       /* reset */
+       if (mac->type >= e1000_82580)
+               mac->ops.reset_hw = e1000_reset_hw_82580;
+       else
+               mac->ops.reset_hw = e1000_reset_hw_82575;
+       /* hw initialization */
+       mac->ops.init_hw = e1000_init_hw_82575;
+       /* link setup */
+       mac->ops.setup_link = e1000_setup_link_generic;
+       /* physical interface link setup */
+       mac->ops.setup_physical_interface =
+               (hw->phy.media_type == e1000_media_type_copper)
+                       ? e1000_setup_copper_link_82575
+                       : e1000_setup_serdes_link_82575;
+       /* physical interface shutdown */
+       mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+       /* physical interface power up */
+       mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
+       /* check for link */
+       mac->ops.check_for_link = e1000_check_for_link_82575;
+       /* receive address register setting */
+       mac->ops.rar_set = e1000_rar_set_generic;
+       /* read mac address */
+       mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+       /* configure collision distance */
+       mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
+       /* multicast address update */
+       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+       /* writing VFTA */
+       mac->ops.write_vfta = e1000_write_vfta_generic;
+       /* clearing VFTA */
+       mac->ops.clear_vfta = e1000_clear_vfta_generic;
+       /* ID LED init */
+       mac->ops.id_led_init = e1000_id_led_init_generic;
+       /* blink LED */
+       mac->ops.blink_led = e1000_blink_led_generic;
+       /* setup LED */
+       mac->ops.setup_led = e1000_setup_led_generic;
+       /* cleanup LED */
+       mac->ops.cleanup_led = e1000_cleanup_led_generic;
+       /* turn on/off LED */
+       mac->ops.led_on = e1000_led_on_generic;
+       mac->ops.led_off = e1000_led_off_generic;
+       /* clear hardware counters */
+       mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+       /* link info */
+       mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+
+       /* set lan id for port to determine which phy lock to use */
+       hw->mac.ops.set_lan_id(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_function_pointers_82575 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_function_pointers_82575");
+
+       hw->mac.ops.init_params = e1000_init_mac_params_82575;
+       hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
+       hw->phy.ops.init_params = e1000_init_phy_params_82575;
+       hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
+}
+
+/**
+ *  e1000_acquire_phy_82575 - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+{
+       u16 mask = E1000_SWFW_PHY0_SM;
+
+       DEBUGFUNC("e1000_acquire_phy_82575");
+
+       if (hw->bus.func == E1000_FUNC_1)
+               mask = E1000_SWFW_PHY1_SM;
+       else if (hw->bus.func == E1000_FUNC_2)
+               mask = E1000_SWFW_PHY2_SM;
+       else if (hw->bus.func == E1000_FUNC_3)
+               mask = E1000_SWFW_PHY3_SM;
+
+       return e1000_acquire_swfw_sync_82575(hw, mask);
+}
+
+/**
+ *  e1000_release_phy_82575 - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+       u16 mask = E1000_SWFW_PHY0_SM;
+
+       DEBUGFUNC("e1000_release_phy_82575");
+
+       if (hw->bus.func == E1000_FUNC_1)
+               mask = E1000_SWFW_PHY1_SM;
+       else if (hw->bus.func == E1000_FUNC_2)
+               mask = E1000_SWFW_PHY2_SM;
+       else if (hw->bus.func == E1000_FUNC_3)
+               mask = E1000_SWFW_PHY3_SM;
+
+       e1000_release_swfw_sync_82575(hw, mask);
+}
+
+/**
+ *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the serial gigabit media independent
+ *  interface and stores the retrieved information in data.
+ **/
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                          u16 *data)
+{
+       s32 ret_val = -E1000_ERR_PARAM;
+
+       DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+               DEBUGOUT1("PHY Address %u is out of range\n", offset);
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the serial gigabit
+ *  media independent interface.
+ **/
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                           u16 data)
+{
+       s32 ret_val = -E1000_ERR_PARAM;
+
+       DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+               DEBUGOUT1("PHY Address %d is out of range\n", offset);
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieves the PHY address and ID both for PHYs that use the sgmii
+ *  interface and for those that do not.
+ **/
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32  ret_val = E1000_SUCCESS;
+       u16 phy_id;
+       u32 ctrl_ext;
+       u32 mdic;
+
+       DEBUGFUNC("e1000_get_phy_id_82575");
+
+       /*
+        * For SGMII PHYs, we try the list of possible addresses until
+        * we find one that works.  For non-SGMII PHYs
+        * (e.g. integrated copper PHYs), an address of 1 should
+        * work.  The result of this function should mean phy->phy_addr
+        * and phy->id are set correctly.
+        */
+       if (!e1000_sgmii_active_82575(hw)) {
+               phy->addr = 1;
+               ret_val = e1000_get_phy_id(hw);
+               goto out;
+       }
+
+       if (e1000_sgmii_uses_mdio_82575(hw)) {
+               switch (hw->mac.type) {
+               case e1000_82575:
+               case e1000_82576:
+                       mdic = E1000_READ_REG(hw, E1000_MDIC);
+                       mdic &= E1000_MDIC_PHY_MASK;
+                       phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+                       break;
+               case e1000_82580:
+               case e1000_i350:
+                       mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+                       mdic &= E1000_MDICNFG_PHY_MASK;
+                       phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+                       break;
+               default:
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+                       break;
+               }
+               ret_val = e1000_get_phy_id(hw);
+               goto out;
+       }
+
+       /* Power on sgmii phy if it is disabled */
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+                       ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(300);
+
+       /*
+        * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+        * Therefore, we need to test 1-7
+        */
+       for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+               ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+               if (ret_val == E1000_SUCCESS) {
+                       DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+                                 phy_id,
+                                 phy->addr);
+                       /*
+                        * At the time of this writing, the M88 part is
+                        * the only supported SGMII PHY product.
+                        */
+                       if (phy_id == M88_VENDOR)
+                               break;
+               } else {
+                       DEBUGOUT1("PHY address %u was unreadable\n",
+                                 phy->addr);
+               }
+       }
+
+       /* A valid PHY type couldn't be found. */
+       if (phy->addr == 8) {
+               phy->addr = 0;
+               ret_val = -E1000_ERR_PHY;
+       } else {
+               ret_val = e1000_get_phy_id(hw);
+       }
+
+       /* restore previous sfp cage power state */
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+       /*
+        * This isn't a TRUE "hard" reset, but is the only reset
+        * available to us at this time.
+        */
+
+       DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+       if (!(hw->phy.ops.write_reg))
+               goto out;
+
+       /*
+        * SFP documentation requires the following to configure the SFP module
+        * to work on SGMII.  No further documentation is given.
+        */
+       ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+       if (ret_val)
+               goto out;
+
+       ret_val = hw->phy.ops.commit(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d0_lplu_state_82575");
+
+       if (!(hw->phy.ops.read_reg))
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+       if (ret_val)
+               goto out;
+
+       if (active) {
+               data |= IGP02E1000_PM_D0_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                            data);
+               if (ret_val)
+                       goto out;
+
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                           &data);
+               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                            data);
+               if (ret_val)
+                       goto out;
+       } else {
+               data &= ~IGP02E1000_PM_D0_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                            data);
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                   IGP01E1000_PHY_PORT_CONFIG,
+                                                   &data);
+                       if (ret_val)
+                               goto out;
+
+                       data |= IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                   IGP01E1000_PHY_PORT_CONFIG,
+                                                   &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d0_lplu_state_82580");
+
+       data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+       if (active) {
+               data |= E1000_82580_PM_D0_LPLU;
+
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               data &= ~E1000_82580_PM_SPD;
+       } else {
+               data &= ~E1000_82580_PM_D0_LPLU;
+
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       data |= E1000_82580_PM_SPD;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       data &= ~E1000_82580_PM_SPD;
+               }
+       }
+
+       E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+       return ret_val;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+       data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+       if (!active) {
+               data &= ~E1000_82580_PM_D3_LPLU;
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       data |= E1000_82580_PM_SPD;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       data &= ~E1000_82580_PM_SPD;
+               }
+       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+               data |= E1000_82580_PM_D3_LPLU;
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               data &= ~E1000_82580_PM_SPD;
+       }
+
+       E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+       return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_82575 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_acquire_nvm_82575");
+
+       ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Check for any access error left behind by a previous access
+        * and clear it before continuing.
+        */
+       if (hw->mac.type == e1000_i350) {
+               u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+               if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+                   E1000_EECD_TIMEOUT)) {
+                       /* Clear all access error flags */
+                       E1000_WRITE_REG(hw, E1000_EECD, eecd |
+                                       E1000_EECD_ERROR_CLR);
+                       DEBUGOUT("Nvm bit banging access error"
+                               " detected and cleared.\n");
+               }
+       }
+       if (hw->mac.type == e1000_82580) {
+               u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+               if (eecd & E1000_EECD_BLOCKED) {
+                       /* Clear access error flag */
+                       E1000_WRITE_REG(hw, E1000_EECD, eecd |
+                                       E1000_EECD_BLOCKED);
+                       DEBUGOUT("Nvm bit banging access"
+                               " error detected and cleared.\n");
+               }
+       }
+
+       ret_val = e1000_acquire_nvm_generic(hw);
+       if (ret_val)
+               e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_release_nvm_82575");
+
+       e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+       u32 swmask = mask;
+       u32 fwmask = mask << 16;
+       s32 ret_val = E1000_SUCCESS;
+       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+       DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+       while (i < timeout) {
+               if (e1000_get_hw_semaphore_generic(hw)) {
+                       ret_val = -E1000_ERR_SWFW_SYNC;
+                       goto out;
+               }
+
+               swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+               if (!(swfw_sync & (fwmask | swmask)))
+                       break;
+
+               /*
+                * Firmware currently using resource (fwmask)
+                * or other software thread using resource (swmask)
+                */
+               e1000_put_hw_semaphore_generic(hw);
+               msec_delay_irq(5);
+               i++;
+       }
+
+       if (i == timeout) {
+               DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+               ret_val = -E1000_ERR_SWFW_SYNC;
+               goto out;
+       }
+
+       swfw_sync |= swmask;
+       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+       e1000_put_hw_semaphore_generic(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+
+       DEBUGFUNC("e1000_release_swfw_sync_82575");
+
+       while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+               ; /* Empty */
+
+       swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+       swfw_sync &= ~mask;
+       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+       e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ *  e1000_get_cfg_done_82575 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so an error is *ONLY* logged and returns
+ *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
+ *  would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+{
+       s32 timeout = PHY_CFG_TIMEOUT;
+       s32 ret_val = E1000_SUCCESS;
+       u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+       DEBUGFUNC("e1000_get_cfg_done_82575");
+
+       if (hw->bus.func == E1000_FUNC_1)
+               mask = E1000_NVM_CFG_DONE_PORT_1;
+       else if (hw->bus.func == E1000_FUNC_2)
+               mask = E1000_NVM_CFG_DONE_PORT_2;
+       else if (hw->bus.func == E1000_FUNC_3)
+               mask = E1000_NVM_CFG_DONE_PORT_3;
+       while (timeout) {
+               if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+                       break;
+               msec_delay(1);
+               timeout--;
+       }
+       if (!timeout)
+               DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+       /* If EEPROM is not marked present, init the PHY manually */
+       if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+           (hw->phy.type == e1000_phy_igp_3))
+               e1000_phy_init_script_igp3(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_82575 - Get link speed/duplex info
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  This is a wrapper function, if using the serial gigabit media independent
+ *  interface, use PCS to retrieve the link speed and duplex information.
+ *  Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+                                        u16 *duplex)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_get_link_up_info_82575");
+
+       if (hw->phy.media_type != e1000_media_type_copper)
+               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+                                                              duplex);
+       else
+               ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+                                                                   duplex);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_link_82575 - Check for link
+ *  @hw: pointer to the HW structure
+ *
+ *  If sgmii is enabled, then use the pcs register to determine link, otherwise
+ *  use the generic interface for determining link.
+ **/
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 speed, duplex;
+
+       DEBUGFUNC("e1000_check_for_link_82575");
+
+       if (hw->phy.media_type != e1000_media_type_copper) {
+               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+                                                              &duplex);
+               /*
+                * Use this flag to determine if link needs to be checked or
+                * not.  If we have link clear the flag so that we do not
+                * continue to check for link.
+                */
+               hw->mac.get_link_status = !hw->mac.serdes_has_link;
+       } else {
+               ret_val = e1000_check_for_copper_link_generic(hw);
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ *  @hw: pointer to the HW structure
+ **/
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+       u32 reg;
+
+       DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+           !e1000_sgmii_active_82575(hw))
+               return;
+
+       /* Enable PCS to turn on link */
+       reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+       reg |= E1000_PCS_CFG_PCS_EN;
+       E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+       /* Power up the laser */
+       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+       /* flush the write to verify completion */
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(1);
+}
+
+/**
+ *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Using the physical coding sub-layer (PCS), retrieve the current speed and
+ *  duplex, then store the values in the pointers provided.
+ **/
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+                                                u16 *speed, u16 *duplex)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 pcs;
+
+       DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
+
+       /* Set up defaults for the return values of this function */
+       mac->serdes_has_link = FALSE;
+       *speed = 0;
+       *duplex = 0;
+
+       /*
+        * Read the PCS Status register for link state. For non-copper modes,
+        * the MAC STATUS register is not accurate, so the PCS status register
+        * is used instead.
+        */
+       pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+       /*
+        * The link up bit determines when link is up on autoneg. The sync ok
+        * gets set once both sides sync up and agree upon link. Stable link
+        * can be determined by checking for both link up and link sync ok
+        */
+       if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+               mac->serdes_has_link = TRUE;
+
+               /* Detect and store PCS speed */
+               if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+                       *speed = SPEED_1000;
+               } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+                       *speed = SPEED_100;
+               } else {
+                       *speed = SPEED_10;
+               }
+
+               /* Detect and store PCS duplex */
+               if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+                       *duplex = FULL_DUPLEX;
+               } else {
+                       *duplex = HALF_DUPLEX;
+               }
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_shutdown_serdes_link_82575 - Remove link during power down
+ *  @hw: pointer to the HW structure
+ *
+ *  In the case of serdes, shut down sfp and PCS on driver unload
+ *  when management pass thru is not enabled.
+ **/
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+       u32 reg;
+
+       DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+           !e1000_sgmii_active_82575(hw))
+               return;
+
+       if (!e1000_enable_mng_pass_thru(hw)) {
+               /* Disable PCS to turn off link */
+               reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+               reg &= ~E1000_PCS_CFG_PCS_EN;
+               E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+               /* shutdown the laser */
+               reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               reg |= E1000_CTRL_EXT_SDP3_DATA;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+               /* flush the write to verify completion */
+               E1000_WRITE_FLUSH(hw);
+               msec_delay(1);
+       }
+
+       return;
+}
+
+/**
+ *  e1000_reset_hw_82575 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_reset_hw_82575");
+
+       /*
+        * Prevent the PCI-E bus from sticking if there is no TLP connection
+        * on the last TLP read/write transaction when MAC is reset.
+        */
+       ret_val = e1000_disable_pcie_master_generic(hw);
+       if (ret_val) {
+               DEBUGOUT("PCI-E Master disable polling has failed.\n");
+       }
+
+       /* set the completion timeout for interface */
+       ret_val = e1000_set_pcie_completion_timeout(hw);
+       if (ret_val) {
+               DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+       }
+
+       DEBUGOUT("Masking off all interrupts\n");
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+       E1000_WRITE_REG(hw, E1000_RCTL, 0);
+       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+       E1000_WRITE_FLUSH(hw);
+
+       msec_delay(10);
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       DEBUGOUT("Issuing a global reset to MAC\n");
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+       ret_val = e1000_get_auto_rd_done_generic(hw);
+       if (ret_val) {
+               /*
+                * When auto config read does not complete, do not
+                * return with an error. This can happen in situations
+                * where there is no eeprom and prevents getting link.
+                */
+               DEBUGOUT("Auto Read Done did not complete\n");
+       }
+
+       /* If EEPROM is not present, run manual init scripts */
+       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+               e1000_reset_init_script_82575(hw);
+
+       /* Clear any pending interrupt events. */
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+       E1000_READ_REG(hw, E1000_ICR);
+
+       /* Install any alternate MAC address into RAR0 */
+       ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82575 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82575(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val;
+       u16 i, rar_count = mac->rar_entry_count;
+
+       DEBUGFUNC("e1000_init_hw_82575");
+
+       /* Initialize identification LED */
+       ret_val = mac->ops.id_led_init(hw);
+       if (ret_val) {
+               DEBUGOUT("Error initializing identification LED\n");
+               /* This is not fatal and we should not stop init due to this */
+       }
+
+       /* Disabling VLAN filtering */
+       DEBUGOUT("Initializing the IEEE VLAN\n");
+       mac->ops.clear_vfta(hw);
+
+       /* Setup the receive address */
+       e1000_init_rx_addrs_generic(hw, rar_count);
+
+       /* Zero out the Multicast HASH table */
+       DEBUGOUT("Zeroing the MTA\n");
+       for (i = 0; i < mac->mta_reg_count; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+       /* Zero out the Unicast HASH table */
+       DEBUGOUT("Zeroing the UTA\n");
+       for (i = 0; i < mac->uta_reg_count; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
+       /* Setup link and flow control */
+       ret_val = mac->ops.setup_link(hw);
+
+       /*
+        * Clear all of the statistics registers (clear on read).  It is
+        * important that we do this after we have tried to establish link
+        * because the symbol error count will increment wildly if there
+        * is no link.
+        */
+       e1000_clear_hw_cntrs_82575(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82575 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link; once link is established, the collision distance and flow
+ *  control are configured.
+ **/
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32  ret_val;
+
+       DEBUGFUNC("e1000_setup_copper_link_82575");
+
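+       /*
+        * Assert SLU (set link up) and clear the force speed/duplex bits so
+        * the negotiated (or PHY-determined) settings control the MAC link.
+        */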
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl |= E1000_CTRL_SLU;
+       ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       ret_val = e1000_setup_serdes_link_82575(hw);
+       if (ret_val)
+               goto out;
+
+       if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+               /* allow time for the SFP cage to power up the phy */
+               msec_delay(300);
+
+               ret_val = hw->phy.ops.reset(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error resetting the PHY.\n");
+                       goto out;
+               }
+       }
+       switch (hw->phy.type) {
+       case e1000_phy_m88:
+               if (hw->phy.id == I347AT4_E_PHY_ID ||
+                   hw->phy.id == M88E1112_E_PHY_ID ||
+                   hw->phy.id == M88E1340M_E_PHY_ID)
+                       ret_val = e1000_copper_link_setup_m88_gen2(hw);
+               else
+                       ret_val = e1000_copper_link_setup_m88(hw);
+               break;
+       case e1000_phy_igp_3:
+               ret_val = e1000_copper_link_setup_igp(hw);
+               break;
+       case e1000_phy_82580:
+               ret_val = e1000_copper_link_setup_82577(hw);
+               break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               break;
+       }
+
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_setup_copper_link_generic(hw);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_serdes_link_82575 - Setup link for serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ *  used on copper connections where the serialized gigabit media independent
+ *  interface (sgmii) is used, or on serdes fiber connections.  Configures
+ *  the link for auto-negotiation or forces speed/duplex.
+ **/
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+       u32 ctrl_ext, ctrl_reg, reg;
+       bool pcs_autoneg;
+
+       DEBUGFUNC("e1000_setup_serdes_link_82575");
+
+       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+           !e1000_sgmii_active_82575(hw))
+               return E1000_SUCCESS;
+
+       /*
+        * On the 82575, SerDes loopback mode persists until it is
+        * explicitly turned off or a power cycle is performed.  A read to
+        * the register does not indicate its status.  Therefore, we ensure
+        * loopback mode is disabled during initialization.
+        */
+       E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+       /* power on the sfp cage if present */
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl_reg |= E1000_CTRL_SLU;
+
+       /* set both sw defined pins on 82575/82576*/
+       if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
+               ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+       reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+       /* default pcs_autoneg to the same setting as mac autoneg */
+       pcs_autoneg = hw->mac.autoneg;
+
+       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+       case E1000_CTRL_EXT_LINK_MODE_SGMII:
+               /* sgmii mode lets the phy handle forcing speed/duplex */
+               pcs_autoneg = TRUE;
+               /* autoneg time out should be disabled for SGMII mode */
+               reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+               break;
+       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+               /* disable PCS autoneg and support parallel detect only */
+               pcs_autoneg = FALSE;
+               /* fall through to default case */
+       default:
+               /*
+                * non-SGMII modes only support a speed of 1000/Full for the
+                * link so it is best to just force the MAC and let the pcs
+                * link either autoneg or be forced to 1000/Full
+                */
+               ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+                           E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+               /* set speed of 1000/Full if speed/duplex is forced */
+               reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+               break;
+       }
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+       /*
+        * New SerDes mode allows for forcing speed or autonegotiating speed
+        * at 1gb. Autoneg should be the default set by most drivers. This is the
+        * mode that will be compatible with older link partners and switches.
+        * However, both are supported by the hardware and some drivers/tools.
+        */
+       reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+                E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+       /*
+        * We force flow control to prevent the CTRL register values from being
+        * overwritten by the autonegotiated flow control values
+        */
+       reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+       if (pcs_autoneg) {
+               /* Set PCS register for autoneg */
+               reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+                      E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+               DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+       } else {
+               /* Set PCS register for forced link */
+               reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
+               DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+       }
+
+       E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+       if (!e1000_sgmii_active_82575(hw))
+               e1000_force_mac_fc_generic(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_valid_led_default_82575 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_valid_led_default_82575");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+               switch (hw->phy.media_type) {
+               case e1000_media_type_internal_serdes:
+                       *data = ID_LED_DEFAULT_82575_SERDES;
+                       break;
+               case e1000_media_type_copper:
+               default:
+                       *data = ID_LED_DEFAULT;
+                       break;
+               }
+       }
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_sgmii_active_82575 - Return sgmii state
+ *  @hw: pointer to the HW structure
+ *
+ *  82575 silicon has a serialized gigabit media independent interface (sgmii)
+ *  which can be enabled for use in the embedded applications.  Simply
+ *  return the current state of the sgmii interface.
+ **/
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+       return dev_spec->sgmii_active;
+}
+
+/**
+ *  e1000_reset_init_script_82575 - Inits HW defaults after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Inits recommended HW defaults after a reset when there is no EEPROM
+ *  detected. This is only for the 82575.
+ **/
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_reset_init_script_82575");
+
+       if (hw->mac.type == e1000_82575) {
+               DEBUGOUT("Running reset init script for 82575\n");
+               /* SerDes configuration via SERDESCTRL */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
+
+               /* CCM configuration via CCMCTL register */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
+
+               /* PCIe lanes configuration */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+               /* PCIe PLL Configuration */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr_82575 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_mac_addr_82575");
+
+       /*
+        * If there's an alternate MAC address place it in RAR0
+        * so that it will override the Si installed default perm
+        * address.
+        */
+       ret_val = e1000_check_alt_mac_addr_generic(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_config_collision_dist_82575 - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+       u32 tctl_ext;
+
+       DEBUGFUNC("e1000_config_collision_dist_82575");
+
+       tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
+
+       tctl_ext &= ~E1000_TCTL_EXT_COLD;
+       tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
+
+       E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, when wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+
+       if (!(phy->ops.check_reset_block))
+               return;
+
+       /* If the management interface is not enabled, then power down */
+       if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
+               e1000_power_down_phy_copper(hw);
+
+       return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+       e1000_clear_hw_cntrs_base_generic(hw);
+
+       E1000_READ_REG(hw, E1000_PRC64);
+       E1000_READ_REG(hw, E1000_PRC127);
+       E1000_READ_REG(hw, E1000_PRC255);
+       E1000_READ_REG(hw, E1000_PRC511);
+       E1000_READ_REG(hw, E1000_PRC1023);
+       E1000_READ_REG(hw, E1000_PRC1522);
+       E1000_READ_REG(hw, E1000_PTC64);
+       E1000_READ_REG(hw, E1000_PTC127);
+       E1000_READ_REG(hw, E1000_PTC255);
+       E1000_READ_REG(hw, E1000_PTC511);
+       E1000_READ_REG(hw, E1000_PTC1023);
+       E1000_READ_REG(hw, E1000_PTC1522);
+
+       E1000_READ_REG(hw, E1000_ALGNERRC);
+       E1000_READ_REG(hw, E1000_RXERRC);
+       E1000_READ_REG(hw, E1000_TNCRS);
+       E1000_READ_REG(hw, E1000_CEXTERR);
+       E1000_READ_REG(hw, E1000_TSCTC);
+       E1000_READ_REG(hw, E1000_TSCTFC);
+
+       E1000_READ_REG(hw, E1000_MGTPRC);
+       E1000_READ_REG(hw, E1000_MGTPDC);
+       E1000_READ_REG(hw, E1000_MGTPTC);
+
+       E1000_READ_REG(hw, E1000_IAC);
+       E1000_READ_REG(hw, E1000_ICRXOC);
+
+       E1000_READ_REG(hw, E1000_ICRXPTC);
+       E1000_READ_REG(hw, E1000_ICRXATC);
+       E1000_READ_REG(hw, E1000_ICTXPTC);
+       E1000_READ_REG(hw, E1000_ICTXATC);
+       E1000_READ_REG(hw, E1000_ICTXQEC);
+       E1000_READ_REG(hw, E1000_ICTXQMTC);
+       E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+       E1000_READ_REG(hw, E1000_CBTMPC);
+       E1000_READ_REG(hw, E1000_HTDPMC);
+       E1000_READ_REG(hw, E1000_CBRMPC);
+       E1000_READ_REG(hw, E1000_RPTHC);
+       E1000_READ_REG(hw, E1000_HGPTC);
+       E1000_READ_REG(hw, E1000_HTCBDPC);
+       E1000_READ_REG(hw, E1000_HGORCL);
+       E1000_READ_REG(hw, E1000_HGORCH);
+       E1000_READ_REG(hw, E1000_HGOTCL);
+       E1000_READ_REG(hw, E1000_HGOTCH);
+       E1000_READ_REG(hw, E1000_LENERRS);
+
+       /* This register should not be read in copper configurations */
+       if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+           e1000_sgmii_active_82575(hw))
+               E1000_READ_REG(hw, E1000_SCVPC);
+}
+
+/**
+ *  e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
+ *  @hw: pointer to the HW structure
+ *
+ *  After rx enable, if manageability is enabled, then there is likely some
+ *  bad data at the start of the fifo and possibly in the DMA fifo.  This
+ *  function clears the fifos and flushes any packets that came in as rx was
+ *  being enabled.
+ **/
+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+       u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+       int i, ms_wait;
+
+       DEBUGFUNC("e1000_rx_fifo_flush_82575");
+       if (hw->mac.type != e1000_82575 ||
+           !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+               return;
+
+       /* Disable all Rx queues */
+       for (i = 0; i < 4; i++) {
+               rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i),
+                               rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+       }
+       /* Poll all queues to verify they have shut down */
+       for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+               msec_delay(1);
+               rx_enabled = 0;
+               for (i = 0; i < 4; i++)
+                       rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
+               if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+                       break;
+       }
+
+       if (ms_wait == 10)
+               DEBUGOUT("Queue disable timed out after 10ms\n");
+
+       /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+        * incoming packets are rejected.  Set enable and wait 2ms so that
+        * any packet that was arriving while RCTL.EN was being set is flushed
+        */
+       rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+       rlpml = E1000_READ_REG(hw, E1000_RLPML);
+       E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+       temp_rctl |= E1000_RCTL_LPE;
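+       /*
+        * With RLPML cleared to 0 and LPE set, every incoming frame exceeds
+        * the long packet limit and is discarded, so re-enabling Rx below
+        * simply drains whatever was in flight without delivering packets.
+        */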
+
+       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(2);
+
+       /* Enable Rx queues that were previously enabled and restore our
+        * previous state
+        */
+       for (i = 0; i < 4; i++)
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+       E1000_WRITE_FLUSH(hw);
+
+       E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+       /* Flush receive errors generated by workaround */
+       E1000_READ_REG(hw, E1000_ROC);
+       E1000_READ_REG(hw, E1000_RNBC);
+       E1000_READ_REG(hw, E1000_MPC);
+}
+
+/**
+ *  e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
+ *  however, the hardware default for these parts is 500us to 1ms, which is
+ *  less than the 10ms recommended by the pci-e spec.  To address this we need
+ *  to increase the value to a range of 10ms to 200ms for capability version 1
+ *  config, or 16ms to 55ms for version 2.
+ **/
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+       u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+       s32 ret_val = E1000_SUCCESS;
+       u16 pcie_devctl2;
+
+       /* only take action if timeout value is defaulted to 0 */
+       if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+               goto out;
+
+       /*
+        * if the capabilities version is type 1 we can write the
+        * timeout of 10ms to 200ms through the GCR register
+        */
+       if (!(gcr & E1000_GCR_CAP_VER2)) {
+               gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+               goto out;
+       }
+
+       /*
+        * for version 2 capabilities we need to write the config space
+        * directly in order to set the completion timeout value in the
+        * 16ms to 55ms range
+        */
+       ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+                                         &pcie_devctl2);
+       if (ret_val)
+               goto out;
+
+       pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+       ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+                                          &pcie_devctl2);
+out:
+       /* disable completion timeout resend */
+       gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+       E1000_WRITE_REG(hw, E1000_GCR, gcr);
+       return ret_val;
+}
+
+/**
+ *  e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *  @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ *  enables/disables L2 switch anti-spoofing functionality.
+ **/
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+       u32 dtxswc;
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+               dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+               if (enable) {
+                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
+                       /* The PF can spoof - it has to in order to
+                        * support emulation mode NICs */
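+                       /* clear the PF's own bits in both the MAC and VLAN
+                        * anti-spoof masks set above (the VLAN bits sit
+                        * MAX_NUM_VFS positions above the MAC bits) */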
+                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+               } else {
+                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
+               }
+               E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+               break;
+       case e1000_i350:
+               dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+               if (enable) {
+                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
+                       /* The PF can spoof - it has to in order to
+                        * support emulation mode NICs
+                        */
+                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+               } else {
+                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
+               }
+               E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables L2 switch loopback functionality.
+ **/
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+       u32 dtxswc;
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+               dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+               if (enable)
+                       dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               else
+                       dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+               break;
+       case e1000_i350:
+               dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+               if (enable)
+                       dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               else
+                       dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+               break;
+       default:
+               /* Currently no other hardware supports loopback */
+               break;
+       }
+}
+
+/**
+ *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables replication of packets across multiple pools.
+ **/
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+       u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+       if (enable)
+               vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+       else
+               vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+       E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_read_phy_reg_82580");
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_write_phy_reg_82580");
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ *  the values found in the EEPROM.  This addresses an issue in which these
+ *  bits are not restored from EEPROM after reset.
+ **/
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u32 mdicnfg;
+       u16 nvm_data = 0;
+
+       DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+       if (hw->mac.type != e1000_82580)
+               goto out;
+       if (!e1000_sgmii_active_82575(hw))
+               goto out;
+
+       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+                                  NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+                                  &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+       if (nvm_data & NVM_WORD24_EXT_MDIO)
+               mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+       if (nvm_data & NVM_WORD24_COM_MDIO)
+               mdicnfg |= E1000_MDICNFG_COM_MDIO;
+       E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_82580 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the function or the entire device (all ports, etc.)
+ *  to a known state.
+ **/
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       /* BH SW mailbox bit in SW_FW_SYNC */
+       u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+       u32 ctrl;
+       bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+       DEBUGFUNC("e1000_reset_hw_82580");
+
+       hw->dev_spec._82575.global_device_reset = FALSE;
+
+       /* Get current control state. */
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       /*
+        * Prevent the PCI-E bus from sticking if there is no TLP connection
+        * on the last TLP read/write transaction when MAC is reset.
+        */
+       ret_val = e1000_disable_pcie_master_generic(hw);
+       if (ret_val)
+               DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+       DEBUGOUT("Masking off all interrupts\n");
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+       E1000_WRITE_REG(hw, E1000_RCTL, 0);
+       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+       E1000_WRITE_FLUSH(hw);
+
+       msec_delay(10);
+
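+       /*
+        * If the SW/FW sync semaphore cannot be taken below, the request is
+        * downgraded to a normal port reset (CTRL.RST) instead of DEV_RST.
+        */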
+       /* Determine whether or not a global dev reset is requested */
+       if (global_device_reset &&
+           e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
+               global_device_reset = FALSE;
+
+       if (global_device_reset &&
+           !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+               ctrl |= E1000_CTRL_DEV_RST;
+       else
+               ctrl |= E1000_CTRL_RST;
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       /* Add delay to ensure DEV_RST has time to complete */
+       if (global_device_reset)
+               msec_delay(5);
+
+       ret_val = e1000_get_auto_rd_done_generic(hw);
+       if (ret_val) {
+               /*
+                * When auto config read does not complete, do not
+                * return with an error. This can happen in situations
+                * where there is no eeprom and prevents getting link.
+                */
+               DEBUGOUT("Auto Read Done did not complete\n");
+       }
+
+       /* If EEPROM is not present, run manual init scripts */
+       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+               e1000_reset_init_script_82575(hw);
+
+       /* clear global device reset status bit */
+       E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+       /* Clear any pending interrupt events. */
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+       E1000_READ_REG(hw, E1000_ICR);
+
+       ret_val = e1000_reset_mdicnfg_82580(hw);
+       if (ret_val)
+               DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
+       /* Install any alternate MAC address into RAR0 */
+       ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+       /* Release semaphore */
+       if (global_device_reset)
+               e1000_release_swfw_sync_82575(hw, swmbsw_mask);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
+ *  @data: data received by reading RXPBS register
+ *
+ *  The 82580 uses a table based approach for packet buffer allocation sizes.
+ *  This function converts the retrieved value into the correct table value
+ *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ *  0x0 36  72 144   1   2   4   8  16
+ *  0x8 35  70 140 rsv rsv rsv rsv rsv
+ */
+u16 e1000_rxpbs_adjust_82580(u32 data)
+{
+       u16 ret_val = 0;
+
+       if (data < E1000_82580_RXPBS_TABLE_SIZE)
+               ret_val = e1000_82580_rxpbs_table[data];
+
+       return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_with_offset - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
+
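+       /*
+        * Sum every word in the protected region including the stored
+        * checksum word itself (hence the "+ 1" bound); a valid image
+        * sums to NVM_SUM.
+        */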
+       for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+
+       if (checksum != (u16) NVM_SUM) {
+               DEBUGOUT("NVM Checksum Invalid\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_with_offset - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+       s32 ret_val;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
+
+       for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error while updating checksum.\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
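+       /* store the value that makes the whole region sum to NVM_SUM */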
+       checksum = (u16) NVM_SUM - checksum;
+       ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+                               &checksum);
+       if (ret_val)
+               DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 eeprom_regions_count = 1;
+       u16 j, nvm_data;
+       u16 nvm_offset;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+               /* if the checksums compatibility bit is set, validate
+                * checksums for all 4 ports. */
+               eeprom_regions_count = 4;
+       }
+
+       for (j = 0; j < eeprom_regions_count; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+                                                               nvm_offset);
+               if (ret_val != E1000_SUCCESS)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_82580 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 j, nvm_data;
+       u16 nvm_offset;
+
+       DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error while updating checksum"
+                       " compatibility bit.\n");
+               goto out;
+       }
+
+       if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+               /* set compatibility bit to validate checksums appropriately */
+               nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+               ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+                                       &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Write Error while updating checksum"
+                               " compatibility bit.\n");
+                       goto out;
+               }
+       }
+
+       for (j = 0; j < 4; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+               if (ret_val) {
+                       goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 j;
+       u16 nvm_offset;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
+       for (j = 0; j < 4; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+                                                               nvm_offset);
+               if (ret_val != E1000_SUCCESS)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_i350 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 j;
+       u16 nvm_offset;
+
+       DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
+       for (j = 0; j < 4; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+               if (ret_val != E1000_SUCCESS)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_eee_i350 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.
+ **/
+s32 e1000_set_eee_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u32 ipcnfg, eeer, ctrl_ext;
+
+       DEBUGFUNC("e1000_set_eee_i350");
+
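+       /*
+        * EEE is only configured for the i350 and only when CTRL_EXT selects
+        * the default (internal copper PHY) link mode; sgmii/serdes link
+        * modes are skipped.
+        */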
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       if ((hw->mac.type != e1000_i350) ||
+           (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+               goto out;
+       ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+       eeer = E1000_READ_REG(hw, E1000_EEER);
+
+       /* enable or disable per user setting */
+       if (!(hw->dev_spec._82575.eee_disable)) {
+               ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
+                          E1000_IPCNFG_EEE_100M_AN);
+               eeer |= (E1000_EEER_TX_LPI_EN |
+                        E1000_EEER_RX_LPI_EN |
+                        E1000_EEER_LPI_FC);
+
+       } else {
+               ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+                           E1000_IPCNFG_EEE_100M_AN);
+               eeer &= ~(E1000_EEER_TX_LPI_EN |
+                         E1000_EEER_RX_LPI_EN |
+                         E1000_EEER_LPI_FC);
+       }
+       E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+       E1000_WRITE_REG(hw, E1000_EEER, eeer);
+       E1000_READ_REG(hw, E1000_IPCNFG);
+       E1000_READ_REG(hw, E1000_EEER);
+out:
+
+       return ret_val;
+}
diff --git a/lib/librte_pmd_e1000/e1000/e1000_82575.h b/lib/librte_pmd_e1000/e1000/e1000_82575.h
new file mode 100644 (file)
index 0000000..415756e
--- /dev/null
@@ -0,0 +1,487 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+                                     (ID_LED_DEF1_DEF2 <<  8) | \
+                                     (ID_LED_DEF1_DEF2 <<  4) | \
+                                     (ID_LED_OFF1_ON2))
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR.  The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+/*
+ * For 82576, there is an additional set of RARs that begins at an offset
+ * separate from the first set of RARs.
+ */
+#define E1000_RAR_ENTRIES_82575        16
+#define E1000_RAR_ENTRIES_82576        24
+#define E1000_RAR_ENTRIES_82580        24
+#define E1000_RAR_ENTRIES_I350         32
+#define E1000_SW_SYNCH_MB              0x00000100
+#define E1000_STAT_DEV_RST_SET         0x00100000
+#define E1000_CTRL_DEV_RST             0x20000000
+
+#ifdef E1000_BIT_FIELDS
+struct e1000_adv_data_desc {
+       __le64 buffer_addr;    /* Address of the descriptor's data buffer */
+       union {
+               u32 data;
+               struct {
+                       u32 datalen :16; /* Data buffer length */
+                       u32 rsvd    :4;
+                       u32 dtyp    :4;  /* Descriptor type */
+                       u32 dcmd    :8;  /* Descriptor command */
+               } config;
+       } lower;
+       union {
+               u32 data;
+               struct {
+                       u32 status  :4;  /* Descriptor status */
+                       u32 idx     :4;
+                       u32 popts   :6;  /* Packet Options */
+                       u32 paylen  :18; /* Payload length */
+               } options;
+       } upper;
+};
+
+#define E1000_TXD_DTYP_ADV_C    0x2  /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D    0x3  /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT  0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4    0x2  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6    0x0  /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0  /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4  /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ  0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP      0x1  /* End of Packet */
+#define E1000_ADV_DCMD_IFCS     0x2  /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS       0x8  /* Report Status */
+#define E1000_ADV_DCMD_VLE      0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE      0x80 /* TCP Seg enable */
+/* Extended Device Control */
+#define E1000_CTRL_EXT_NSICR    0x00000001 /* Disable Intr Clear all on read */
+
+struct e1000_adv_context_desc {
+       union {
+               u32 ip_config;
+               struct {
+                       u32 iplen    :9;
+                       u32 maclen   :7;
+                       u32 vlan_tag :16;
+               } fields;
+       } ip_setup;
+       u32 seq_num;
+       union {
+               u64 l4_config;
+               struct {
+                       u32 mkrloc :9;
+                       u32 tucmd  :11;
+                       u32 dtyp   :4;
+                       u32 adv    :8;
+                       u32 rsvd   :4;
+                       u32 idx    :4;
+                       u32 l4len  :8;
+                       u32 mss    :16;
+               } fields;
+       } l4_setup;
+};
+#endif
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
+#define E1000_SRRCTL_TIMESTAMP                          0x40000000
+#define E1000_SRRCTL_DROP_EN                            0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
+
+#define E1000_TX_HEAD_WB_ENABLE   0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
+#define E1000_MRQC_ENABLE_VMDQ              0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q            0x00000002
+
+#define E1000_VMRCTL_MIRROR_PORT_SHIFT      8
+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK    (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
+#define E1000_VMRCTL_POOL_MIRROR_ENABLE     (1 << 0)
+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE   (1 << 1)
+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
+
+#define E1000_EICR_TX_QUEUE ( \
+    E1000_EICR_TX_QUEUE0 |    \
+    E1000_EICR_TX_QUEUE1 |    \
+    E1000_EICR_TX_QUEUE2 |    \
+    E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+    E1000_EICR_RX_QUEUE0 |    \
+    E1000_EICR_RX_QUEUE1 |    \
+    E1000_EICR_RX_QUEUE2 |    \
+    E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+    E1000_EIMS_RX_QUEUE  | \
+    E1000_EIMS_TX_QUEUE  | \
+    E1000_EIMS_TCP_TIMER | \
+    E1000_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
+#define E1000_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+       struct {
+               __le64 pkt_addr;             /* Packet buffer address */
+               __le64 hdr_addr;             /* Header buffer address */
+       } read;
+       struct {
+               struct {
+                       union {
+                               __le32 data;
+                               struct {
+                                       __le16 pkt_info; /*RSS type, Pkt type*/
+                                       /* Split Header, header buffer len */
+                                       __le16 hdr_info;
+                               } hs_rss;
+                       } lo_dword;
+                       union {
+                               __le32 rss;          /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;    /* IP id */
+                                       __le16 csum;     /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;     /* ext status/error */
+                       __le16 length;           /* Packet length */
+                       __le16 vlan;             /* VLAN tag */
+               } upper;
+       } wb;  /* writeback */
+};
+
+#define E1000_RXDADV_RSSTYPE_MASK        0x0000000F
+#define E1000_RXDADV_RSSTYPE_SHIFT       12
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_SPLITHEADER_EN      0x00001000
+#define E1000_RXDADV_SPH                 0x8000
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP           0x08000 /* timestamp in packet */
+#define E1000_RXDADV_ERR_HBO             0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE        0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP    0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4        0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP    0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX     0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6        0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP    0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP    0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define E1000_RXDADV_PKTTYPE_NONE        0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4        0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX     0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6        0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX     0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP         0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP         0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP        0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS         0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP   0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH    0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC     0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF        0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK   0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT  4          /* Right-shift 4 bits */
+
+/* LinkSec results */
+/* Security Processing bit Indication */
+#define E1000_RXDADV_LNKSEC_STATUS_SECP         0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP          0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK       0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED  0x18000000
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+       struct {
+               __le64 buffer_addr;    /* Address of descriptor's data buf */
+               __le32 cmd_type_len;
+               __le32 olinfo_status;
+       } read;
+       struct {
+               __le64 rsvd;       /* Reserved */
+               __le32 nxtseq_seed;
+               __le32 status;
+       } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI  0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC  0x00040000 /* Apply LinkSec on packet */
+#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
+#define E1000_ADVTXD_STAT_SN_CRC  0x00000002 /* NXTSEQ/SEED present in WB */
+#define E1000_ADVTXD_IDX_SHIFT    4  /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_IPSEC     0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+       __le32 vlan_macip_lens;
+       __le32 seqnum_seed;
+       __le32 type_tucmd_mlhl;
+       __le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT     16  /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000  /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP    0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN  0x00004000
+#define E1000_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK     0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK      0x000000FF
+
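As an informal illustration of how the shifts and TUCMD bits above compose, here is a sketch of filling a TCP-over-IPv4 offload context descriptor; the helper name and its parameters (VLAN tag, MAC/IP/L4 header lengths, MSS) are assumptions for the example, and byte-order conversion is omitted for brevity.

    /* Illustrative only: program an advanced Tx context descriptor for
     * TCP-over-IPv4 offload.  ctx is assumed to point at a free slot in the
     * Tx ring; explicit cpu-to-le32 conversion is omitted for brevity. */
    static inline void e1000_example_set_tcp_ipv4_ctx(
                    struct e1000_adv_tx_context_desc *ctx,
                    u16 vlan, u8 maclen, u16 iplen, u8 l4len, u16 mss)
    {
            ctx->vlan_macip_lens = ((u32)vlan << E1000_ADVTXD_VLAN_SHIFT) |
                                   ((u32)maclen << E1000_ADVTXD_MACLEN_SHIFT) |
                                   iplen;
            ctx->seqnum_seed = 0;
            ctx->type_tucmd_mlhl = E1000_ADVTXD_DTYP_CTXT |
                                   E1000_ADVTXD_DCMD_DEXT |
                                   E1000_ADVTXD_TUCMD_IPV4 |
                                   E1000_ADVTXD_TUCMD_L4T_TCP;
            ctx->mss_l4len_idx = ((u32)mss << E1000_ADVTXD_MSS_SHIFT) |
                                 ((u32)l4len << E1000_ADVTXD_L4LEN_SHIFT);
    }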
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH        0x04000000 /* Tx Desc. write-back flushing */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+#define E1000_TXDCTL_PRIORITY      0x08000000
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH        0x04000000 /* Rx Desc. write-back flushing */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+
+/* Additional interrupt register bit definitions */
+#define E1000_ICR_LSECPNS       0x00000020          /* PN threshold - server */
+#define E1000_IMS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
+#define E1000_ICS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_IMM_INT         (1 << 29)
+#define E1000_ETQF_1588            (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE    (1 << 31)
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ *                   to avoid filter collisions later. Add new filters
+ *                   here!!
+ *
+ * Current filters:
+ *    EAPOL 802.1x (0x888e): Filter 0
+ */
+#define E1000_ETQF_FILTER_EAPOL          0
+
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP   0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575          0x0400
+#define MAX_NUM_VFS                   8
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK   0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT        16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE        0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE        0x20000000 /* Unicast promiscuous enable */
+#define E1000_DVMOLR_HIDVLAN   0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN   0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC    0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB     0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE       0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE     32
+#define E1000_VLVF_VLANID_MASK    0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT  12
+#define E1000_VLVF_POOLSEL_MASK   (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN          0x00100000
+#define E1000_VLVF_VLANID_ENABLE  0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER   0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_TCTL_EXT_COLD       0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
+#define ALL_QUEUES   0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+
+enum e1000_promisc_type {
+       e1000_promisc_disabled = 0,   /* all promisc modes disabled */
+       e1000_promisc_unicast = 1,    /* unicast promiscuous enabled */
+       e1000_promisc_multicast = 2,  /* multicast promiscuous enabled */
+       e1000_promisc_enabled = 3,    /* both uni and multicast promisc */
+       e1000_num_promisc_types
+};
+
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_set_eee_i350(struct e1000_hw *);
+#endif /* _E1000_82575_H_ */
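To give a sense of how a PF driver might consume the VMDq helpers declared above, a hedged sketch follows; the function name is illustrative and pool 0 is merely assumed to belong to the PF.

    /* Illustrative only: enable VM-to-VM loopback, broadcast/multicast
     * replication and MAC/VLAN anti-spoof checking from the PF, assuming
     * the PF owns pool 0. */
    static void e1000_example_vmdq_setup(struct e1000_hw *hw)
    {
            e1000_vmdq_set_loopback_pf(hw, TRUE);
            e1000_vmdq_set_replication_pf(hw, TRUE);
            e1000_vmdq_set_anti_spoofing_pf(hw, TRUE, 0);
    }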
diff --git a/lib/librte_pmd_e1000/e1000/e1000_api.c b/lib/librte_pmd_e1000/e1000/e1000_api.c
new file mode 100644 (file)
index 0000000..fc41f73
--- /dev/null
@@ -0,0 +1,1152 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/**
+ *  e1000_init_mac_params - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the MAC
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->mac.ops.init_params) {
+               ret_val = hw->mac.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("MAC Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("mac.init_mac_params was NULL\n");
+               ret_val = -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the NVM
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->nvm.ops.init_params) {
+               ret_val = hw->nvm.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("NVM Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("nvm.init_nvm_params was NULL\n");
+               ret_val = -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the PHY
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->phy.ops.init_params) {
+               ret_val = hw->phy.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("PHY Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("phy.init_phy_params was NULL\n");
+               ret_val =  -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_params - Initialize mailbox function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the mailbox
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->mbx.ops.init_params) {
+               ret_val = hw->mbx.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("Mailbox Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("mbx.init_mbx_params was NULL\n");
+               ret_val =  -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  device ID stored in the hw structure.
+ *  MUST BE FIRST FUNCTION CALLED (explicitly or through
+ *  e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_set_mac_type");
+
+       switch (hw->device_id) {
+       case E1000_DEV_ID_82575EB_COPPER:
+       case E1000_DEV_ID_82575EB_FIBER_SERDES:
+       case E1000_DEV_ID_82575GB_QUAD_COPPER:
+               mac->type = e1000_82575;
+               break;
+       case E1000_DEV_ID_82576:
+       case E1000_DEV_ID_82576_FIBER:
+       case E1000_DEV_ID_82576_SERDES:
+       case E1000_DEV_ID_82576_QUAD_COPPER:
+       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+       case E1000_DEV_ID_82576_NS:
+       case E1000_DEV_ID_82576_NS_SERDES:
+       case E1000_DEV_ID_82576_SERDES_QUAD:
+               mac->type = e1000_82576;
+               break;
+       case E1000_DEV_ID_82580_COPPER:
+       case E1000_DEV_ID_82580_FIBER:
+       case E1000_DEV_ID_82580_SERDES:
+       case E1000_DEV_ID_82580_SGMII:
+       case E1000_DEV_ID_82580_COPPER_DUAL:
+       case E1000_DEV_ID_82580_QUAD_FIBER:
+       case E1000_DEV_ID_DH89XXCC_SGMII:
+       case E1000_DEV_ID_DH89XXCC_SERDES:
+       case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+       case E1000_DEV_ID_DH89XXCC_SFP:
+               mac->type = e1000_82580;
+               break;
+       case E1000_DEV_ID_I350_COPPER:
+       case E1000_DEV_ID_I350_FIBER:
+       case E1000_DEV_ID_I350_SERDES:
+       case E1000_DEV_ID_I350_SGMII:
+       case E1000_DEV_ID_I350_DA4:
+               mac->type = e1000_i350;
+               break;
+       case E1000_DEV_ID_82576_VF:
+               mac->type = e1000_vfadapt;
+               break;
+       case E1000_DEV_ID_I350_VF:
+               mac->type = e1000_vfadapt_i350;
+               break;
+       default:
+               /* Should never have loaded on this device */
+               ret_val = -E1000_ERR_MAC_INIT;
+               break;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_init_funcs - Initializes function pointers
+ *  @hw: pointer to the HW structure
+ *  @init_device: TRUE will initialize the rest of the function pointers
+ *                 getting the device ready for use.  FALSE will only set
+ *                 MAC type and the function pointers for the other init
+ *                 functions.  Passing FALSE will not generate any hardware
+ *                 reads or writes.
+ *
+ *  This function must be called by a driver in order to use the rest
+ *  of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+       s32 ret_val;
+
+       /* Can't do much good without knowing the MAC type. */
+       ret_val = e1000_set_mac_type(hw);
+       if (ret_val) {
+               DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+               goto out;
+       }
+
+       if (!hw->hw_addr) {
+               DEBUGOUT("ERROR: Registers not mapped\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       /*
+        * Init function pointers to generic implementations. We do this first
+        * allowing a driver module to override it afterward.
+        */
+       e1000_init_mac_ops_generic(hw);
+       e1000_init_phy_ops_generic(hw);
+       e1000_init_nvm_ops_generic(hw);
+       e1000_init_mbx_ops_generic(hw);
+
+       /*
+        * Set up the init function pointers. These are functions within the
+        * adapter family file that sets up function pointers for the rest of
+        * the functions in that family.
+        */
+       switch (hw->mac.type) {
+       case e1000_82575:
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
+               e1000_init_function_pointers_82575(hw);
+               break;
+       case e1000_vfadapt:
+               e1000_init_function_pointers_vf(hw);
+               break;
+       case e1000_vfadapt_i350:
+               e1000_init_function_pointers_vf(hw);
+               break;
+       default:
+               DEBUGOUT("Hardware not supported\n");
+               ret_val = -E1000_ERR_CONFIG;
+               break;
+       }
+
+       /*
+        * Initialize the rest of the function pointers. These require some
+        * register reads/writes in some cases.
+        */
+       if (!(ret_val) && init_device) {
+               ret_val = e1000_init_mac_params(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = e1000_init_nvm_params(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = e1000_init_phy_params(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = e1000_init_mbx_params(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This will obtain information about the HW bus for which the
+ *  adapter is attached and stores it in the hw structure. This is a
+ *  function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.get_bus_info)
+               return hw->mac.ops.get_bus_info(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  This clears the VLAN filter table on the adapter. This is a function
+ *  pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.clear_vfta)
+               hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ *  e1000_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset at which to write the value.
+ *  @value: the 32-bit value to write at location offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+       if (hw->mac.ops.write_vfta)
+               hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ *  e1000_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates the Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+                               u32 mc_addr_count)
+{
+       if (hw->mac.ops.update_mc_addr_list)
+               hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+                                               mc_addr_count);
+}
+
+/**
+ *  e1000_force_mac_fc - Force MAC flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings. Currently no func pointer exists
+ *  and all implementations are handled in the generic version of this
+ *  function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+       return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ *  e1000_check_for_link - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.check_for_link)
+               return hw->mac.ops.check_for_link(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_check_mng_mode - Check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.check_mng_mode)
+               return hw->mac.ops.check_mng_mode(hw);
+
+       return FALSE;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+       return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ *  e1000_reset_hw - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.reset_hw)
+               return hw->mac.ops.reset_hw(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_init_hw - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation. This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.init_hw)
+               return hw->mac.ops.init_hw(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_link - Configures link and flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  This configures link and flow control settings for the adapter. This
+ *  is a function pointer entry point called by drivers. While modules can
+ *  also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.setup_link)
+               return hw->mac.ops.setup_link(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_get_speed_and_duplex - Returns current speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to a 16-bit value to store the speed
+ *  @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ *  This returns the speed and duplex of the adapter in the two 'out'
+ *  variables passed in. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+       if (hw->mac.ops.get_link_up_info)
+               return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_led - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.setup_led)
+               return hw->mac.ops.setup_led(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_cleanup_led - Restores SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This restores the SW controllable LED to the value saved off by
+ *  e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.cleanup_led)
+               return hw->mac.ops.cleanup_led(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led - Blink SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This starts the adapter LED blinking. Request the LED to be setup first
+ *  and cleaned up after. This is a function pointer entry point called by
+ *  drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.blink_led)
+               return hw->mac.ops.blink_led(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_id_led_init - store LED configurations in SW
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the LED config in SW. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.id_led_init)
+               return hw->mac.ops.id_led_init(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.led_on)
+               return hw->mac.ops.led_on(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.led_off)
+               return hw->mac.ops.led_off(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_adaptive - Reset adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the adaptive IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+       e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_update_adaptive - Update adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates adapter IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+       e1000_update_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_disable_pcie_master - Disable PCI-Express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Currently no func pointer exists and all implementations are
+ *  handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+       return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ *  e1000_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.config_collision_dist)
+               hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ *  e1000_rar_set - Sets a receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: address to set the RAR to
+ *  @index: the RAR to set
+ *
+ *  Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       if (hw->mac.ops.rar_set)
+               hw->mac.ops.rar_set(hw, addr, index);
+}
+
+/**
+ *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ *  @hw: pointer to the HW structure
+ *
+ *  Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.validate_mdi_setting)
+               return hw->mac.ops.validate_mdi_setting(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_hash_mc_addr - Determines address location in multicast table
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: Multicast address to hash.
+ *
+ *  This hashes an address to determine its location in the multicast
+ *  table. Currently no func pointer exists and all implementations
+ *  are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+       return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+       return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ *  e1000_mng_host_if_write - Writes to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  It also handles alignment so that the writes are done in the
+ *  most efficient way, and accumulates the checksum of the data written in
+ *  the *sum parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
+                            u16 offset, u8 *sum)
+{
+       if (hw->mac.ops.mng_host_if_write)
+               return hw->mac.ops.mng_host_if_write(hw, buffer, length,
+                                                    offset, sum);
+
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_write_cmd_header - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                               struct e1000_host_mng_command_header *hdr)
+{
+       if (hw->mac.ops.mng_write_cmd_header)
+               return hw->mac.ops.mng_write_cmd_header(hw, hdr);
+
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_enable_host_if - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command has completed.  It busy-waits
+ *  if the previous command has not completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
+{
+       if (hw->mac.ops.mng_enable_host_if)
+               return hw->mac.ops.mng_enable_host_if(hw);
+
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_wait_autoneg - Waits for autonegotiation completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for autoneg to complete. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.wait_autoneg)
+               return hw->mac.ops.wait_autoneg(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block - Verifies PHY can be reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if the PHY is in a state that can be reset or if manageability
+ *  has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.check_reset_block)
+               return hw->phy.ops.check_reset_block(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg - Reads PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the buffer to store the 16-bit read.
+ *
+ *  Reads the PHY register and returns the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       if (hw->phy.ops.read_reg)
+               return hw->phy.ops.read_reg(hw, offset, data);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg - Writes PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       if (hw->phy.ops.write_reg)
+               return hw->phy.ops.write_reg(hw, offset, data);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_phy - Generic release PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return if silicon family does not require a semaphore when accessing the
+ *  PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.release)
+               hw->phy.ops.release(hw);
+}
+
+/**
+ *  e1000_acquire_phy - Generic acquire PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if silicon family does not require a semaphore when
+ *  accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.acquire)
+               return hw->phy.ops.acquire(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_kmrn_reg - Reads register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the location to store the 16-bit value read.
+ *
+ *  Reads a register out of the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_write_kmrn_reg - Writes register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes a register to the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_get_cable_length - Retrieves cable length estimation
+ *  @hw: pointer to the HW structure
+ *
+ *  This function estimates the cable length and stores them in
+ *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.get_cable_length)
+               return hw->phy.ops.get_cable_length(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_info - Retrieves PHY information from registers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function gets some information from various PHY registers and
+ *  populates hw->phy values with it. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.get_info)
+               return hw->phy.ops.get_info(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_hw_reset - Hard PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a hard PHY reset. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.reset)
+               return hw->phy.ops.reset(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_commit - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.commit)
+               return hw->phy.ops.commit(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D0
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+       if (hw->phy.ops.set_d0_lplu_state)
+               return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+       if (hw->phy.ops.set_d3_lplu_state)
+               return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr - Reads MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MAC address out of the adapter and stores it in the HW structure.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.read_mac_addr)
+               return hw->mac.ops.read_mac_addr(hw);
+
+       return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ *  e1000_read_pba_string - Read device part number string
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+       return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ *  e1000_read_pba_length - Read device part number string length
+ *  @hw: pointer to the HW structure
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number length from the EEPROM and
+ *  stores the value in pba_num_size.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+       return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
+ *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Validates the NVM checksum is correct. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+       if (hw->nvm.ops.validate)
+               return hw->nvm.ops.validate(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the NVM checksum. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+       if (hw->nvm.ops.update)
+               return hw->nvm.ops.update(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_reload_nvm - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+       if (hw->nvm.ops.reload)
+               hw->nvm.ops.reload(hw);
+}
+
+/**
+ *  e1000_read_nvm - Reads NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to read
+ *  @words: number of 16-bit words to read
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       if (hw->nvm.ops.read)
+               return hw->nvm.ops.read(hw, offset, words, data);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_write_nvm - Writes to NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to write to
+ *  @words: number of 16-bit words to write
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       if (hw->nvm.ops.write)
+               return hw->nvm.ops.write(hw, offset, words, data);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the 8-bit value in data at the given offset through the 32-bit
+ *  control register reg.  Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+                              u8 data)
+{
+       return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when Wake on LAN is not enabled (among others).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.power_up)
+               hw->phy.ops.power_up(hw);
+
+       e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when Wake on LAN is not enabled (among others).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.power_down)
+               hw->phy.ops.power_down(hw);
+}
+
+/**
+ *  e1000_power_up_fiber_serdes_link - Power up serdes link
+ *  @hw: pointer to the HW structure
+ *
+ *  Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.power_up_serdes)
+               hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
+ *  e1000_shutdown_fiber_serdes_link - Remove link during power down
+ *  @hw: pointer to the HW structure
+ *
+ *  Shutdown the optics and PCS on driver unload.
+ **/
+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.shutdown_serdes)
+               hw->mac.ops.shutdown_serdes(hw);
+}
+
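Before the header that declares these entry points, a short, hedged sketch of the order in which a driver would typically call them; error handling is trimmed, the function name is illustrative, and hw->device_id and hw->hw_addr are assumed to be filled in by the caller.

    /* Illustrative only: typical bring-up through this dispatch layer. */
    static s32 e1000_example_bringup(struct e1000_hw *hw)
    {
            s32 ret;

            ret = e1000_setup_init_funcs(hw, TRUE); /* MAC/PHY/NVM/mbx ops */
            if (ret != E1000_SUCCESS)
                    return ret;

            e1000_get_bus_info(hw);

            ret = e1000_reset_hw(hw);               /* put HW in a known state */
            if (ret != E1000_SUCCESS)
                    return ret;

            ret = e1000_init_hw(hw);                /* MAC, link, flow control */
            if (ret != E1000_SUCCESS)
                    return ret;

            return e1000_read_mac_addr(hw);         /* permanent MAC into hw->mac */
    }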
diff --git a/lib/librte_pmd_e1000/e1000/e1000_api.h b/lib/librte_pmd_e1000/e1000/e1000_api.h
new file mode 100644 (file)
index 0000000..daf8642
--- /dev/null
@@ -0,0 +1,156 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void    e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void    e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void    e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void    e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+
+s32  e1000_set_mac_type(struct e1000_hw *hw);
+s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32  e1000_init_mac_params(struct e1000_hw *hw);
+s32  e1000_init_nvm_params(struct e1000_hw *hw);
+s32  e1000_init_phy_params(struct e1000_hw *hw);
+s32  e1000_init_mbx_params(struct e1000_hw *hw);
+s32  e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32  e1000_force_mac_fc(struct e1000_hw *hw);
+s32  e1000_check_for_link(struct e1000_hw *hw);
+s32  e1000_reset_hw(struct e1000_hw *hw);
+s32  e1000_init_hw(struct e1000_hw *hw);
+s32  e1000_setup_link(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
+                                u16 *duplex);
+s32  e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw,
+                               u8 *mc_addr_list, u32 mc_addr_count);
+s32  e1000_setup_led(struct e1000_hw *hw);
+s32  e1000_cleanup_led(struct e1000_hw *hw);
+s32  e1000_check_reset_block(struct e1000_hw *hw);
+s32  e1000_blink_led(struct e1000_hw *hw);
+s32  e1000_led_on(struct e1000_hw *hw);
+s32  e1000_led_off(struct e1000_hw *hw);
+s32  e1000_id_led_init(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32  e1000_get_cable_length(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+                               u32 offset, u8 data);
+s32  e1000_get_phy_info(struct e1000_hw *hw);
+void e1000_release_phy(struct e1000_hw *hw);
+s32  e1000_acquire_phy(struct e1000_hw *hw);
+s32  e1000_phy_hw_reset(struct e1000_hw *hw);
+s32  e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32  e1000_read_mac_addr(struct e1000_hw *hw);
+s32  e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, 
+                           u32 pba_num_size);
+s32  e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+                     u16 *data);
+s32  e1000_wait_autoneg(struct e1000_hw *hw);
+s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write(struct e1000_hw *hw,
+                             u8 *buffer, u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                                struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info(struct e1000_hw * hw,
+                                    u8 *buffer, u16 length);
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the Rx descriptor with EOP set
+ *      errors = the 8 bit error field of the Rx descriptor with EOP set
+ *      length = the sum of all the length fields of the Rx descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      max_frame_size = the maximum frame length we want to accept.
+ *      min_frame_size = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT) {
+ *      accept_frame = TRUE;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = FALSE;
+ *  }
+ *  ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
+    (e1000_tbi_sbp_enabled_82543(a) && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= (max_frame_size + 1))) : \
+          (((length) > min_frame_size) && \
+           ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+#endif
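As a final illustration of the NVM entry points declared above, a hedged sketch of validating the EEPROM checksum and then reading a single word; the word offset 0 is just an example value and the function name is made up for the sketch.

    /* Illustrative only: validate the EEPROM checksum, then read one word. */
    static s32 e1000_example_read_nvm_word(struct e1000_hw *hw, u16 *word)
    {
            s32 ret = e1000_validate_nvm_checksum(hw);

            if (ret != E1000_SUCCESS)
                    return ret;

            return e1000_read_nvm(hw, 0 /* example offset */, 1, word);
    }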
diff --git a/lib/librte_pmd_e1000/e1000/e1000_defines.h b/lib/librte_pmd_e1000/e1000/e1000_defines.h
new file mode 100644 (file)
index 0000000..a7be67c
--- /dev/null
@@ -0,0 +1,1733 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_LSCWE      0x00000010 /* Link Status wake up enable */
+#define E1000_WUC_PPROXYE    0x00000010 /* Protocol Proxy Enable */
+#define E1000_WUC_LSCWO      0x00000020 /* Link Status wake up override */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */
+#define E1000_WUFC_ALL_FILTERS  0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET   16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS  0x000F0000 /*Mask for the 4 flexible filters */
+/*
+ * For 82576 to utilize Extended filter masks in addition to
+ * existing (filter) masks
+ */
+#define E1000_WUFC_EXT_FLX_FILTERS      0x00300000 /* Ext. FLX filter mask */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+#define E1000_WUS_ARP          E1000_WUFC_ARP
+#define E1000_WUS_IPV4         E1000_WUFC_IPV4
+#define E1000_WUS_IPV6         E1000_WUFC_IPV6
+#define E1000_WUS_FLX0         E1000_WUFC_FLX0
+#define E1000_WUS_FLX1         E1000_WUFC_FLX1
+#define E1000_WUS_FLX2         E1000_WUFC_FLX2
+#define E1000_WUS_FLX3         E1000_WUFC_FLX3
+#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+/* Two Extended Flexible Filters are supported (82576) */
+#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
+#define E1000_FHFT_LENGTH_OFFSET        0xFC /* Length byte in FHFT */
+#define E1000_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+/* Reserved (bits 4,5) in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* Direction of SDP3 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD    0x00004000
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_CANC           0x04000000 /* Int delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+/* IAME enable bit (27) was removed in >= 82575 */
+#define E1000_CTRL_EXT_IAME          0x08000000 /* Int acknowledge Auto-mask */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error
+                                                  * detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity
+                                                  * error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT   16
+#define E1000_I2CCMD_REG_ADDR         0x00FF0000
+#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
+#define E1000_I2CCMD_PHY_ADDR         0x07000000
+#define E1000_I2CCMD_OPCODE_READ      0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
+#define E1000_I2CCMD_RESET            0x10000000
+#define E1000_I2CCMD_READY            0x20000000
+#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
+#define E1000_I2CCMD_ERROR            0x80000000
+#define E1000_MAX_SGMII_PHY_REG_ADDR  255
+#define E1000_I2CCMD_PHY_TIMEOUT      200
+#define E1000_IVAR_VALID        0x80
+#define E1000_GPIE_NSICR        0x00000001
+#define E1000_GPIE_MSIX_MODE    0x00000010
+#define E1000_GPIE_EIAME        0x40000000
+#define E1000_GPIE_PBA          0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_LB    0x00040000
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
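+
+/*
+ * Example usage (illustrative; the rx_desc field and drop_frame() names are
+ * assumptions, not defined in this header): a receive path typically drops
+ * any frame whose error byte carries one of the bits in the mask above:
+ *     if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)
+ *             drop_frame(rx_desc);
+ */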
+
+#define E1000_MRQC_ENABLE_MASK                 0x00000007
+#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
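+
+/*
+ * Example usage (illustrative): enabling RSS over two queues with IPv4 and
+ * IPv4/TCP hashing combines the enable and hash field bits above:
+ *     mrqc = E1000_MRQC_ENABLE_RSS_2Q |
+ *            E1000_MRQC_RSS_FIELD_IPV4 |
+ *            E1000_MRQC_RSS_FIELD_IPV4_TCP;
+ */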
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
+/* Enable IP address filtering */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+#define E1000_MANC_MPROXYE       0x40000000 /* Mngment Proxy Enable */
+#define E1000_MANC_EN_BMC2OS     0x10000000 /* Enable OS2BMC traffic */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+#define E1000_MANC2H_PORT_623    0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664    0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623      0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664      0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promisc enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promisc enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* Rx desc min thresh size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
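+
+/*
+ * Example usage (illustrative): buffer sizes above 2048 bytes need the
+ * buffer size extension bit in addition to the size field, e.g. for
+ * 4096-byte receive buffers:
+ *     rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ * whereas 2048-byte buffers use E1000_RCTL_SZ_2048 with BSEX left clear.
+ */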
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
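+
+/*
+ * Worked example with the defaults listed above (value0=256, value1=4096,
+ * value2=4096, value3=0):
+ *     (256  >> 7) & BSIZE0_MASK = 0x00000002
+ *     (4096 >> 2) & BSIZE1_MASK = 0x00000400
+ *     (4096 << 6) & BSIZE2_MASK = 0x00040000
+ *     (0   << 14) & BSIZE3_MASK = 0x00000000
+ * so the packed PSRCTL value is 0x00040402.
+ */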
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x01
+#define E1000_SWFW_PHY0_SM  0x02
+#define E1000_SWFW_PHY1_SM  0x04
+#define E1000_SWFW_CSR_SM   0x08
+#define E1000_SWFW_PHY2_SM  0x20
+#define E1000_SWFW_PHY3_SM  0x40
+#define E1000_SWFW_SW_MNG_SM 0x400
+
+/* FACTPS Definitions */
+#define E1000_FACTPS_LFS    0x40000000  /* LAN Function Select */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock
+                                             * indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through
+                                               * PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
+                                           * LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_PCS_CFG_PCS_EN             8
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_10            0
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
+#define E1000_PCS_LCTL_FORCE_FCTRL       0x80
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
+#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
+#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
+#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
+#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_10          0
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
+#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
+#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
+#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
+#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200  /* Lan Init Completion by NVM */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_PHYRA      0x00000400      /* PHY Reset Asserted */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state.
+                                                 * Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution
+                                            * disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /*PCI-X bus speed 100-133 MHz*/
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define PHY_FORCE_TIME   20
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX  (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG       (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_SHIFT 8         /* POPTS shift */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable Tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+#define E1000_RFCTL_LEF                 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
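+
+/*
+ * Example usage (illustrative): these defaults are shifted into the TCTL
+ * collision threshold and collision distance fields defined earlier:
+ *     tctl |= (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
+ *             (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+ */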
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE       4
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG           0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_SPD_EN             0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_6K  0x0006    /* 6KB */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_10K 0x000A    /* 10KB */
+#define E1000_PBA_12K 0x000C    /* 12KB */
+#define E1000_PBA_14K 0x000E    /* 14KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
+#define E1000_PBA_18K 0x0012
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_26K 0x001A
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_35K 0x0023
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB */
+#define E1000_PBA_64K 0x0040    /* 64KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+#define E1000_PBS_24K E1000_PBA_24K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK        0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver
+                                            * should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW
+                                            * bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates
+                                            * an interrupt */
+#define E1000_ICR_DOUTSYNC      0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
+#define E1000_ICR_FER           0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS           0x00800000 /* ICR.THS: Thermal Sensor Event*/
+#define E1000_ICR_MDDET         0x10000000 /* Malicious Driver Detect */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ)
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
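+
+/*
+ * Example usage (a sketch; E1000_WRITE_REG and the E1000_IMS register offset
+ * are assumed to come from e1000_osdep.h/e1000_regs.h): a driver enables
+ * this default cause set by writing the mask to the Interrupt Mask Set
+ * register:
+ *     E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+ */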
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* Rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
+                                                         * parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
+                                                         * error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+#define E1000_IMS_FER           E1000_ICR_FER /* Fatal Error */
+
+#define E1000_IMS_THS           E1000_ICR_THS /* ICR.THS: Thermal Sensor Event */
+#define E1000_IMS_MDDET         E1000_ICR_MDDET /* Malicious Driver Detect */
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* Rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
+                                                         * parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
+                                                         * error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR     0x80000000 /* Don't reset counters on write */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH    0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH    0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH    0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN       0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH   0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
+#define E1000_RAH_POOL_MASK     0x03FC0000
+#define E1000_RAH_POOL_SHIFT    18
+#define E1000_RAH_POOL_1        0x00040000
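+
+/*
+ * Example usage (illustrative): a receive address entry only takes part in
+ * filtering once the Address Valid bit is set in the high register half:
+ *     rah |= E1000_RAH_AV;
+ */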
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX      15
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
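+
+/*
+ * Example usage (a sketch; E1000_READ_REG, the E1000_STATUS register offset
+ * and usec_delay() are assumed to come from e1000_osdep.h/e1000_regs.h):
+ * the master disable limit bounds a 100 microsecond polling loop such as
+ *     timeout = MASTER_DISABLE_TIMEOUT;
+ *     while (timeout && (E1000_READ_REG(hw, E1000_STATUS) &
+ *                        E1000_STATUS_GIO_MASTER_ENABLE)) {
+ *             usec_delay(100);
+ *             timeout--;
+ *     }
+ */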
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_CC         0x10000000        /* Receive config change */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
+
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable Rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK   0xF0008000     /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE   0x100          /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK  0xFF           /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP   0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP   0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP  0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5              /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT    16             /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK  0x70000        /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE    0x10000000     /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK     0xF001FFFF     /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS    0x20000        /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29             /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK  0x7FFFF        /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK    0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT   21
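+
+/*
+ * Example usage (illustrative): the PHY address programmed by firmware is
+ * recovered from MDICNFG with
+ *     phy_addr = (mdicnfg & E1000_MDICNFG_PHY_MASK) >>
+ *                E1000_MDICNFG_PHY_SHIFT;
+ */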
+
+#define E1000_THSTAT_LOW_EVENT      0x20000000  /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT      0x00200000  /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT     0x00002000  /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN       0x00000001  /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE  0x00000002  /* Link Speed Throttle Event */
+
+/* Powerville EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN      0x00000008  /* IPCNFG EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN    0x00000004  /* IPCNFG EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN        0x00010000  /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN        0x00020000  /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC           0x00040000  /* EEER Enable on Flow Control*/
+/* EEE status */
+#define E1000_EEER_EEE_NEG          0x20000000  /* EEE capability negotiated */
+#define E1000_EEER_RX_LPI_STATUS    0x40000000  /* Rx in LPI state */
+#define E1000_EEER_TX_LPI_STATUS    0x80000000  /* Tx in LPI state */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define E1000_GCR_CAP_VER2              0x00040000
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                           E1000_GCR_RXDSCW_NO_SNOOP      | \
+                           E1000_GCR_RXDSCR_NO_SNOOP      | \
+                           E1000_GCR_TXD_NO_SNOOP         | \
+                           E1000_GCR_TXDSCW_NO_SNOOP      | \
+                           E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
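+/*
+ * Usage sketch (illustrative only): forcing 100 Mb/s full duplex through the
+ * PHY Control register combines the speed-select and duplex bits with
+ * auto-negotiation disabled before the value is written back to PHY_CONTROL:
+ *
+ *     phy_ctrl &= ~(u16)(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_1000);
+ *     phy_ctrl |= MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;
+ */
+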
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New link code word page received */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE      0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR  0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local Tx is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB   0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_BLOCKED   0x00008000 /* Bit banging access blocked flag */
+#define E1000_EECD_ABORT     0x00010000 /* NVM operation aborted flag */
+#define E1000_EECD_TIMEOUT   0x00020000 /* NVM read operation timeout flag */
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_NVM_GRANT_ATTEMPTS
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+
+#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
+#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_NVM_RW_REG_DATA   16  /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_VERSION                0x0005
+#define NVM_SERDES_AMPLITUDE       0x0006 /* SERDES output amplitude */
+#define NVM_PHY_CLASS_WORD         0x0007
+#define NVM_INIT_CONTROL1_REG      0x000A
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_FLASH_VERSION          0x0032
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+#define NVM_COMPATIBILITY_REG_3    0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+#define NVM_WORD0F_ANE              0x0800
+#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
+#define NVM_WORD0F_LPLU             0x0001
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH             11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
+
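+/*
+ * Checksum sketch (illustrative only): validation sums words 0 through
+ * NVM_CHECKSUM_REG inclusive and compares the result against NVM_SUM,
+ * roughly:
+ *
+ *     u16 i, word, checksum = 0;
+ *
+ *     for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ *             if (hw->nvm.ops.read(hw, i, 1, &word) != E1000_SUCCESS)
+ *                     break;
+ *             checksum += word;
+ *     }
+ *     valid = (checksum == (u16)NVM_SUM);
+ */
+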
+#define NVM_MAC_ADDR_OFFSET        0
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_PBA_PTR_GUARD          0xFAFA
+#define NVM_RESERVED_WORD          0xFFFF
+#define NVM_PHY_CLASS_A            0x8000
+#define NVM_SERDES_AMPLITUDE_MASK  0x000F
+#define NVM_SIZE_MASK              0x1C00
+#define NVM_SIZE_SHIFT             10
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+#define NVM_SWDPIO_EXT_SHIFT       4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE  0x6  /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5  /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7  /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE  0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE  0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+#define NVM_STATUS_WEN_SPI         0x02
+#define NVM_STATUS_BP0_SPI         0x04
+#define NVM_STATUS_BP1_SPI         0x08
+#define NVM_STATUS_WPEN_SPI        0x80
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+                              (ID_LED_OFF1_OFF2 <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER        0xE6
+#define PCIX_STATUS_REGISTER_LO      0xE8
+#define PCIX_STATUS_REGISTER_HI      0xEA
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+#define PCIE_DEVICE_CONTROL2         0x28
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+#define PCIX_STATUS_LO_FUNC_MASK     0x7
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+#define PCIE_LINK_SPEED_MASK         0x0F
+#define PCIE_LINK_SPEED_2500         0x01
+#define PCIE_LINK_SPEED_5000         0x02
+#define PCIE_DEVICE_CONTROL2_16ms    0x0005
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN                 6
+#endif
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1011_I_REV_4     0x04
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define M88E1112_E_PHY_ID    0x01410C90
+#define I347AT4_E_PHY_ID     0x01410DC0
+#define M88E1340M_E_PHY_ID   0x01410DF0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define I82580_I_PHY_ID      0x015403A0
+#define I350_I_PHY_ID        0x015403B0
+#define IGP04E1000_E_PHY_ID  0x02A80391
+#define M88_VENDOR           0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000 /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Tx */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
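+/*
+ * Decoding sketch (illustrative only): the cable length group is extracted
+ * from the PHY Specific Status register as
+ *
+ *     index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ *             M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ *
+ * where index 0..4 selects one of the ranges listed above (<50m .. >140m).
+ */
+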
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+/*
+ * 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5       0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25        0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0         0x0000 /* NO  TX_CLK */
+
+/* M88E1111 Specific Registers */
+#define M88E1111_PHY_PAGE_SELECT1       0x16  /* for registers 0-28 */
+#define M88E1111_PHY_PAGE_SELECT2       0x1D  /* for registers 30-31 */
+
+/* M88E1111 page select register mask */
+#define M88E1111_PHY_PAGE_SELECT_MASK1  0xFF
+#define M88E1111_PHY_PAGE_SELECT_MASK2  0x3F
+
+/* Intel I347AT4 Registers */
+
+#define I347AT4_PCDL            0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC            0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT     0x16
+
+/* I347AT4 Extended PHY Specific Control Register */
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK   0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X     0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X     0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X     0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X     0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X     0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X     0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X     0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X     0x7000
+
+/* I347AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT  0x0400 /* 0=cm 1=meters */
+
+/* M88E1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE       0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
+
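+/*
+ * Access sketch (illustrative only): a register token built with
+ * GG82563_REG() carries the page in bits 15:5 and the MDI offset in bits
+ * 4:0, so a paged access typically writes the page number to the page
+ * select register and then addresses only the low five bits:
+ *
+ *     page = offset >> GG82563_PAGE_SHIFT;
+ *     reg  = offset & MAX_PHY_REG_ADDRESS;
+ *
+ * e.g. GG82563_PHY_KMRN_MODE_CTRL below expands to ((193 << 5) | 16).
+ */
+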
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+#define E1000_MDIC_DEST      0x80000000
+
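+/*
+ * Read sketch (illustrative only): an MDIC-based PHY register read composes
+ * the command from the fields above, polls READY and checks ERROR before
+ * taking the data, assuming E1000_MDIC from e1000_regs.h and the accessors
+ * from e1000_osdep.h:
+ *
+ *     mdic = (offset << E1000_MDIC_REG_SHIFT) |
+ *            (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ *            E1000_MDIC_OP_READ;
+ *     E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+ *     do {
+ *             usec_delay(50);
+ *             mdic = E1000_READ_REG(hw, E1000_MDIC);
+ *     } while (!(mdic & E1000_MDIC_READY));
+ *     if (!(mdic & E1000_MDIC_ERROR))
+ *             *data = (u16)(mdic & E1000_MDIC_DATA_MASK);
+ */
+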
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+
+/* LinkSec register fields */
+#define E1000_LSECTXCAP_SUM_MASK        0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT       16
+#define E1000_LSECRXCAP_SUM_MASK        0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT       16
+
+#define E1000_LSECTXCTRL_EN_MASK        0x00000003
+#define E1000_LSECTXCTRL_DISABLE        0x0
+#define E1000_LSECTXCTRL_AUTH           0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT   0x2
+#define E1000_LSECTXCTRL_AISCI          0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK       0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK        0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT       2
+#define E1000_LSECRXCTRL_DISABLE        0x0
+#define E1000_LSECRXCTRL_CHECK          0x1
+#define E1000_LSECRXCTRL_STRICT         0x2
+#define E1000_LSECRXCTRL_DROP           0x3
+#define E1000_LSECRXCTRL_PLSH           0x00000040
+#define E1000_LSECRXCTRL_RP             0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK       0xFFFFFF33
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA          0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK     0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT     14
+#define E1000_RTTBCNRC_RF_INT_MASK     \
+       (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
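+/*
+ * Encoding sketch (illustrative only): the rate factor written to the
+ * E1000_RTTBCNRC register (see e1000_regs.h) is a fixed-point ratio of link
+ * speed to the requested per-queue rate with 14 fractional bits; one common
+ * way to build it, with both speeds in the same units (e.g. Mb/s):
+ *
+ *     bcnrc = E1000_RTTBCNRC_RS_ENA |
+ *             (((link_speed << E1000_RTTBCNRC_RF_INT_SHIFT) / tx_rate) &
+ *              (E1000_RTTBCNRC_RF_INT_MASK | E1000_RTTBCNRC_RF_DEC_MASK));
+ */
+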
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coalescing
+                                                    * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coalescing Rx
+                                                    * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT       16
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe
+                                                    * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT       28
+#define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
+                                                    * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate
+                                                    * Threshold */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx packet rate in
+                                                    * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Traffic
+                                                    * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* Flow ctrl Rx Threshold
+                                                    * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT      4
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
+                                                      on DMA coal */
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0               0x00000001  /* Enable offload in D0 */
+#define E1000_PROXYFC_EX               0x00000004  /* Directed exact proxy */
+#define E1000_PROXYFC_MC               0x00000008  /* Directed Multicast
+                                                    * Proxy */
+#define E1000_PROXYFC_BC               0x00000010  /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED     0x00000020  /* Directed ARP Proxy
+                                                    * Enable */
+#define E1000_PROXYFC_IPV4             0x00000040  /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6             0x00000080  /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS               0x00000200  /* IPv6 Neighbor
+                                                    * Solicitation */
+#define E1000_PROXYFC_ARP              0x00000800  /* ARP Request Proxy
+                                                    * Enable */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR             0xFFFFFFFF  /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI               0x80000000 /* Firmware Reset
+                                                   * Indication */
+
+
+#endif /* _E1000_DEFINES_H_ */
diff --git a/lib/librte_pmd_e1000/e1000/e1000_hw.h b/lib/librte_pmd_e1000/e1000/e1000_hw.h
new file mode 100644 (file)
index 0000000..bed673b
--- /dev/null
@@ -0,0 +1,767 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576                    0x10C9
+#define E1000_DEV_ID_82576_FIBER              0x10E6
+#define E1000_DEV_ID_82576_SERDES             0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2    0x1526
+#define E1000_DEV_ID_82576_NS                 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES          0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
+#define E1000_DEV_ID_82576_VF                 0x10CA
+#define E1000_DEV_ID_I350_VF                  0x1520
+#define E1000_DEV_ID_82575EB_COPPER           0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
+#define E1000_DEV_ID_82580_COPPER             0x150E
+#define E1000_DEV_ID_82580_FIBER              0x150F
+#define E1000_DEV_ID_82580_SERDES             0x1510
+#define E1000_DEV_ID_82580_SGMII              0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
+#define E1000_DEV_ID_I350_COPPER              0x1521
+#define E1000_DEV_ID_I350_FIBER               0x1522
+#define E1000_DEV_ID_I350_SERDES              0x1523
+#define E1000_DEV_ID_I350_SGMII               0x1524
+#define E1000_DEV_ID_I350_DA4                 0x1546
+#define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP             0x0440
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+#define E1000_FUNC_2     2
+#define E1000_FUNC_3     3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9
+
+enum e1000_mac_type {
+       e1000_undefined = 0,
+       e1000_82575,
+       e1000_82576,
+       e1000_82580,
+       e1000_i350,
+       e1000_vfadapt,
+       e1000_vfadapt_i350,
+       e1000_num_macs  /* List is 1-based, so subtract 1 for TRUE count. */
+};
+
+enum e1000_media_type {
+       e1000_media_type_unknown = 0,
+       e1000_media_type_copper = 1,
+       e1000_media_type_fiber = 2,
+       e1000_media_type_internal_serdes = 3,
+       e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+       e1000_nvm_unknown = 0,
+       e1000_nvm_none,
+       e1000_nvm_eeprom_spi,
+       e1000_nvm_eeprom_microwire,
+       e1000_nvm_flash_hw,
+       e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+       e1000_nvm_override_none = 0,
+       e1000_nvm_override_spi_small,
+       e1000_nvm_override_spi_large,
+       e1000_nvm_override_microwire_small,
+       e1000_nvm_override_microwire_large
+};
+
+enum e1000_phy_type {
+       e1000_phy_unknown = 0,
+       e1000_phy_none,
+       e1000_phy_m88,
+       e1000_phy_igp,
+       e1000_phy_igp_2,
+       e1000_phy_gg82563,
+       e1000_phy_igp_3,
+       e1000_phy_ife,
+       e1000_phy_82580,
+       e1000_phy_vf,
+};
+
+enum e1000_bus_type {
+       e1000_bus_type_unknown = 0,
+       e1000_bus_type_pci,
+       e1000_bus_type_pcix,
+       e1000_bus_type_pci_express,
+       e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+       e1000_bus_speed_unknown = 0,
+       e1000_bus_speed_33,
+       e1000_bus_speed_66,
+       e1000_bus_speed_100,
+       e1000_bus_speed_120,
+       e1000_bus_speed_133,
+       e1000_bus_speed_2500,
+       e1000_bus_speed_5000,
+       e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+       e1000_bus_width_unknown = 0,
+       e1000_bus_width_pcie_x1,
+       e1000_bus_width_pcie_x2,
+       e1000_bus_width_pcie_x4 = 4,
+       e1000_bus_width_pcie_x8 = 8,
+       e1000_bus_width_32,
+       e1000_bus_width_64,
+       e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+       e1000_1000t_rx_status_not_ok = 0,
+       e1000_1000t_rx_status_ok,
+       e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+       e1000_rev_polarity_normal = 0,
+       e1000_rev_polarity_reversed,
+       e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+       e1000_fc_none = 0,
+       e1000_fc_rx_pause,
+       e1000_fc_tx_pause,
+       e1000_fc_full,
+       e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+       e1000_ms_hw_default = 0,
+       e1000_ms_force_master,
+       e1000_ms_force_slave,
+       e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+       e1000_smart_speed_default = 0,
+       e1000_smart_speed_on,
+       e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+       e1000_serdes_link_down = 0,
+       e1000_serdes_link_autoneg_progress,
+       e1000_serdes_link_autoneg_complete,
+       e1000_serdes_link_forced_up
+};
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+/* Receive Descriptor */
+struct e1000_rx_desc {
+       __le64 buffer_addr; /* Address of the descriptor's data buffer */
+       __le16 length;      /* Length of data DMAed into data buffer */
+       __le16 csum;        /* Packet checksum */
+       u8  status;         /* Descriptor status */
+       u8  errors;         /* Descriptor Errors */
+       __le16 special;
+};
+
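+/*
+ * Polling sketch (illustrative only): with the legacy format a receive ring
+ * is typically scanned by checking the descriptor-done bit in 'status'
+ * (E1000_RXD_STAT_DD from e1000_defines.h) before trusting the other fields:
+ *
+ *     volatile struct e1000_rx_desc *rxd = &rx_ring[next];
+ *
+ *     if (rxd->status & E1000_RXD_STAT_DD) {
+ *             pkt_len = rxd->length;
+ *             ...
+ *     }
+ */
+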
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+       struct {
+               __le64 buffer_addr;
+               __le64 reserved;
+       } read;
+       struct {
+               struct {
+                       __le32 mrq;           /* Multiple Rx Queues */
+                       union {
+                               __le32 rss;         /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;  /* IP id */
+                                       __le16 csum;   /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;  /* ext status/error */
+                       __le16 length;
+                       __le16 vlan;          /* VLAN tag */
+               } upper;
+       } wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+       struct {
+               /* one buffer for protocol header(s), three data buffers */
+               __le64 buffer_addr[MAX_PS_BUFFERS];
+       } read;
+       struct {
+               struct {
+                       __le32 mrq;           /* Multiple Rx Queues */
+                       union {
+                               __le32 rss;           /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;    /* IP id */
+                                       __le16 csum;     /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;  /* ext status/error */
+                       __le16 length0;       /* length of buffer 0 */
+                       __le16 vlan;          /* VLAN tag */
+               } middle;
+               struct {
+                       __le16 header_status;
+                       __le16 length[3];     /* length of buffers 1-3 */
+               } upper;
+               __le64 reserved;
+       } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+       __le64 buffer_addr;   /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;    /* Data buffer length */
+                       u8 cso;           /* Checksum offset */
+                       u8 cmd;           /* Descriptor control */
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 css;           /* Checksum start */
+                       __le16 special;
+               } fields;
+       } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+       union {
+               __le32 ip_config;
+               struct {
+                       u8 ipcss;         /* IP checksum start */
+                       u8 ipcso;         /* IP checksum offset */
+                       __le16 ipcse;     /* IP checksum end */
+               } ip_fields;
+       } lower_setup;
+       union {
+               __le32 tcp_config;
+               struct {
+                       u8 tucss;         /* TCP checksum start */
+                       u8 tucso;         /* TCP checksum offset */
+                       __le16 tucse;     /* TCP checksum end */
+               } tcp_fields;
+       } upper_setup;
+       __le32 cmd_and_length;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 hdr_len;       /* Header length */
+                       __le16 mss;       /* Maximum segment size */
+               } fields;
+       } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+       __le64 buffer_addr;   /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;    /* Data buffer length */
+                       u8 typ_len_ext;
+                       u8 cmd;
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 popts;         /* Packet Options */
+                       __le16 special;
+               } fields;
+       } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+       u64 crcerrs;
+       u64 algnerrc;
+       u64 symerrs;
+       u64 rxerrc;
+       u64 mpc;
+       u64 scc;
+       u64 ecol;
+       u64 mcc;
+       u64 latecol;
+       u64 colc;
+       u64 dc;
+       u64 tncrs;
+       u64 sec;
+       u64 cexterr;
+       u64 rlec;
+       u64 xonrxc;
+       u64 xontxc;
+       u64 xoffrxc;
+       u64 xofftxc;
+       u64 fcruc;
+       u64 prc64;
+       u64 prc127;
+       u64 prc255;
+       u64 prc511;
+       u64 prc1023;
+       u64 prc1522;
+       u64 gprc;
+       u64 bprc;
+       u64 mprc;
+       u64 gptc;
+       u64 gorc;
+       u64 gotc;
+       u64 rnbc;
+       u64 ruc;
+       u64 rfc;
+       u64 roc;
+       u64 rjc;
+       u64 mgprc;
+       u64 mgpdc;
+       u64 mgptc;
+       u64 tor;
+       u64 tot;
+       u64 tpr;
+       u64 tpt;
+       u64 ptc64;
+       u64 ptc127;
+       u64 ptc255;
+       u64 ptc511;
+       u64 ptc1023;
+       u64 ptc1522;
+       u64 mptc;
+       u64 bptc;
+       u64 tsctc;
+       u64 tsctfc;
+       u64 iac;
+       u64 icrxptc;
+       u64 icrxatc;
+       u64 ictxptc;
+       u64 ictxatc;
+       u64 ictxqec;
+       u64 ictxqmtc;
+       u64 icrxdmtc;
+       u64 icrxoc;
+       u64 cbtmpc;
+       u64 htdpmc;
+       u64 cbrdpc;
+       u64 cbrmpc;
+       u64 rpthc;
+       u64 hgptc;
+       u64 htcbdpc;
+       u64 hgorc;
+       u64 hgotc;
+       u64 lenerrs;
+       u64 scvpc;
+       u64 hrmpc;
+       u64 doosync;
+};
+
+struct e1000_vf_stats {
+       u64 base_gprc;
+       u64 base_gptc;
+       u64 base_gorc;
+       u64 base_gotc;
+       u64 base_mprc;
+       u64 base_gotlbc;
+       u64 base_gptlbc;
+       u64 base_gorlbc;
+       u64 base_gprlbc;
+
+       u32 last_gprc;
+       u32 last_gptc;
+       u32 last_gorc;
+       u32 last_gotc;
+       u32 last_mprc;
+       u32 last_gotlbc;
+       u32 last_gptlbc;
+       u32 last_gorlbc;
+       u32 last_gprlbc;
+
+       u64 gprc;
+       u64 gptc;
+       u64 gorc;
+       u64 gotc;
+       u64 mprc;
+       u64 gotlbc;
+       u64 gptlbc;
+       u64 gorlbc;
+       u64 gprlbc;
+};
+
+struct e1000_phy_stats {
+       u32 idle_errors;
+       u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+       u32 signature;
+       u8  status;
+       u8  reserved0;
+       u16 vlan_id;
+       u32 reserved1;
+       u16 reserved2;
+       u8  reserved3;
+       u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+       u8 command_id;
+       u8 command_length;
+       u8 command_options;
+       u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+       struct e1000_host_command_header command_header;
+       u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+       u8  command_id;
+       u8  checksum;
+       u16 reserved1;
+       u16 reserved2;
+       u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+       struct e1000_host_mng_command_header command_header;
+       u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+       /* Function pointers for the MAC. */
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*id_led_init)(struct e1000_hw *);
+       s32  (*blink_led)(struct e1000_hw *);
+       s32  (*check_for_link)(struct e1000_hw *);
+       bool (*check_mng_mode)(struct e1000_hw *hw);
+       s32  (*cleanup_led)(struct e1000_hw *);
+       void (*clear_hw_cntrs)(struct e1000_hw *);
+       void (*clear_vfta)(struct e1000_hw *);
+       s32  (*get_bus_info)(struct e1000_hw *);
+       void (*set_lan_id)(struct e1000_hw *);
+       s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+       s32  (*led_on)(struct e1000_hw *);
+       s32  (*led_off)(struct e1000_hw *);
+       void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+       s32  (*reset_hw)(struct e1000_hw *);
+       s32  (*init_hw)(struct e1000_hw *);
+       void (*shutdown_serdes)(struct e1000_hw *);
+       void (*power_up_serdes)(struct e1000_hw *);
+       s32  (*setup_link)(struct e1000_hw *);
+       s32  (*setup_physical_interface)(struct e1000_hw *);
+       s32  (*setup_led)(struct e1000_hw *);
+       void (*write_vfta)(struct e1000_hw *, u32, u32);
+       void (*config_collision_dist)(struct e1000_hw *);
+       void (*rar_set)(struct e1000_hw *, u8*, u32);
+       s32  (*read_mac_addr)(struct e1000_hw *);
+       s32  (*validate_mdi_setting)(struct e1000_hw *);
+       s32  (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
+       s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
+                      struct e1000_host_mng_command_header*);
+       s32  (*mng_enable_host_if)(struct e1000_hw *);
+       s32  (*wait_autoneg)(struct e1000_hw *);
+};
+
+struct e1000_phy_operations {
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*acquire)(struct e1000_hw *);
+       s32  (*check_polarity)(struct e1000_hw *);
+       s32  (*check_reset_block)(struct e1000_hw *);
+       s32  (*commit)(struct e1000_hw *);
+       s32  (*force_speed_duplex)(struct e1000_hw *);
+       s32  (*get_cfg_done)(struct e1000_hw *hw);
+       s32  (*get_cable_length)(struct e1000_hw *);
+       s32  (*get_info)(struct e1000_hw *);
+       s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+       s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+       void (*release)(struct e1000_hw *);
+       s32  (*reset)(struct e1000_hw *);
+       s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+       s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+       s32  (*write_reg)(struct e1000_hw *, u32, u16);
+       s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
+       void (*power_up)(struct e1000_hw *);
+       void (*power_down)(struct e1000_hw *);
+};
+
+struct e1000_nvm_operations {
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*acquire)(struct e1000_hw *);
+       s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+       void (*release)(struct e1000_hw *);
+       void (*reload)(struct e1000_hw *);
+       s32  (*update)(struct e1000_hw *);
+       s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+       s32  (*validate)(struct e1000_hw *);
+       s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+       struct e1000_mac_operations ops;
+       u8 addr[ETH_ADDR_LEN];
+       u8 perm_addr[ETH_ADDR_LEN];
+
+       enum e1000_mac_type type;
+
+       u32 collision_delta;
+       u32 ledctl_default;
+       u32 ledctl_mode1;
+       u32 ledctl_mode2;
+       u32 mc_filter_type;
+       u32 tx_packet_delta;
+       u32 txcw;
+
+       u16 current_ifs_val;
+       u16 ifs_max_val;
+       u16 ifs_min_val;
+       u16 ifs_ratio;
+       u16 ifs_step_size;
+       u16 mta_reg_count;
+       u16 uta_reg_count;
+
+       /* Maximum size of the MTA register table in all supported adapters */
+       #define MAX_MTA_REG 128
+       u32 mta_shadow[MAX_MTA_REG];
+       u16 rar_entry_count;
+
+       u8  forced_speed_duplex;
+
+       bool adaptive_ifs;
+       bool has_fwsm;
+       bool arc_subsystem_valid;
+       bool asf_firmware_present;
+       bool autoneg;
+       bool autoneg_failed;
+       bool get_link_status;
+       bool in_ifs_mode;
+       enum e1000_serdes_link_state serdes_link_state;
+       bool serdes_has_link;
+       bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+       struct e1000_phy_operations ops;
+       enum e1000_phy_type type;
+
+       enum e1000_1000t_rx_status local_rx;
+       enum e1000_1000t_rx_status remote_rx;
+       enum e1000_ms_type ms_type;
+       enum e1000_ms_type original_ms_type;
+       enum e1000_rev_polarity cable_polarity;
+       enum e1000_smart_speed smart_speed;
+
+       u32 addr;
+       u32 id;
+       u32 reset_delay_us; /* in usec */
+       u32 revision;
+
+       enum e1000_media_type media_type;
+
+       u16 autoneg_advertised;
+       u16 autoneg_mask;
+       u16 cable_length;
+       u16 max_cable_length;
+       u16 min_cable_length;
+
+       u8 mdix;
+
+       bool disable_polarity_correction;
+       bool is_mdix;
+       bool polarity_correction;
+       bool reset_disable;
+       bool speed_downgraded;
+       bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+       struct e1000_nvm_operations ops;
+       enum e1000_nvm_type type;
+       enum e1000_nvm_override override;
+
+       u32 flash_bank_size;
+       u32 flash_base_addr;
+
+       u16 word_size;
+       u16 delay_usec;
+       u16 address_bits;
+       u16 opcode_bits;
+       u16 page_size;
+};
+
+struct e1000_bus_info {
+       enum e1000_bus_type type;
+       enum e1000_bus_speed speed;
+       enum e1000_bus_width width;
+
+       u16 func;
+       u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+       u32 high_water;          /* Flow control high-water mark */
+       u32 low_water;           /* Flow control low-water mark */
+       u16 pause_time;          /* Flow control pause timer */
+       u16 refresh_time;        /* Flow control refresh timer */
+       bool send_xon;           /* Flow control send XON */
+       bool strict_ieee;        /* Strict IEEE mode */
+       enum e1000_fc_mode current_mode; /* FC mode in effect */
+       enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_mbx_operations {
+       s32 (*init_params)(struct e1000_hw *hw);
+       s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
+       s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+       s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
+       s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+       s32 (*check_for_msg)(struct e1000_hw *, u16);
+       s32 (*check_for_ack)(struct e1000_hw *, u16);
+       s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+       u32 msgs_tx;
+       u32 msgs_rx;
+
+       u32 acks;
+       u32 reqs;
+       u32 rsts;
+};
+
+struct e1000_mbx_info {
+       struct e1000_mbx_operations ops;
+       struct e1000_mbx_stats stats;
+       u32 timeout;
+       u32 usec_delay;
+       u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+       bool sgmii_active;
+       bool global_device_reset;
+       bool eee_disable;
+};
+
+struct e1000_dev_spec_vf {
+       u32 vf_number;
+       u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+       void *back;
+
+       u8 *hw_addr;
+       u8 *flash_address;
+       unsigned long io_base;
+
+       struct e1000_mac_info  mac;
+       struct e1000_fc_info   fc;
+       struct e1000_phy_info  phy;
+       struct e1000_nvm_info  nvm;
+       struct e1000_bus_info  bus;
+       struct e1000_mbx_info mbx;
+       struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+       union {
+               struct e1000_dev_spec_82575 _82575;
+               struct e1000_dev_spec_vf vf;
+       } dev_spec;
+
+       u16 device_id;
+       u16 subsystem_vendor_id;
+       u16 subsystem_device_id;
+       u16 vendor_id;
+
+       u8  revision_id;
+};
+
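+/*
+ * Dispatch sketch (illustrative only): callers reach the hardware through
+ * the per-family function pointers held in this structure, e.g.
+ *
+ *     s32 ret_val = hw->mac.ops.init_hw(hw);
+ *     if (ret_val == E1000_SUCCESS && hw->phy.ops.get_info)
+ *             ret_val = hw->phy.ops.get_info(hw);
+ *
+ * The wrappers in e1000_api.c add the NULL checks around such calls.
+ */
+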
+#include "e1000_82575.h"
+
+/* These functions must be implemented by drivers */
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32  e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
diff --git a/lib/librte_pmd_e1000/e1000/e1000_mac.c b/lib/librte_pmd_e1000/e1000/e1000_mac.c
new file mode 100644 (file)
index 0000000..1fff576
--- /dev/null
@@ -0,0 +1,2170 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_mac_ops_generic - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the function pointers to no-op functions
+ **/
+void e1000_init_mac_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       DEBUGFUNC("e1000_init_mac_ops_generic");
+
+       /* General Setup */
+       mac->ops.init_params = e1000_null_ops_generic;
+       mac->ops.init_hw = e1000_null_ops_generic;
+       mac->ops.reset_hw = e1000_null_ops_generic;
+       mac->ops.setup_physical_interface = e1000_null_ops_generic;
+       mac->ops.get_bus_info = e1000_null_ops_generic;
+       mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
+       mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
+       mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
+       mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
+       /* LED */
+       mac->ops.cleanup_led = e1000_null_ops_generic;
+       mac->ops.setup_led = e1000_null_ops_generic;
+       mac->ops.blink_led = e1000_null_ops_generic;
+       mac->ops.led_on = e1000_null_ops_generic;
+       mac->ops.led_off = e1000_null_ops_generic;
+       /* LINK */
+       mac->ops.setup_link = e1000_null_ops_generic;
+       mac->ops.get_link_up_info = e1000_null_link_info;
+       mac->ops.check_for_link = e1000_null_ops_generic;
+       mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
+       /* Management */
+       mac->ops.check_mng_mode = e1000_null_mng_mode;
+       mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
+       mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
+       mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
+       /* VLAN, MC, etc. */
+       mac->ops.update_mc_addr_list = e1000_null_update_mc;
+       mac->ops.clear_vfta = e1000_null_mac_generic;
+       mac->ops.write_vfta = e1000_null_write_vfta;
+       mac->ops.rar_set = e1000_rar_set_generic;
+       mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+}
+
+/**
+ *  e1000_null_ops_generic - No-op function, returns 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_ops_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_ops_generic");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_mac_generic - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_mac_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_mac_generic");
+       return;
+}
+
+/**
+ *  e1000_null_link_info - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d)
+{
+       DEBUGFUNC("e1000_null_link_info");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_mng_mode - No-op function, return FALSE
+ *  @hw: pointer to the HW structure
+ **/
+bool e1000_null_mng_mode(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_mng_mode");
+       return FALSE;
+}
+
+/**
+ *  e1000_null_update_mc - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a)
+{
+       DEBUGFUNC("e1000_null_update_mc");
+       return;
+}
+
+/**
+ *  e1000_null_write_vfta - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b)
+{
+       DEBUGFUNC("e1000_null_write_vfta");
+       return;
+}
+
+/**
+ *  e1000_null_rar_set - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a)
+{
+       DEBUGFUNC("e1000_null_rar_set");
+       return;
+}
+
+/**
+ *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
+ **/
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_bus_info *bus = &hw->bus;
+       u32 status = E1000_READ_REG(hw, E1000_STATUS);
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_get_bus_info_pci_generic");
+
+       /* PCI or PCI-X? */
+       bus->type = (status & E1000_STATUS_PCIX_MODE)
+                       ? e1000_bus_type_pcix
+                       : e1000_bus_type_pci;
+
+       /* Bus speed */
+       if (bus->type == e1000_bus_type_pci) {
+               bus->speed = (status & E1000_STATUS_PCI66)
+                            ? e1000_bus_speed_66
+                            : e1000_bus_speed_33;
+       } else {
+               switch (status & E1000_STATUS_PCIX_SPEED) {
+               case E1000_STATUS_PCIX_SPEED_66:
+                       bus->speed = e1000_bus_speed_66;
+                       break;
+               case E1000_STATUS_PCIX_SPEED_100:
+                       bus->speed = e1000_bus_speed_100;
+                       break;
+               case E1000_STATUS_PCIX_SPEED_133:
+                       bus->speed = e1000_bus_speed_133;
+                       break;
+               default:
+                       bus->speed = e1000_bus_speed_reserved;
+                       break;
+               }
+       }
+
+       /* Bus width */
+       bus->width = (status & E1000_STATUS_BUS64)
+                    ? e1000_bus_width_64
+                    : e1000_bus_width_32;
+
+       /* Which PCI(-X) function? */
+       mac->ops.set_lan_id(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_bus_info *bus = &hw->bus;
+       s32 ret_val;
+       u16 pcie_link_status;
+
+       DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+       bus->type = e1000_bus_type_pci_express;
+
+       ret_val = e1000_read_pcie_cap_reg(hw,
+                                         PCIE_LINK_STATUS,
+                                         &pcie_link_status);
+       if (ret_val) {
+               bus->width = e1000_bus_width_unknown;
+               bus->speed = e1000_bus_speed_unknown;
+       } else {
+               switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
+               case PCIE_LINK_SPEED_2500:
+                       bus->speed = e1000_bus_speed_2500;
+                       break;
+               case PCIE_LINK_SPEED_5000:
+                       bus->speed = e1000_bus_speed_5000;
+                       break;
+               default:
+                       bus->speed = e1000_bus_speed_unknown;
+                       break;
+               }
+
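+               /* Link width field = negotiated PCIe lane count */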
+               bus->width = (enum e1000_bus_width)((pcie_link_status &
+                                               PCIE_LINK_WIDTH_MASK) >>
+                                              PCIE_LINK_WIDTH_SHIFT);
+       }
+
+       mac->ops.set_lan_id(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading the STATUS register, which
+ *  reports the correct function number regardless of LAN function swap state.
+ **/
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+       u32 reg;
+
+       /*
+        * The status register reports the correct function number
+        * for the device regardless of function swap state.
+        */
+       reg = E1000_READ_REG(hw, E1000_STATUS);
+       bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ *  e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading PCI config space.
+ **/
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+       u16 pci_header_type;
+       u32 status;
+
+       e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+       if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+               status = E1000_READ_REG(hw, E1000_STATUS);
+               bus->func = (status & E1000_STATUS_FUNC_MASK)
+                           >> E1000_STATUS_FUNC_SHIFT;
+       } else {
+               bus->func = 0;
+       }
+}
+
+/**
+ *  e1000_set_lan_id_single_port - Set LAN id for a single port device
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+
+       bus->func = 0;
+}
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+       u32 offset;
+
+       DEBUGFUNC("e1000_clear_vfta_generic");
+
+       for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+               E1000_WRITE_FLUSH(hw);
+       }
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+       DEBUGFUNC("e1000_write_vfta_generic");
+
+       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_init_rx_addrs_generic - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+       u32 i;
+       u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+       DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+       /* Setup the receive address */
+       DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+       hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+       /* Zero out the other (rar_entry_count - 1) receive addresses */
+       DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+       for (i = 1; i < rar_count; i++)
+               hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the NVM for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address, overriding the actual permanent MAC address. If an
+ *  alternate MAC address is found it is programmed into RAR0, replacing
+ *  the permanent address that was installed into RAR0 by the Si on reset.
+ *  This function will return SUCCESS unless it encounters an error while
+ *  reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+       u32 i;
+       s32 ret_val = E1000_SUCCESS;
+       u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+       u8 alt_mac_addr[ETH_ADDR_LEN];
+
+       DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+       if (ret_val)
+               goto out;
+
+       if (!(nvm_data & NVM_COMPAT_LOM))
+               goto out;
+
+       ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+                                &nvm_alt_mac_addr_offset);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (nvm_alt_mac_addr_offset == 0xFFFF) {
+               /* There is no Alternate MAC Address */
+               goto out;
+       }
+
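+       /* Each LAN function has its own alternate MAC address slot in the NVM */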
+       if (hw->bus.func == E1000_FUNC_1)
+               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+       if (hw->bus.func == E1000_FUNC_2)
+               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+       if (hw->bus.func == E1000_FUNC_3)
+               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+
+       for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+               offset = nvm_alt_mac_addr_offset + (i >> 1);
+               ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+
+               alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+               alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+       }
+
+       /* if multicast bit is set, the alternate address will not be used */
+       if (alt_mac_addr[0] & 0x01) {
+               DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+               goto out;
+       }
+
+       /*
+        * We have a valid alternate MAC address, and we want to treat it the
+        * same as the normal permanent MAC address stored by the HW into the
+        * RAR. Do this by mapping this address into RAR0.
+        */
+       hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_rar_set_generic - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       u32 rar_low, rar_high;
+
+       DEBUGFUNC("e1000_rar_set_generic");
+
+       /*
+        * HW expects these in little endian so we reverse the byte order
+        * from network order (big endian) to little endian
+        */
+       rar_low = ((u32) addr[0] |
+                  ((u32) addr[1] << 8) |
+                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+       /* If MAC address zero, no need to set the AV bit */
+       if (rar_low || rar_high)
+               rar_high |= E1000_RAH_AV;
+
+       /*
+        * Some bridges will combine consecutive 32-bit writes into
+        * a single burst write, which will malfunction on some parts.
+        * The flushes avoid this.
+        */
+       E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+       E1000_WRITE_FLUSH(hw);
+       E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_update_mc_addr_list_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count)
+{
+       u32 hash_value, hash_bit, hash_reg;
+       int i;
+
+       DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+       /* clear mta_shadow */
+       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+       /* update mta_shadow from mc_addr_list */
+       for (i = 0; (u32) i < mc_addr_count; i++) {
+               hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
+
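+               /* upper hash bits pick the MTA register, low 5 bits the bit */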
+               hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+               hash_bit = hash_value & 0x1F;
+
+               hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+               mc_addr_list += (ETH_ADDR_LEN);
+       }
+
+       /* replace the entire MTA table */
+       for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_hash_mc_addr_generic - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+       u32 hash_value, hash_mask;
+       u8 bit_shift = 0;
+
+       DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+       /* Register count multiplied by bits per register */
+       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+       /*
+        * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+        * where 0xFF would still fall within the hash mask.
+        */
+       while (hash_mask >> bit_shift != 0xFF)
+               bit_shift++;
+
+       /*
+        * The portion of the address that is used for the hash table
+        * is determined by the mc_filter_type setting.
+        * The algorithm is such that there is a total of 8 bits of shifting.
+        * The bit_shift for a mc_filter_type of 0 represents the number of
+        * left-shifts where the MSB of mc_addr[5] would still fall within
+        * the hash_mask.  Case 0 does this exactly.  Since there are a total
+        * of 8 bits of shifting, then mc_addr[4] will shift right the
+        * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+        * cases are a variation of this algorithm...essentially raising the
+        * number of bits to shift mc_addr[5] left, while still keeping the
+        * 8-bit shifting total.
+        *
+        * For example, given the following Destination MAC Address and an
+        * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+        * we can see that the bit_shift for case 0 is 4.  These are the hash
+        * values resulting from each mc_filter_type...
+        * [0] [1] [2] [3] [4] [5]
+        * 01  AA  00  12  34  56
+        * LSB                 MSB
+        *
+        * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+        * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+        * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+        * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+        */
+       switch (hw->mac.mc_filter_type) {
+       default:
+       case 0:
+               break;
+       case 1:
+               bit_shift += 1;
+               break;
+       case 2:
+               bit_shift += 2;
+               break;
+       case 3:
+               bit_shift += 4;
+               break;
+       }
+
+       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+                                 (((u16) mc_addr[5]) << bit_shift)));
+
+       return hash_value;
+}
+
+/**
+ *  e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ *  @hw: pointer to the HW structure
+ *
+ *  In certain situations, a system BIOS may report that the PCIx maximum
+ *  memory read byte count (MMRBC) value is higher than the actual
+ *  value. We check the PCIx command register against the current PCIx status
+ *  register and lower the MMRBC if necessary.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+       u16 cmd_mmrbc;
+       u16 pcix_cmd;
+       u16 pcix_stat_hi_word;
+       u16 stat_mmrbc;
+
+       DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+       /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+       if (hw->bus.type != e1000_bus_type_pcix)
+               return;
+
+       e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+       e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+       cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+                    PCIX_COMMAND_MMRBC_SHIFT;
+       stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+                     PCIX_STATUS_HI_MMRBC_SHIFT;
+       if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+               stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+       if (cmd_mmrbc > stat_mmrbc) {
+               pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+               pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+               e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+       }
+}
+
+/**
+ *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+       E1000_READ_REG(hw, E1000_CRCERRS);
+       E1000_READ_REG(hw, E1000_SYMERRS);
+       E1000_READ_REG(hw, E1000_MPC);
+       E1000_READ_REG(hw, E1000_SCC);
+       E1000_READ_REG(hw, E1000_ECOL);
+       E1000_READ_REG(hw, E1000_MCC);
+       E1000_READ_REG(hw, E1000_LATECOL);
+       E1000_READ_REG(hw, E1000_COLC);
+       E1000_READ_REG(hw, E1000_DC);
+       E1000_READ_REG(hw, E1000_SEC);
+       E1000_READ_REG(hw, E1000_RLEC);
+       E1000_READ_REG(hw, E1000_XONRXC);
+       E1000_READ_REG(hw, E1000_XONTXC);
+       E1000_READ_REG(hw, E1000_XOFFRXC);
+       E1000_READ_REG(hw, E1000_XOFFTXC);
+       E1000_READ_REG(hw, E1000_FCRUC);
+       E1000_READ_REG(hw, E1000_GPRC);
+       E1000_READ_REG(hw, E1000_BPRC);
+       E1000_READ_REG(hw, E1000_MPRC);
+       E1000_READ_REG(hw, E1000_GPTC);
+       E1000_READ_REG(hw, E1000_GORCL);
+       E1000_READ_REG(hw, E1000_GORCH);
+       E1000_READ_REG(hw, E1000_GOTCL);
+       E1000_READ_REG(hw, E1000_GOTCH);
+       E1000_READ_REG(hw, E1000_RNBC);
+       E1000_READ_REG(hw, E1000_RUC);
+       E1000_READ_REG(hw, E1000_RFC);
+       E1000_READ_REG(hw, E1000_ROC);
+       E1000_READ_REG(hw, E1000_RJC);
+       E1000_READ_REG(hw, E1000_TORL);
+       E1000_READ_REG(hw, E1000_TORH);
+       E1000_READ_REG(hw, E1000_TOTL);
+       E1000_READ_REG(hw, E1000_TOTH);
+       E1000_READ_REG(hw, E1000_TPR);
+       E1000_READ_REG(hw, E1000_TPT);
+       E1000_READ_REG(hw, E1000_MPTC);
+       E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ *  e1000_check_for_copper_link_generic - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val;
+       bool link;
+
+       DEBUGFUNC("e1000_check_for_copper_link");
+
+       /*
+        * We only want to go out to the PHY registers to see if Auto-Neg
+        * has completed and/or if our link status has changed.  The
+        * get_link_status flag is set upon receiving a Link Status
+        * Change or Rx Sequence Error interrupt.
+        */
+       if (!mac->get_link_status) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /*
+        * First we want to see if the MII Status Register reports
+        * link.  If so, then we want to get the current speed/duplex
+        * of the PHY.
+        */
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link)
+               goto out; /* No link detected */
+
+       mac->get_link_status = FALSE;
+
+       /*
+        * Check if there was DownShift, must be checked
+        * immediately after link-up
+        */
+       e1000_check_downshift_generic(hw);
+
+       /*
+        * If we are forcing speed/duplex, then we simply return since
+        * we have already determined whether we have link or not.
+        */
+       if (!mac->autoneg) {
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       /*
+        * Auto-Neg is enabled.  Auto Speed Detection takes care
+        * of MAC speed/duplex configuration.  So we only need to
+        * configure Collision Distance in the MAC.
+        */
+       mac->ops.config_collision_dist(hw);
+
+       /*
+        * Configure Flow Control now that Auto-Neg has completed.
+        * First, we need to restore the desired flow control
+        * settings because we may have had to re-autoneg with a
+        * different link partner.
+        */
+       ret_val = e1000_config_fc_after_link_up_generic(hw);
+       if (ret_val)
+               DEBUGOUT("Error configuring flow control\n");
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 rxcw;
+       u32 ctrl;
+       u32 status;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+       /*
+        * If we don't have link (auto-negotiation failed or link partner
+        * cannot auto-negotiate), the cable is plugged in (we have signal),
+        * and our link partner is not trying to auto-negotiate with us (we
+        * are receiving idles or data), we need to force link up. We also
+        * need to give auto-negotiation time to complete, in case the cable
+        * was just plugged in. The autoneg_failed flag does this.
+        */
+       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+       if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+           (!(rxcw & E1000_RXCW_C))) {
+               if (mac->autoneg_failed == 0) {
+                       mac->autoneg_failed = 1;
+                       goto out;
+               }
+               DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+               /* Disable auto-negotiation in the TXCW register */
+               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+               /* Force link-up and also force full-duplex. */
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+               /* Configure Flow Control after forcing link up. */
+               ret_val = e1000_config_fc_after_link_up_generic(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring flow control\n");
+                       goto out;
+               }
+       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+               /*
+                * If we are forcing link and we are receiving /C/ ordered
+                * sets, re-enable auto-negotiation in the TXCW register
+                * and disable forced link in the Device Control register
+                * in an attempt to auto-negotiate with our link partner.
+                */
+               DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+               mac->serdes_has_link = TRUE;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 rxcw;
+       u32 ctrl;
+       u32 status;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+       /*
+        * If we don't have link (auto-negotiation failed or link partner
+        * cannot auto-negotiate), and our link partner is not trying to
+        * auto-negotiate with us (we are receiving idles or data),
+        * we need to force link up. We also need to give auto-negotiation
+        * time to complete.
+        */
+       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+       if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+               if (mac->autoneg_failed == 0) {
+                       mac->autoneg_failed = 1;
+                       goto out;
+               }
+               DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+               /* Disable auto-negotiation in the TXCW register */
+               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+               /* Force link-up and also force full-duplex. */
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+               /* Configure Flow Control after forcing link up. */
+               ret_val = e1000_config_fc_after_link_up_generic(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring flow control\n");
+                       goto out;
+               }
+       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+               /*
+                * If we are forcing link and we are receiving /C/ ordered
+                * sets, re-enable auto-negotiation in the TXCW register
+                * and disable forced link in the Device Control register
+                * in an attempt to auto-negotiate with our link partner.
+                */
+               DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+               mac->serdes_has_link = TRUE;
+       } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+               /*
+                * If we force link for non-auto-negotiation switch, check
+                * link status based on MAC synchronization for internal
+                * serdes media type.
+                */
+               /* SYNCH bit and IV bit are sticky. */
+               usec_delay(10);
+               rxcw = E1000_READ_REG(hw, E1000_RXCW);
+               if (rxcw & E1000_RXCW_SYNCH) {
+                       if (!(rxcw & E1000_RXCW_IV)) {
+                               mac->serdes_has_link = TRUE;
+                               DEBUGOUT("SERDES: Link up - forced.\n");
+                       }
+               } else {
+                       mac->serdes_has_link = FALSE;
+                       DEBUGOUT("SERDES: Link down - force failed.\n");
+               }
+       }
+
+       if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+               status = E1000_READ_REG(hw, E1000_STATUS);
+               if (status & E1000_STATUS_LU) {
+                       /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+                       usec_delay(10);
+                       rxcw = E1000_READ_REG(hw, E1000_RXCW);
+                       if (rxcw & E1000_RXCW_SYNCH) {
+                               if (!(rxcw & E1000_RXCW_IV)) {
+                                       mac->serdes_has_link = TRUE;
+                                       DEBUGOUT("SERDES: Link up - autoneg "
+                                          "completed successfully.\n");
+                               } else {
+                                       mac->serdes_has_link = FALSE;
+                                       DEBUGOUT("SERDES: Link down - invalid "
+                                          "codewords detected in autoneg.\n");
+                               }
+                       } else {
+                               mac->serdes_has_link = FALSE;
+                               DEBUGOUT("SERDES: Link down - no sync.\n");
+                       }
+               } else {
+                       mac->serdes_has_link = FALSE;
+                       DEBUGOUT("SERDES: Link down - autoneg failed\n");
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_link_generic - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_setup_link_generic");
+
+       /*
+        * In the case of the phy reset being blocked, we already have a link.
+        * We do not need to set it up again.
+        */
+       if (e1000_check_reset_block(hw))
+               goto out;
+
+       /*
+        * If requested flow control is set to default, set flow control
+        * based on the EEPROM flow control settings.
+        */
+       if (hw->fc.requested_mode == e1000_fc_default) {
+               ret_val = e1000_set_default_fc_generic(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Save off the requested flow control mode for use later.  Depending
+        * on the link partner's capabilities, we may or may not use this mode.
+        */
+       hw->fc.current_mode = hw->fc.requested_mode;
+
+       DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+               hw->fc.current_mode);
+
+       /* Call the necessary media_type subroutine to configure the link. */
+       ret_val = hw->mac.ops.setup_physical_interface(hw);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Initialize the flow control address, type, and PAUSE timer
+        * registers to their default values.  This is done even if flow
+        * control is disabled, because it does not hurt anything to
+        * initialize these registers.
+        */
+       DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+       E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+       E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+       E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+       E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+       ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes
+ *  links.  Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 ctrl;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       /* Take the link out of reset */
+       ctrl &= ~E1000_CTRL_LRST;
+
+       mac->ops.config_collision_dist(hw);
+
+       ret_val = e1000_commit_fc_settings_generic(hw);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Since auto-negotiation is enabled, take the link out of reset (the
+        * link will be in reset, because we previously reset the chip). This
+        * will restart auto-negotiation.  If auto-negotiation is successful
+        * then the link-up status bit will be set and the flow control enable
+        * bits (RFCE and TFCE) will be set according to their negotiated value.
+        */
+       DEBUGOUT("Auto-negotiation enabled\n");
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(1);
+
+       /*
+        * For these adapters, the SW definable pin 1 is set when the optics
+        * detect a signal.  If we have a signal, then poll for a "Link-Up"
+        * indication.
+        */
+       if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+           (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+               ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+       } else {
+               DEBUGOUT("No signal detected\n");
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_config_collision_dist_generic - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+       u32 tctl;
+
+       DEBUGFUNC("e1000_config_collision_dist_generic");
+
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+       tctl &= ~E1000_TCTL_COLD;
+       tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_poll_fiber_serdes_link_generic - Poll for link up
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls for link up by reading the status register; if link fails to come
+ *  up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 i, status;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+       /*
+        * If we have a signal (the cable is plugged in, or assumed TRUE for
+        * serdes media) then poll for a "Link-Up" indication in the Device
+        * Status Register.  Time-out if a link isn't seen in 500 milliseconds
+        * (Auto-negotiation should complete in less than 500
+        * milliseconds even if the other end is doing it in SW).
+        */
+       for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+               msec_delay(10);
+               status = E1000_READ_REG(hw, E1000_STATUS);
+               if (status & E1000_STATUS_LU)
+                       break;
+       }
+       if (i == FIBER_LINK_UP_LIMIT) {
+               DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+               mac->autoneg_failed = 1;
+               /*
+                * AutoNeg failed to achieve a link, so we'll call
+                * mac->check_for_link. This routine will force the
+                * link up if we detect a signal. This will allow us to
+                * communicate with non-autonegotiating link partners.
+                */
+               ret_val = mac->ops.check_for_link(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error while checking for link\n");
+                       goto out;
+               }
+               mac->autoneg_failed = 0;
+       } else {
+               mac->autoneg_failed = 0;
+               DEBUGOUT("Valid Link Found\n");
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_commit_fc_settings_generic - Configure flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Write the flow control settings to the Transmit Config Word Register (TXCW)
+ *  based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 txcw;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+       /*
+        * Check for a software override of the flow control settings, and
+        * setup the device accordingly.  If auto-negotiation is enabled, then
+        * software will have to set the "PAUSE" bits to the correct value in
+        * the Transmit Config Word Register (TXCW) and re-start auto-
+        * negotiation.  However, if auto-negotiation is disabled, then
+        * software will have to manually configure the two flow control enable
+        * bits in the CTRL register.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause frames,
+        *          but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames but we
+        *          do not support receiving pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+        */
+       switch (hw->fc.current_mode) {
+       case e1000_fc_none:
+               /* Flow control completely disabled by a software over-ride. */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+               break;
+       case e1000_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is disabled
+                * by a software over-ride. Since there really isn't a way to
+                * advertise that we are capable of Rx Pause ONLY, we will
+                * advertise that we support both symmetric and asymmetric Rx
+                * PAUSE.  Later, we will disable the adapter's ability to send
+                * PAUSE frames.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+               break;
+       case e1000_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is disabled,
+                * by a software over-ride.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+               break;
+       case e1000_fc_full:
+               /*
+                * Flow control (both Rx and Tx) is enabled by a software
+                * over-ride.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+       mac->txcw = txcw;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, the XON enable bit is
+ *  set in the low water mark register as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+       u32 fcrtl = 0, fcrth = 0;
+
+       DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+       /*
+        * Set the flow control receive threshold registers.  Normally,
+        * these registers will be set to a default threshold that may be
+        * adjusted later by the driver's runtime code.  However, if the
+        * ability to transmit pause frames is not enabled, then these
+        * registers will be set to 0.
+        */
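+       /* Tx pause bit is set for both e1000_fc_tx_pause and e1000_fc_full */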
+       if (hw->fc.current_mode & e1000_fc_tx_pause) {
+               /*
+                * We need to set up the Receive Threshold high and low water
+                * marks as well as (optionally) enabling the transmission of
+                * XON frames.
+                */
+               fcrtl = hw->fc.low_water;
+               if (hw->fc.send_xon)
+                       fcrtl |= E1000_FCRTL_XONE;
+
+               fcrth = hw->fc.high_water;
+       }
+       E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+       E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 nvm_data;
+
+       DEBUGFUNC("e1000_set_default_fc_generic");
+
+       /*
+        * Read and store word 0x0F of the EEPROM. This word contains bits
+        * that determine the hardware's default PAUSE (flow control) mode,
+        * a bit that determines whether the HW defaults to enabling or
+        * disabling auto-negotiation, and the direction of the
+        * SW defined pins. If there is no SW over-ride of the flow
+        * control setting, then the variable hw->fc will
+        * be initialized based on a value in the EEPROM.
+        */
+       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+               hw->fc.requested_mode = e1000_fc_none;
+       else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+                NVM_WORD0F_ASM_DIR)
+               hw->fc.requested_mode = e1000_fc_tx_pause;
+       else
+               hw->fc.requested_mode = e1000_fc_full;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_force_mac_fc_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       /*
+        * Because we didn't get link via the internal auto-negotiation
+        * mechanism (we either forced link or we got link via PHY
+        * auto-neg), we have to manually enable/disable transmit and
+        * receive flow control.
+        *
+        * The "Case" statement below enables/disable flow control
+        * according to the "hw->fc.current_mode" parameter.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause
+        *          frames but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames
+        *          but we do not receive pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) is enabled.
+        *  other:  No other values should be possible at this point.
+        */
+       DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+       switch (hw->fc.current_mode) {
+       case e1000_fc_none:
+               ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+               break;
+       case e1000_fc_rx_pause:
+               ctrl &= (~E1000_CTRL_TFCE);
+               ctrl |= E1000_CTRL_RFCE;
+               break;
+       case e1000_fc_tx_pause:
+               ctrl &= (~E1000_CTRL_RFCE);
+               ctrl |= E1000_CTRL_TFCE;
+               break;
+       case e1000_fc_full:
+               ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_config_fc_after_link_up_generic - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val = E1000_SUCCESS;
+       u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+       u16 speed, duplex;
+
+       DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+       /*
+        * Check for the case where we have fiber media and auto-neg failed
+        * so we had to force link.  In this case, we need to force the
+        * configuration of the MAC to match the "fc" parameter.
+        */
+       if (mac->autoneg_failed) {
+               if (hw->phy.media_type == e1000_media_type_fiber ||
+                   hw->phy.media_type == e1000_media_type_internal_serdes)
+                       ret_val = e1000_force_mac_fc_generic(hw);
+       } else {
+               if (hw->phy.media_type == e1000_media_type_copper)
+                       ret_val = e1000_force_mac_fc_generic(hw);
+       }
+
+       if (ret_val) {
+               DEBUGOUT("Error forcing flow control settings\n");
+               goto out;
+       }
+
+       /*
+        * Check for the case where we have copper media and auto-neg is
+        * enabled.  In this case, we need to check and see if Auto-Neg
+        * has completed, and if so, how the PHY and link partner has
+        * flow control configured.
+        */
+       if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+               /*
+                * Read the MII Status Register and check to see if AutoNeg
+                * has completed.  We read this twice because this reg has
+                * some "sticky" (latched) bits.
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       goto out;
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       goto out;
+
+               if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+                       DEBUGOUT("Copper PHY and Auto Neg "
+                                "have not completed.\n");
+                       goto out;
+               }
+
+               /*
+                * The AutoNeg process has completed, so we now need to
+                * read both the Auto Negotiation Advertisement
+                * Register (Address 4) and the Auto_Negotiation Base
+                * Page Ability Register (Address 5) to determine how
+                * flow control was negotiated.
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+                                            &mii_nway_adv_reg);
+               if (ret_val)
+                       goto out;
+               ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+                                            &mii_nway_lp_ability_reg);
+               if (ret_val)
+                       goto out;
+
+               /*
+                * Two bits in the Auto Negotiation Advertisement Register
+                * (Address 4) and two bits in the Auto Negotiation Base
+                * Page Ability Register (Address 5) determine flow control
+                * for both the PHY and the link partner.  The following
+                * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+                * 1999, describes these PAUSE resolution bits and how flow
+                * control is determined based upon these settings.
+                * NOTE:  DC = Don't Care
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+                *-------|---------|-------|---------|--------------------
+                *   0   |    0    |  DC   |   DC    | e1000_fc_none
+                *   0   |    1    |   0   |   DC    | e1000_fc_none
+                *   0   |    1    |   1   |    0    | e1000_fc_none
+                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+                *   1   |    0    |   0   |   DC    | e1000_fc_none
+                *   1   |   DC    |   1   |   DC    | e1000_fc_full
+                *   1   |    1    |   0   |    0    | e1000_fc_none
+                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+                *
+                * Are both PAUSE bits set to 1?  If so, this implies
+                * Symmetric Flow Control is enabled at both ends.  The
+                * ASM_DIR bits are irrelevant per the spec.
+                *
+                * For Symmetric Flow Control:
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                *-------|---------|-------|---------|--------------------
+                *   1   |   DC    |   1   |   DC    | e1000_fc_full
+                *
+                */
+               if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                   (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                       /*
+                        * Now we need to check if the user selected Rx ONLY
+                        * of pause frames.  In this case, we had to advertise
+                        * FULL flow control because we could not advertise Rx
+                        * ONLY. Hence, we must now check to see if we need to
+                        * turn OFF the TRANSMISSION of PAUSE frames.
+                        */
+                       if (hw->fc.requested_mode == e1000_fc_full) {
+                               hw->fc.current_mode = e1000_fc_full;
+                               DEBUGOUT("Flow Control = FULL.\r\n");
+                       } else {
+                               hw->fc.current_mode = e1000_fc_rx_pause;
+                               DEBUGOUT("Flow Control = "
+                                        "Rx PAUSE frames only.\r\n");
+                       }
+               }
+               /*
+                * For receiving PAUSE frames ONLY.
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                *-------|---------|-------|---------|--------------------
+                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+                */
+               else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                         (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                       hw->fc.current_mode = e1000_fc_tx_pause;
+                       DEBUGOUT("Flow Control = Tx PAUSE frames only.\r\n");
+               }
+               /*
+                * For transmitting PAUSE frames ONLY.
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                *-------|---------|-------|---------|--------------------
+                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+                */
+               else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                        (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                        !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                        (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                       hw->fc.current_mode = e1000_fc_rx_pause;
+                       DEBUGOUT("Flow Control = Rx PAUSE frames only.\r\n");
+               } else {
+                       /*
+                        * Per the IEEE spec, at this point flow control
+                        * should be disabled.
+                        */
+                       hw->fc.current_mode = e1000_fc_none;
+                       DEBUGOUT("Flow Control = NONE.\r\n");
+               }
+
+               /*
+                * Now we need to do one last check...  If we auto-
+                * negotiated to HALF DUPLEX, flow control should not be
+                * enabled per IEEE 802.3 spec.
+                */
+               ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+               if (ret_val) {
+                       DEBUGOUT("Error getting link speed and duplex\n");
+                       goto out;
+               }
+
+               if (duplex == HALF_DUPLEX)
+                       hw->fc.current_mode = e1000_fc_none;
+
+               /*
+                * Now we call a subroutine to actually force the MAC
+                * controller to use the correct flow control settings.
+                */
+               ret_val = e1000_force_mac_fc_generic(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error forcing flow control settings\n");
+                       goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+       u32 status;
+
+       DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       if (status & E1000_STATUS_SPEED_1000) {
+               *speed = SPEED_1000;
+               DEBUGOUT("1000 Mbs, ");
+       } else if (status & E1000_STATUS_SPEED_100) {
+               *speed = SPEED_100;
+               DEBUGOUT("100 Mbs, ");
+       } else {
+               *speed = SPEED_10;
+               DEBUGOUT("10 Mbs, ");
+       }
+
+       if (status & E1000_STATUS_FD) {
+               *duplex = FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       } else {
+               *duplex = HALF_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Sets the speed and duplex to gigabit full duplex (the only possible option)
+ *  for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                    u16 *speed, u16 *duplex)
+{
+       DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+
+       *speed = SPEED_1000;
+       *duplex = FULL_DUPLEX;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+       u32 swsm;
+       s32 ret_val = E1000_SUCCESS;
+       s32 timeout = hw->nvm.word_size + 1;
+       s32 i = 0;
+
+       DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+       /* Get the SW semaphore */
+       while (i < timeout) {
+               swsm = E1000_READ_REG(hw, E1000_SWSM);
+               if (!(swsm & E1000_SWSM_SMBI))
+                       break;
+
+               usec_delay(50);
+               i++;
+       }
+
+       if (i == timeout) {
+               DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       /* Get the FW semaphore. */
+       for (i = 0; i < timeout; i++) {
+               swsm = E1000_READ_REG(hw, E1000_SWSM);
+               E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+               /* Semaphore acquired if bit latched */
+               if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+                       break;
+
+               usec_delay(50);
+       }
+
+       if (i == timeout) {
+               /* Release semaphores */
+               e1000_put_hw_semaphore_generic(hw);
+               DEBUGOUT("Driver can't access the NVM\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_generic - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+       u32 swsm;
+
+       DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+       swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+       swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+       E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
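
The semaphore pair above follows a two-stage pattern: poll until the software semaphore bit (SMBI) is clear, then latch the software/firmware semaphore bit (SWESMBI) and read it back to confirm ownership; release simply clears both bits. Below is a minimal, self-contained sketch of that pattern against a fake SWSM register; the helper names, bit values and register model are assumptions for illustration, not the driver's E1000_READ_REG/E1000_WRITE_REG macros.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake register standing in for the device's SWSM register (illustration only). */
static uint32_t fake_swsm;

static uint32_t read_reg(void)              { return fake_swsm; }
static void     write_reg(uint32_t v)       { fake_swsm = v; }
static void     usec_delay(unsigned int us) { (void)us; }

#define SMBI_BIT    0x1u   /* software semaphore, owned by hardware/firmware */
#define SWESMBI_BIT 0x2u   /* software/firmware semaphore, latched by the driver */

/* Two-stage acquire: wait for SMBI to clear, then set SWESMBI and verify it latched. */
static bool acquire_hw_semaphore(int timeout)
{
    int i;

    for (i = 0; i < timeout; i++) {          /* stage 1: SW semaphore */
        if (!(read_reg() & SMBI_BIT))
            break;
        usec_delay(50);
    }
    if (i == timeout)
        return false;

    for (i = 0; i < timeout; i++) {          /* stage 2: FW semaphore */
        write_reg(read_reg() | SWESMBI_BIT);
        if (read_reg() & SWESMBI_BIT)        /* acquired only if the bit latched */
            return true;
        usec_delay(50);
    }
    return false;
}

int main(void)
{
    printf("acquired: %d\n", acquire_hw_semaphore(16));
    return 0;
}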
+
+/**
+ *  e1000_get_auto_rd_done_generic - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+       s32 i = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+       while (i < AUTO_READ_DONE_TIMEOUT) {
+               if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+                       break;
+               msec_delay(1);
+               i++;
+       }
+
+       if (i == AUTO_READ_DONE_TIMEOUT) {
+               DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+               ret_val = -E1000_ERR_RESET;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_generic - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set it to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_valid_led_default_generic");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+               *data = ID_LED_DEFAULT;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_id_led_init_generic - Initialize LED configuration
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the default LED configuration from the NVM and uses it to build the
+ *  LEDCTL values stored in ledctl_mode1 and ledctl_mode2 for later LED control.
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val;
+       const u32 ledctl_mask = 0x000000FF;
+       const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+       const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+       u16 data, i, temp;
+       const u16 led_mask = 0x0F;
+
+       DEBUGFUNC("e1000_id_led_init_generic");
+
+       ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+       if (ret_val)
+               goto out;
+
+       mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+       mac->ledctl_mode1 = mac->ledctl_default;
+       mac->ledctl_mode2 = mac->ledctl_default;
+
+       for (i = 0; i < 4; i++) {
+               temp = (data >> (i << 2)) & led_mask;
+               switch (temp) {
+               case ID_LED_ON1_DEF2:
+               case ID_LED_ON1_ON2:
+               case ID_LED_ON1_OFF2:
+                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode1 |= ledctl_on << (i << 3);
+                       break;
+               case ID_LED_OFF1_DEF2:
+               case ID_LED_OFF1_ON2:
+               case ID_LED_OFF1_OFF2:
+                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode1 |= ledctl_off << (i << 3);
+                       break;
+               default:
+                       /* Do nothing */
+                       break;
+               }
+               switch (temp) {
+               case ID_LED_DEF1_ON2:
+               case ID_LED_ON1_ON2:
+               case ID_LED_OFF1_ON2:
+                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode2 |= ledctl_on << (i << 3);
+                       break;
+               case ID_LED_DEF1_OFF2:
+               case ID_LED_ON1_OFF2:
+               case ID_LED_OFF1_OFF2:
+                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode2 |= ledctl_off << (i << 3);
+                       break;
+               default:
+                       /* Do nothing */
+                       break;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+       u32 ledctl;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_setup_led_generic");
+
+       if (hw->mac.ops.setup_led != e1000_setup_led_generic) {
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       if (hw->phy.media_type == e1000_media_type_fiber) {
+               ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+               hw->mac.ledctl_default = ledctl;
+               /* Turn off LED0 */
+               ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                           E1000_LEDCTL_LED0_BLINK |
+                           E1000_LEDCTL_LED0_MODE_MASK);
+               ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                          E1000_LEDCTL_LED0_MODE_SHIFT);
+               E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+       } else if (hw->phy.media_type == e1000_media_type_copper) {
+               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_cleanup_led_generic");
+
+       E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+       u32 ledctl_blink = 0;
+       u32 i;
+
+       DEBUGFUNC("e1000_blink_led_generic");
+
+       if (hw->phy.media_type == e1000_media_type_fiber) {
+               /* always blink LED0 for PCI-E fiber */
+               ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+       } else {
+               /*
+                * set the blink bit for each LED that's "on" (0x0E)
+                * in ledctl_mode2
+                */
+               ledctl_blink = hw->mac.ledctl_mode2;
+               for (i = 0; i < 4; i++)
+                       if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                           E1000_LEDCTL_MODE_LED_ON)
+                               ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+                                                (i * 8));
+       }
+
+       E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_led_on_generic");
+
+       switch (hw->phy.media_type) {
+       case e1000_media_type_fiber:
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl &= ~E1000_CTRL_SWDPIN0;
+               ctrl |= E1000_CTRL_SWDPIO0;
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+               break;
+       case e1000_media_type_copper:
+               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+               break;
+       default:
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_led_off_generic");
+
+       switch (hw->phy.media_type) {
+       case e1000_media_type_fiber:
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl |= E1000_CTRL_SWDPIN0;
+               ctrl |= E1000_CTRL_SWDPIO0;
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+               break;
+       case e1000_media_type_copper:
+               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+               break;
+       default:
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ *  @hw: pointer to the HW structure
+ *  @no_snoop: bitmap of snoop events
+ *
+ *  Set the PCI-Express register to disable snooping for the events enabled
+ *  in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+       u32 gcr;
+
+       DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+       if (hw->bus.type != e1000_bus_type_pci_express)
+               goto out;
+
+       if (no_snoop) {
+               gcr = E1000_READ_REG(hw, E1000_GCR);
+               gcr &= ~(PCIE_NO_SNOOP_ALL);
+               gcr |= no_snoop;
+               E1000_WRITE_REG(hw, E1000_GCR, gcr);
+       }
+out:
+       return;
+}
+
+/**
+ *  e1000_disable_pcie_master_generic - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ *  the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 timeout = MASTER_DISABLE_TIMEOUT;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+       if (hw->bus.type != e1000_bus_type_pci_express)
+               goto out;
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       while (timeout) {
+               if (!(E1000_READ_REG(hw, E1000_STATUS) &
+                     E1000_STATUS_GIO_MASTER_ENABLE))
+                       break;
+               usec_delay(100);
+               timeout--;
+       }
+
+       if (!timeout) {
+               DEBUGOUT("Master requests are pending.\n");
+               ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("e1000_reset_adaptive_generic");
+
+       if (!mac->adaptive_ifs) {
+               DEBUGOUT("Not in Adaptive IFS mode!\n");
+               goto out;
+       }
+
+       mac->current_ifs_val = 0;
+       mac->ifs_min_val = IFS_MIN;
+       mac->ifs_max_val = IFS_MAX;
+       mac->ifs_step_size = IFS_STEP;
+       mac->ifs_ratio = IFS_RATIO;
+
+       mac->in_ifs_mode = FALSE;
+       E1000_WRITE_REG(hw, E1000_AIT, 0);
+out:
+       return;
+}
+
+/**
+ *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("e1000_update_adaptive_generic");
+
+       if (!mac->adaptive_ifs) {
+               DEBUGOUT("Not in Adaptive IFS mode!\n");
+               goto out;
+       }
+
+       if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+               if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+                       mac->in_ifs_mode = TRUE;
+                       if (mac->current_ifs_val < mac->ifs_max_val) {
+                               if (!mac->current_ifs_val)
+                                       mac->current_ifs_val = mac->ifs_min_val;
+                               else
+                                       mac->current_ifs_val +=
+                                               mac->ifs_step_size;
+                               E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
+                       }
+               }
+       } else {
+               if (mac->in_ifs_mode &&
+                   (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+                       mac->current_ifs_val = 0;
+                       mac->in_ifs_mode = FALSE;
+                       E1000_WRITE_REG(hw, E1000_AIT, 0);
+               }
+       }
+out:
+       return;
+}
+
+/**
+ *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when auto-negotiation is not used, MDI/MDIx is correctly set;
+ *  in that case it must be forced to MDI mode only.
+ **/
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+       if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+               DEBUGOUT("Invalid MDI setting detected\n");
+               hw->phy.mdix = 1;
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg_generic - Write an 8bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these
+ *  and they all have the format address << 8 | data and bit 31 is polled for
+ *  completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                      u32 offset, u8 data)
+{
+       u32 i, regvalue = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+       /* Set up the address and data */
+       regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+       E1000_WRITE_REG(hw, reg, regvalue);
+
+       /* Poll the ready bit to see if the write completed */
+       for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+               usec_delay(5);
+               regvalue = E1000_READ_REG(hw, reg);
+               if (regvalue & E1000_GEN_CTL_READY)
+                       break;
+       }
+       if (!(regvalue & E1000_GEN_CTL_READY)) {
+               DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
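
The address/data control registers written above pack an 8-bit payload and a register offset into a single 32-bit write and then expose a ready bit that is polled for completion. The sketch below shows that scheme against a fake register that completes immediately; the shift, ready-bit and timeout values are assumptions standing in for the driver's E1000_GEN_CTL_* constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ADDRESS_SHIFT 8            /* assumed; stands in for E1000_GEN_CTL_ADDRESS_SHIFT */
#define READY_BIT     (1u << 31)   /* assumed completion bit, polled after the write */
#define POLL_LIMIT    640

/* Fake register that reports completion immediately (illustration only). */
static uint32_t fake_reg;

static void     write_reg(uint32_t v)       { fake_reg = v | READY_BIT; }
static uint32_t read_reg(void)              { return fake_reg; }
static void     usec_delay(unsigned int us) { (void)us; }

/* Pack offset and data into one 32-bit write, then poll the ready bit. */
static bool write_8bit_ctrl(uint32_t offset, uint8_t data)
{
    write_reg((uint32_t)data | (offset << ADDRESS_SHIFT));

    for (int i = 0; i < POLL_LIMIT; i++) {
        usec_delay(5);
        if (read_reg() & READY_BIT)
            return true;
    }
    return false;                  /* hardware never reported completion */
}

int main(void)
{
    printf("completed: %d\n", write_8bit_ctrl(0x12, 0x34));
    return 0;
}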
diff --git a/lib/librte_pmd_e1000/e1000/e1000_mac.h b/lib/librte_pmd_e1000/e1000/e1000_mac.h
new file mode 100644 (file)
index 0000000..a5a98d0
--- /dev/null
@@ -0,0 +1,95 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+/*
+ * Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+void e1000_init_mac_ops_generic(struct e1000_hw *hw);
+void e1000_null_mac_generic(struct e1000_hw *hw);
+s32  e1000_null_ops_generic(struct e1000_hw *hw);
+s32  e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
+bool e1000_null_mng_mode(struct e1000_hw *hw);
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
+void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+s32  e1000_blink_led_generic(struct e1000_hw *hw);
+s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
+s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+s32  e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                     u16 *speed, u16 *duplex);
+s32  e1000_id_led_init_generic(struct e1000_hw *hw);
+s32  e1000_led_on_generic(struct e1000_hw *hw);
+s32  e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count);
+s32  e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32  e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_setup_led_generic(struct e1000_hw *hw);
+s32  e1000_setup_link_generic(struct e1000_hw *hw);
+s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                       u32 offset, u8 data);
+
+u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
diff --git a/lib/librte_pmd_e1000/e1000/e1000_manage.c b/lib/librte_pmd_e1000/e1000/e1000_manage.c
new file mode 100644 (file)
index 0000000..bb0a10b
--- /dev/null
@@ -0,0 +1,472 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *
+ *  Calculates the checksum for some buffer on a specified length.  The
+ *  checksum calculated is returned.
+ **/
+u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+       u32 i;
+       u8 sum = 0;
+
+       DEBUGFUNC("e1000_calculate_checksum");
+
+       if (!buffer)
+               return 0;
+
+       for (i = 0; i < length; i++)
+               sum += buffer[i];
+
+       return (u8) (0 - sum);
+}
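
The manageability checksum above is an 8-bit two's-complement sum: the returned byte is chosen so that adding it to the byte-wise sum of the buffer wraps to zero. A small self-contained illustration of that property follows; the helper names are ours, not part of the driver.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* 8-bit two's-complement checksum: sum of all bytes plus the result is 0. */
static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
    uint8_t sum = 0;

    for (size_t i = 0; i < len; i++)
        sum += buf[i];
    return (uint8_t)(0 - sum);
}

int main(void)
{
    uint8_t cookie[4] = { 0x49, 0x41, 0x4D, 0x54 };   /* arbitrary sample data */
    uint8_t csum = calc_checksum(cookie, sizeof(cookie));
    uint8_t total = csum;

    for (size_t i = 0; i < sizeof(cookie); i++)
        total += cookie[i];
    assert(total == 0);   /* the defining property of this checksum */
    return 0;
}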
+
+/**
+ *  e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command is completed.  It busy waits
+ *  if the previous command is not completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+       u32 hicr;
+       s32 ret_val = E1000_SUCCESS;
+       u8 i;
+
+       DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+       if (!(hw->mac.arc_subsystem_valid)) {
+               DEBUGOUT("ARC subsystem not valid.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Check that the host interface is enabled. */
+       hicr = E1000_READ_REG(hw, E1000_HICR);
+       if ((hicr & E1000_HICR_EN) == 0) {
+               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+       /* check the previous command is completed */
+       for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+               hicr = E1000_READ_REG(hw, E1000_HICR);
+               if (!(hicr & E1000_HICR_C))
+                       break;
+               msec_delay_irq(1);
+       }
+
+       if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+               DEBUGOUT("Previous command timeout failed .\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_mng_mode_generic - Generic check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the firmware semaphore register and returns TRUE (>0) if
+ *  manageability is enabled, else FALSE (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+       u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+       DEBUGFUNC("e1000_check_mng_mode_generic");
+
+
+       return (fwsm & E1000_FWSM_MODE_MASK) ==
+               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+       struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+       u32 *buffer = (u32 *)&hw->mng_cookie;
+       u32 offset;
+       s32 ret_val, hdr_csum, csum;
+       u8 i, len;
+
+       DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+       hw->mac.tx_pkt_filtering = TRUE;
+
+       /* No manageability, no filtering */
+       if (!hw->mac.ops.check_mng_mode(hw)) {
+               hw->mac.tx_pkt_filtering = FALSE;
+               goto out;
+       }
+
+       /*
+        * If we can't read from the host interface for whatever
+        * reason, disable filtering.
+        */
+       ret_val = hw->mac.ops.mng_enable_host_if(hw);
+       if (ret_val != E1000_SUCCESS) {
+               hw->mac.tx_pkt_filtering = FALSE;
+               goto out;
+       }
+
+       /* Read in the header.  Length and offset are in dwords. */
+       len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+       offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+       for (i = 0; i < len; i++)
+               *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+                                                          offset + i);
+       hdr_csum = hdr->checksum;
+       hdr->checksum = 0;
+       csum = e1000_calculate_checksum((u8 *)hdr,
+                                       E1000_MNG_DHCP_COOKIE_LENGTH);
+       /*
+        * If either the checksums or signature don't match, then
+        * the cookie area isn't considered valid, in which case we
+        * take the safe route of assuming Tx filtering is enabled.
+        */
+       if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+               hw->mac.tx_pkt_filtering = TRUE;
+               goto out;
+       }
+
+       /* Cookie area is valid, make the final check for filtering. */
+       if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
+               hw->mac.tx_pkt_filtering = FALSE;
+               goto out;
+       }
+
+out:
+       return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+                                      u16 length)
+{
+       struct e1000_host_mng_command_header hdr;
+       s32 ret_val;
+       u32 hicr;
+
+       DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+       hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+       hdr.command_length = length;
+       hdr.reserved1 = 0;
+       hdr.reserved2 = 0;
+       hdr.checksum = 0;
+
+       /* Enable the host interface */
+       ret_val = hw->mac.ops.mng_enable_host_if(hw);
+       if (ret_val)
+               goto out;
+
+       /* Populate the host interface with the contents of "buffer". */
+       ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
+                                         sizeof(hdr), &(hdr.checksum));
+       if (ret_val)
+               goto out;
+
+       /* Write the manageability command header */
+       ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
+       if (ret_val)
+               goto out;
+
+       /* Tell the ARC a new command is pending. */
+       hicr = E1000_READ_REG(hw, E1000_HICR);
+       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_mng_write_cmd_header_generic - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr)
+{
+       u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+       DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+       /* Write the whole command header structure with new checksum. */
+
+       hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+       length >>= 2;
+       /* Write the relevant command block into the ram area. */
+       for (i = 0; i < length; i++) {
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+                                           *((u32 *) hdr + i));
+               E1000_WRITE_FLUSH(hw);
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mng_host_if_write_generic - Write to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  It handles alignment so the writes are done in the most
+ *  efficient way, and it accumulates the sum of the buffer in the *sum
+ *  parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                    u16 length, u16 offset, u8 *sum)
+{
+       u8 *tmp;
+       u8 *bufptr = buffer;
+       u32 data = 0;
+       s32 ret_val = E1000_SUCCESS;
+       u16 remaining, i, j, prev_bytes;
+
+       DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+       /* sum is only the sum of the data; it is not a checksum */
+
+       if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+               ret_val = -E1000_ERR_PARAM;
+               goto out;
+       }
+
+       tmp = (u8 *)&data;
+       prev_bytes = offset & 0x3;
+       offset >>= 2;
+
+       if (prev_bytes) {
+               data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+               for (j = prev_bytes; j < sizeof(u32); j++) {
+                       *(tmp + j) = *bufptr++;
+                       *sum += *(tmp + j);
+               }
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+               length -= j - prev_bytes;
+               offset++;
+       }
+
+       remaining = length & 0x3;
+       length -= remaining;
+
+       /* Calculate length in DWORDs */
+       length >>= 2;
+
+       /*
+        * The device driver writes the relevant command block into the
+        * ram area.
+        */
+       for (i = 0; i < length; i++) {
+               for (j = 0; j < sizeof(u32); j++) {
+                       *(tmp + j) = *bufptr++;
+                       *sum += *(tmp + j);
+               }
+
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+                                           data);
+       }
+       if (remaining) {
+               for (j = 0; j < sizeof(u32); j++) {
+                       if (j < remaining)
+                               *(tmp + j) = *bufptr++;
+                       else
+                               *(tmp + j) = 0;
+
+                       *sum += *(tmp + j);
+               }
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_enable_mng_pass_thru - Check if management passthrough is needed
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies the hardware needs to leave interface enabled so that frames can
+ *  be directed to and from the management interface.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+       u32 manc;
+       u32 fwsm, factps;
+       bool ret_val = FALSE;
+
+       DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+       if (!hw->mac.asf_firmware_present)
+               goto out;
+
+       manc = E1000_READ_REG(hw, E1000_MANC);
+
+       if (!(manc & E1000_MANC_RCV_TCO_EN))
+               goto out;
+
+       if (hw->mac.has_fwsm) {
+               fwsm = E1000_READ_REG(hw, E1000_FWSM);
+               factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+               if (!(factps & E1000_FACTPS_MNGCG) &&
+                   ((fwsm & E1000_FWSM_MODE_MASK) ==
+                    (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+                       ret_val = TRUE;
+                       goto out;
+               }
+       } else if ((manc & E1000_MANC_SMBUS_EN) &&
+                   !(manc & E1000_MANC_ASF_EN)) {
+                       ret_val = TRUE;
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_host_interface_command - Writes buffer to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: contains a command to write
+ *  @length: the byte length of the buffer, must be a multiple of 4 bytes
+ *
+ *  Writes a buffer to the Host Interface.  Upon success, returns E1000_SUCCESS
+ *  else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+       u32 hicr, i;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_host_interface_command");
+
+       if (!(hw->mac.arc_subsystem_valid)) {
+               DEBUGOUT("Hardware doesn't support host interface command.\n");
+               goto out;
+       }
+
+       if (!hw->mac.asf_firmware_present) {
+               DEBUGOUT("Firmware is not present.\n");
+               goto out;
+       }
+
+       if (length == 0 || length & 0x3 ||
+           length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
+               DEBUGOUT("Buffer length failure.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Check that the host interface is enabled. */
+       hicr = E1000_READ_REG(hw, E1000_HICR);
+       if ((hicr & E1000_HICR_EN) == 0) {
+               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Calculate length in DWORDs */
+       length >>= 2;
+
+       /*
+        * The device driver writes the relevant command block
+        * into the ram area.
+        */
+       for (i = 0; i < length; i++)
+               E1000_WRITE_REG_ARRAY_DWORD(hw,
+                                           E1000_HOST_IF,
+                                           i,
+                                           *((u32 *)buffer + i));
+
+       /* Setting this bit tells the ARC that a new command is pending. */
+       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+       for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+               hicr = E1000_READ_REG(hw, E1000_HICR);
+               if (!(hicr & E1000_HICR_C))
+                       break;
+               msec_delay(1);
+       }
+
+       /* Check command successful completion. */
+       if (i == E1000_HI_COMMAND_TIMEOUT ||
+           (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
+               DEBUGOUT("Command has failed with no status valid.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       for (i = 0; i < length; i++)
+               *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+                                                                 E1000_HOST_IF,
+                                                                 i);
+
+out:
+       return ret_val;
+}
+
diff --git a/lib/librte_pmd_e1000/e1000/e1000_manage.h b/lib/librte_pmd_e1000/e1000/e1000_manage.h
new file mode 100644 (file)
index 0000000..9a8d756
--- /dev/null
@@ -0,0 +1,90 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                     u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+                                       u8 *buffer, u16 length);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+
+enum e1000_mng_mode {
+       e1000_mng_mode_none = 0,
+       e1000_mng_mode_asf,
+       e1000_mng_mode_pt,
+       e1000_mng_mode_ipmi,
+       e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG    0x20000000
+
+#define E1000_FWSM_MODE_MASK  0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE                  0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+
+#define E1000_HICR_EN              0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C               0x02
+#define E1000_HICR_SV              0x04  /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET        0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE  0x544D4149
+
+#endif
diff --git a/lib/librte_pmd_e1000/e1000/e1000_mbx.c b/lib/librte_pmd_e1000/e1000/e1000_mbx.c
new file mode 100644 (file)
index 0000000..67dbc64
--- /dev/null
@@ -0,0 +1,764 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_mbx.h"
+
+/**
+ *  e1000_null_mbx_check_for_flag - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id)
+{
+       DEBUGFUNC("e1000_null_mbx_check_flag");
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_mbx_transact - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size,
+                            u16 mbx_id)
+{
+       DEBUGFUNC("e1000_null_mbx_rw_msg");
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read the message from the buffer
+ **/
+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_read_mbx");
+
+       /* limit read to size of mailbox */
+       if (size > mbx->size)
+               size = mbx->size;
+
+       if (mbx->ops.read)
+               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_mbx");
+
+       if (size > mbx->size)
+               ret_val = -E1000_ERR_MBX;
+
+       else if (mbx->ops.write)
+               ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_msg");
+
+       if (mbx->ops.check_for_msg)
+               ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_ack");
+
+       if (mbx->ops.check_for_ack)
+               ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_rst");
+
+       if (mbx->ops.check_for_rst)
+               ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+
+       DEBUGFUNC("e1000_poll_for_msg");
+
+       if (!countdown || !mbx->ops.check_for_msg)
+               goto out;
+
+       while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+               countdown--;
+               if (!countdown)
+                       break;
+               usec_delay(mbx->usec_delay);
+       }
+
+       /* if we failed, all future posted messages fail until reset */
+       if (!countdown)
+               mbx->timeout = 0;
+out:
+       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ *  e1000_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+
+       DEBUGFUNC("e1000_poll_for_ack");
+
+       if (!countdown || !mbx->ops.check_for_ack)
+               goto out;
+
+       while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+               countdown--;
+               if (!countdown)
+                       break;
+               usec_delay(mbx->usec_delay);
+       }
+
+       /* if we failed, all future posted messages fail until reset */
+       if (!countdown)
+               mbx->timeout = 0;
+out:
+       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ *  e1000_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_read_posted_mbx");
+
+       if (!mbx->ops.read)
+               goto out;
+
+       ret_val = e1000_poll_for_msg(hw, mbx_id);
+
+       /* if ack received read message, otherwise we timed out */
+       if (!ret_val)
+               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_write_posted_mbx");
+
+       /* exit if either we can't write or there isn't a defined timeout */
+       if (!mbx->ops.write || !mbx->timeout)
+               goto out;
+
+       /* send msg */
+       ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+       /* if msg sent wait until we receive an ack */
+       if (!ret_val)
+               ret_val = e1000_poll_for_ack(hw, mbx_id);
+out:
+       return ret_val;
+}
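
e1000_write_posted_mbx() above is the "post and wait" half of the mailbox protocol: write the message, poll for the acknowledgement, and on timeout zero the mailbox timeout so later posted operations fail fast until a reset. The sketch below mirrors that flow with a hypothetical mbox structure and stub callbacks in place of hw->mbx and its ops; none of these names belong to the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mailbox abstraction; the driver keeps the real one in hw->mbx. */
struct mbox {
    int timeout;                              /* polls allowed before giving up */
    unsigned int usec_delay;                  /* delay between polls */
    int  (*write)(uint32_t *msg, uint16_t size);
    bool (*ack_pending)(void);                /* true while no ack has arrived */
};

static int  fake_write(uint32_t *msg, uint16_t size) { (void)msg; (void)size; return 0; }
static bool fake_ack_pending(void)                   { return false; /* ack immediately */ }
static void usec_delay(unsigned int us)              { (void)us; }

/* Post a message, then poll for the acknowledgement; apply the timeout rule above. */
static int write_posted(struct mbox *mbx, uint32_t *msg, uint16_t size)
{
    int countdown = mbx->timeout;
    int ret;

    if (!countdown || !mbx->write)
        return -1;                            /* mailbox already marked dead */

    ret = mbx->write(msg, size);
    if (ret)
        return ret;

    while (countdown && mbx->ack_pending()) {
        countdown--;
        if (!countdown)
            break;
        usec_delay(mbx->usec_delay);
    }

    if (!countdown) {
        mbx->timeout = 0;                     /* block future posted ops until reset */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct mbox mbx = { 16, 50, fake_write, fake_ack_pending };
    uint32_t msg[4] = { 0 };

    printf("posted: %d\n", write_posted(&mbx, msg, 4));
    return 0;
}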
+
+/**
+ *  e1000_init_mbx_ops_generic - Initialize mbx function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the function pointers to no-op functions
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       mbx->ops.init_params = e1000_null_ops_generic;
+       mbx->ops.read = e1000_null_mbx_transact;
+       mbx->ops.write = e1000_null_mbx_transact;
+       mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
+       mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
+       mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
+       mbx->ops.read_posted = e1000_read_posted_mbx;
+       mbx->ops.write_posted = e1000_write_posted_mbx;
+}
+
+/**
+ *  e1000_read_v2p_mailbox - read v2p mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  This function is used to read the v2p mailbox without losing the read to
+ *  clear status bits.
+ **/
+static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
+{
+       u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0));
+
+       v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
+       hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
+
+       return v2p_mailbox;
+}
+
+/**
+ *  e1000_check_for_bit_vf - Determine if a status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  This function is used to check for the read to clear bits within
+ *  the V2P mailbox.
+ **/
+static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
+{
+       u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
+       s32 ret_val = -E1000_ERR_MBX;
+
+       if (v2p_mailbox & mask)
+               ret_val = E1000_SUCCESS;
+
+       hw->dev_spec.vf.v2p_mailbox &= ~mask;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_msg_vf");
+
+       if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.reqs++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_ack_vf");
+
+       if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.acks++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_rst_vf - checks to see if the PF has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the reset bits or else ERR_MBX
+ **/
+static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_rst_vf");
+
+       if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
+                                        E1000_V2PMAILBOX_RSTI))) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_obtain_mbx_lock_vf");
+
+       /* Take ownership of the buffer */
+       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+       /* reserve mailbox for vf use */
+       if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
+               ret_val = E1000_SUCCESS;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+                              u16 mbx_id)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("e1000_write_mbx_vf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       e1000_check_for_msg_vf(hw, 0);
+       e1000_check_for_ack_vf(hw, 0);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       /* Drop VFU and interrupt the PF to tell it a message has been sent */
+       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
+
+out_no_write:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read the message from the buffer
+ **/
+static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+                             u16 mbx_id)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 i;
+
+       DEBUGFUNC("e1000_read_mbx_vf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message from the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i);
+
+       /* Acknowledge receipt and release mailbox, then we're done */
+       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ **/
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+
+       /* start mailbox as timed out and let the reset_hw call set the timeout
+        * value to begin communications */
+       mbx->timeout = 0;
+       mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+       mbx->size = E1000_VFMAILBOX_SIZE;
+
+       mbx->ops.read = e1000_read_mbx_vf;
+       mbx->ops.write = e1000_write_mbx_vf;
+       mbx->ops.read_posted = e1000_read_posted_mbx;
+       mbx->ops.write_posted = e1000_write_posted_mbx;
+       mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+       mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+       mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+
+       return E1000_SUCCESS;
+}
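+
+/*
+ * Illustrative sketch (not part of the shared code): once
+ * e1000_init_mbx_params_vf() has run, a VF driver could post a request to
+ * the PF and wait for the reply roughly as follows.  The message value and
+ * local variables are hypothetical; only mailbox 0 exists on the VF side.
+ *
+ *     u32 msg[E1000_VFMAILBOX_SIZE] = { E1000_VF_RESET };
+ *     struct e1000_mbx_info *mbx = &hw->mbx;
+ *
+ *     mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
+ *     if (mbx->ops.write_posted(hw, msg, 1, 0) == E1000_SUCCESS)
+ *             (void)mbx->ops.read_posted(hw, msg, 1, 0);
+ */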
+
+static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+       u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
+       s32 ret_val = -E1000_ERR_MBX;
+
+       if (mbvficr & mask) {
+               ret_val = E1000_SUCCESS;
+               E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Request bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_msg_pf");
+
+       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.reqs++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_ack_pf");
+
+       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.acks++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has been reset (VFLRE set) or else ERR_MBX
+ **/
+static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_rst_pf");
+
+       if (vflre & (1 << vf_number)) {
+               ret_val = E1000_SUCCESS;
+               E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+       u32 p2v_mailbox;
+
+       DEBUGFUNC("e1000_obtain_mbx_lock_pf");
+
+       /* Take ownership of the buffer */
+       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+       /* reserve mailbox for vf use */
+       p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+       if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+               ret_val = E1000_SUCCESS;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+                              u16 vf_number)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("e1000_write_mbx_pf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       e1000_check_for_msg_pf(hw, vf_number);
+       e1000_check_for_ack_pf(hw, vf_number);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
+
+       /* Interrupt VF to tell it a message has been sent and release buffer*/
+       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+                             u16 vf_number)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("e1000_read_mbx_pf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
+
+       /* Acknowledge the message and release buffer */
+       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ **/
+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+       case e1000_i350:
+               mbx->timeout = 0;
+               mbx->usec_delay = 0;
+
+               mbx->size = E1000_VFMAILBOX_SIZE;
+
+               mbx->ops.read = e1000_read_mbx_pf;
+               mbx->ops.write = e1000_write_mbx_pf;
+               mbx->ops.read_posted = e1000_read_posted_mbx;
+               mbx->ops.write_posted = e1000_write_posted_mbx;
+               mbx->ops.check_for_msg = e1000_check_for_msg_pf;
+               mbx->ops.check_for_ack = e1000_check_for_ack_pf;
+               mbx->ops.check_for_rst = e1000_check_for_rst_pf;
+
+               mbx->stats.msgs_tx = 0;
+               mbx->stats.msgs_rx = 0;
+               mbx->stats.reqs = 0;
+               mbx->stats.acks = 0;
+               mbx->stats.rsts = 0;
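+		/* fall through - supported MAC types also return success */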
+       default:
+               return E1000_SUCCESS;
+       }
+}
+
diff --git a/lib/librte_pmd_e1000/e1000/e1000_mbx.h b/lib/librte_pmd_e1000/e1000/e1000_mbx.h
new file mode 100644 (file)
index 0000000..6e9d538
--- /dev/null
@@ -0,0 +1,106 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_api.h"
+
+/* Define mailbox register bits */
+#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_P2VMAILBOX_STS   0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is E1000_PF_*.
+ * Message replies are the original value or'd with the E1000_VT_MSGTYPE_*
+ * flags below (e.g. an ACK is the request or'd with 0x80000000).
+ */
+#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
+                                               * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
+                                               * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
+                                                 clear to send requests */
+#define E1000_VT_MSGINFO_SHIFT    16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET            0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST_OVERFLOW   (0x80 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_VLAN_ADD             (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_UNICAST      (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
+
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define E1000_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
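+
+/*
+ * Illustrative example (comment only): a VF "add VLAN" request and the
+ * PF's matching reply are composed from the defines above as
+ *
+ *     request: E1000_VF_SET_VLAN | E1000_VF_SET_VLAN_ADD  = 0x00010004
+ *     reply:   E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_ACK   = 0x80000004
+ *
+ * i.e. bits 23:16 carry the extra info and bit 31 marks the PF's ACK; the
+ * VLAN id itself would typically travel in a following mailbox word.
+ */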
+
+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_check_for_msg(struct e1000_hw *, u16);
+s32 e1000_check_for_ack(struct e1000_hw *, u16);
+s32 e1000_check_for_rst(struct e1000_hw *, u16);
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_vf(struct e1000_hw *);
+s32 e1000_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/lib/librte_pmd_e1000/e1000/e1000_nvm.c b/lib/librte_pmd_e1000/e1000/e1000_nvm.c
new file mode 100644 (file)
index 0000000..1c44270
--- /dev/null
@@ -0,0 +1,1071 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+static void e1000_stop_nvm(struct e1000_hw *hw);
+static void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_nvm_ops_generic - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the function pointers to no-op functions
+ **/
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       DEBUGFUNC("e1000_init_nvm_ops_generic");
+
+       /* Initialize function pointers */
+       nvm->ops.init_params = e1000_null_ops_generic;
+       nvm->ops.acquire = e1000_null_ops_generic;
+       nvm->ops.read = e1000_null_read_nvm;
+       nvm->ops.release = e1000_null_nvm_generic;
+       nvm->ops.reload = e1000_reload_nvm_generic;
+       nvm->ops.update = e1000_null_ops_generic;
+       nvm->ops.valid_led_default = e1000_null_led_default;
+       nvm->ops.validate = e1000_null_ops_generic;
+       nvm->ops.write = e1000_null_write_nvm;
+}
+
+/**
+ *  e1000_null_read_nvm - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
+{
+       DEBUGFUNC("e1000_null_read_nvm");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_nvm_generic - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_nvm_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_nvm_generic");
+       return;
+}
+
+/**
+ *  e1000_null_led_default - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data)
+{
+       DEBUGFUNC("e1000_null_led_default");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_write_nvm - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
+{
+       DEBUGFUNC("e1000_null_write_nvm");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+       *eecd = *eecd | E1000_EECD_SK;
+       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+       E1000_WRITE_FLUSH(hw);
+       usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+       *eecd = *eecd & ~E1000_EECD_SK;
+       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+       E1000_WRITE_FLUSH(hw);
+       usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       u32 mask;
+
+       DEBUGFUNC("e1000_shift_out_eec_bits");
+
+       mask = 0x01 << (count - 1);
+       if (nvm->type == e1000_nvm_eeprom_microwire)
+               eecd &= ~E1000_EECD_DO;
+	else if (nvm->type == e1000_nvm_eeprom_spi)
+               eecd |= E1000_EECD_DO;
+
+       do {
+               eecd &= ~E1000_EECD_DI;
+
+               if (data & mask)
+                       eecd |= E1000_EECD_DI;
+
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+
+               usec_delay(nvm->delay_usec);
+
+               e1000_raise_eec_clk(hw, &eecd);
+               e1000_lower_eec_clk(hw, &eecd);
+
+               mask >>= 1;
+       } while (mask);
+
+       eecd &= ~E1000_EECD_DI;
+       E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+       u32 eecd;
+       u32 i;
+       u16 data;
+
+       DEBUGFUNC("e1000_shift_in_eec_bits");
+
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+
+       eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+       data = 0;
+
+       for (i = 0; i < count; i++) {
+               data <<= 1;
+               e1000_raise_eec_clk(hw, &eecd);
+
+               eecd = E1000_READ_REG(hw, E1000_EECD);
+
+               eecd &= ~E1000_EECD_DI;
+               if (eecd & E1000_EECD_DO)
+                       data |= 1;
+
+               e1000_lower_eec_clk(hw, &eecd);
+       }
+
+       return data;
+}
+
+/**
+ *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+       u32 attempts = 100000;
+       u32 i, reg = 0;
+       s32 ret_val = -E1000_ERR_NVM;
+
+       DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+       for (i = 0; i < attempts; i++) {
+               if (ee_reg == E1000_NVM_POLL_READ)
+                       reg = E1000_READ_REG(hw, E1000_EERD);
+               else
+                       reg = E1000_READ_REG(hw, E1000_EEWR);
+
+               if (reg & E1000_NVM_RW_REG_DONE) {
+                       ret_val = E1000_SUCCESS;
+                       break;
+               }
+
+               usec_delay(5);
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_acquire_nvm_generic");
+
+       E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+
+       while (timeout) {
+               if (eecd & E1000_EECD_GNT)
+                       break;
+               usec_delay(5);
+               eecd = E1000_READ_REG(hw, E1000_EECD);
+               timeout--;
+       }
+
+       if (!timeout) {
+               eecd &= ~E1000_EECD_REQ;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               DEBUGOUT("Could not acquire NVM grant\n");
+               ret_val = -E1000_ERR_NVM;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+       DEBUGFUNC("e1000_standby_nvm");
+
+       if (nvm->type == e1000_nvm_eeprom_microwire) {
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+
+               e1000_raise_eec_clk(hw, &eecd);
+
+               /* Select EEPROM */
+               eecd |= E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+
+               e1000_lower_eec_clk(hw, &eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+               /* Toggle CS to flush commands */
+               eecd |= E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+               eecd &= ~E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+       }
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+       u32 eecd;
+
+       DEBUGFUNC("e1000_stop_nvm");
+
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+       if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+               /* Pull CS high */
+               eecd |= E1000_EECD_CS;
+               e1000_lower_eec_clk(hw, &eecd);
+       } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+               /* CS on Microwire is active-high */
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               e1000_raise_eec_clk(hw, &eecd);
+               e1000_lower_eec_clk(hw, &eecd);
+       }
+}
+
+/**
+ *  e1000_release_nvm_generic - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+       u32 eecd;
+
+       DEBUGFUNC("e1000_release_nvm_generic");
+
+       e1000_stop_nvm(hw);
+
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+       eecd &= ~E1000_EECD_REQ;
+       E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       s32 ret_val = E1000_SUCCESS;
+       u8 spi_stat_reg;
+
+       DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+       if (nvm->type == e1000_nvm_eeprom_microwire) {
+               /* Clear SK and DI */
+               eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               /* Set CS */
+               eecd |= E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+               u16 timeout = NVM_MAX_RETRY_SPI;
+
+               /* Clear SK and CS */
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               usec_delay(1);
+
+               /*
+                * Read "Status Register" repeatedly until the LSB is cleared.
+                * The EEPROM will signal that the command has been completed
+                * by clearing bit 0 of the internal status register.  If it's
+                * not cleared within 'timeout', then error out.
+                */
+               while (timeout) {
+                       e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+                                                hw->nvm.opcode_bits);
+                       spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+                       if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+                               break;
+
+                       usec_delay(5);
+                       e1000_standby_nvm(hw);
+                       timeout--;
+               }
+
+               if (!timeout) {
+                       DEBUGOUT("SPI NVM Status error\n");
+                       ret_val = -E1000_ERR_NVM;
+                       goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_spi - Reads EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i = 0;
+       s32 ret_val;
+       u16 word_in;
+       u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+       DEBUGFUNC("e1000_read_nvm_spi");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       e1000_standby_nvm(hw);
+
+       if ((nvm->address_bits == 8) && (offset >= 128))
+               read_opcode |= NVM_A8_OPCODE_SPI;
+
+       /* Send the READ command (opcode + addr) */
+       e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+       e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+       /*
+        * Read the data.  SPI NVMs increment the address with each byte
+        * read and will roll over if reading beyond the end.  This allows
+        * us to read the whole NVM from any offset
+        */
+       for (i = 0; i < words; i++) {
+               word_in = e1000_shift_in_eec_bits(hw, 16);
+               data[i] = (word_in >> 8) | (word_in << 8);
+       }
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
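+
+/*
+ * Illustrative usage (comment only): callers normally go through the ops
+ * table rather than calling a bus-specific reader directly.  Assuming the
+ * MAC address occupies the first three NVM words (the usual layout), a
+ * hypothetical caller could do:
+ *
+ *     u16 mac_words[3];
+ *     s32 ret = hw->nvm.ops.read(hw, 0, 3, mac_words);
+ */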
+
+/**
+ *  e1000_read_nvm_microwire - Reads EEPROM using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                             u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i = 0;
+       s32 ret_val;
+       u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
+
+       DEBUGFUNC("e1000_read_nvm_microwire");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       for (i = 0; i < words; i++) {
+               /* Send the READ command (opcode + addr) */
+               e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+               e1000_shift_out_eec_bits(hw, (u16)(offset + i),
+                                       nvm->address_bits);
+
+               /*
+                * Read the data.  For microwire, each word requires the
+                * overhead of setup and tear-down.
+                */
+               data[i] = e1000_shift_in_eec_bits(hw, 16);
+               e1000_standby_nvm(hw);
+       }
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i, eerd = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_nvm_eerd");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * too many words for the offset, and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       for (i = 0; i < words; i++) {
+               eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+                      E1000_NVM_RW_REG_START;
+
+               E1000_WRITE_REG(hw, E1000_EERD, eerd);
+               ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+               if (ret_val)
+                       break;
+
+               data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+                          E1000_NVM_RW_REG_DATA);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       s32 ret_val;
+       u16 widx = 0;
+
+       DEBUGFUNC("e1000_write_nvm_spi");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       while (widx < words) {
+               u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+               ret_val = e1000_ready_nvm_eeprom(hw);
+               if (ret_val)
+                       goto release;
+
+               e1000_standby_nvm(hw);
+
+               /* Send the WRITE ENABLE command (8 bit opcode) */
+               e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+                                        nvm->opcode_bits);
+
+               e1000_standby_nvm(hw);
+
+               /*
+                * Some SPI eeproms use the 8th address bit embedded in the
+                * opcode
+                */
+               if ((nvm->address_bits == 8) && (offset >= 128))
+                       write_opcode |= NVM_A8_OPCODE_SPI;
+
+               /* Send the Write command (8-bit opcode + addr) */
+               e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+               e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+                                        nvm->address_bits);
+
+               /* Loop to allow for up to whole page write of eeprom */
+               while (widx < words) {
+                       u16 word_out = data[widx];
+                       word_out = (word_out >> 8) | (word_out << 8);
+                       e1000_shift_out_eec_bits(hw, word_out, 16);
+                       widx++;
+
+                       if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+                               e1000_standby_nvm(hw);
+                               break;
+                       }
+               }
+       }
+
+       msec_delay(10);
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_microwire - Writes EEPROM using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using microwire interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       s32  ret_val;
+       u32 eecd;
+       u16 words_written = 0;
+       u16 widx = 0;
+
+       DEBUGFUNC("e1000_write_nvm_microwire");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
+                                (u16)(nvm->opcode_bits + 2));
+
+       e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+       e1000_standby_nvm(hw);
+
+       while (words_written < words) {
+               e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
+                                        nvm->opcode_bits);
+
+               e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
+                                        nvm->address_bits);
+
+               e1000_shift_out_eec_bits(hw, data[words_written], 16);
+
+               e1000_standby_nvm(hw);
+
+               for (widx = 0; widx < 200; widx++) {
+                       eecd = E1000_READ_REG(hw, E1000_EECD);
+                       if (eecd & E1000_EECD_DO)
+                               break;
+                       usec_delay(50);
+               }
+
+               if (widx == 200) {
+                       DEBUGOUT("NVM Write did not complete\n");
+                       ret_val = -E1000_ERR_NVM;
+                       goto release;
+               }
+
+               e1000_standby_nvm(hw);
+
+               words_written++;
+       }
+
+       e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
+                                (u16)(nvm->opcode_bits + 2));
+
+       e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_pba_string_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+                                  u32 pba_num_size)
+{
+       s32 ret_val;
+       u16 nvm_data;
+       u16 pba_ptr;
+       u16 offset;
+       u16 length;
+
+       DEBUGFUNC("e1000_read_pba_string_generic");
+
+       if (pba_num == NULL) {
+               DEBUGOUT("PBA string buffer was null\n");
+               ret_val = E1000_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       /*
+        * if nvm_data is not ptr guard the PBA must be in legacy format which
+        * means pba_ptr is actually our second data word for the PBA number
+        * and we can decode it into an ascii string
+        */
+       if (nvm_data != NVM_PBA_PTR_GUARD) {
+               DEBUGOUT("NVM PBA number is not stored as string\n");
+
+               /* we will need 11 characters to store the PBA */
+               if (pba_num_size < 11) {
+                       DEBUGOUT("PBA string buffer too small\n");
+                       return E1000_ERR_NO_SPACE;
+               }
+
+               /* extract hex string from data and pba_ptr */
+               pba_num[0] = (nvm_data >> 12) & 0xF;
+               pba_num[1] = (nvm_data >> 8) & 0xF;
+               pba_num[2] = (nvm_data >> 4) & 0xF;
+               pba_num[3] = nvm_data & 0xF;
+               pba_num[4] = (pba_ptr >> 12) & 0xF;
+               pba_num[5] = (pba_ptr >> 8) & 0xF;
+               pba_num[6] = '-';
+               pba_num[7] = 0;
+               pba_num[8] = (pba_ptr >> 4) & 0xF;
+               pba_num[9] = pba_ptr & 0xF;
+
+               /* put a null character on the end of our string */
+               pba_num[10] = '\0';
+
+               /* switch all the data but the '-' to hex char */
+               for (offset = 0; offset < 10; offset++) {
+                       if (pba_num[offset] < 0xA)
+                               pba_num[offset] += '0';
+                       else if (pba_num[offset] < 0x10)
+                               pba_num[offset] += 'A' - 0xA;
+               }
+
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (length == 0xFFFF || length == 0) {
+               DEBUGOUT("NVM PBA number section invalid length\n");
+               ret_val = E1000_ERR_NVM_PBA_SECTION;
+               goto out;
+       }
+       /* check if pba_num buffer is big enough */
+       if (pba_num_size < (((u32)length * 2) - 1)) {
+               DEBUGOUT("PBA string buffer too small\n");
+               ret_val = E1000_ERR_NO_SPACE;
+               goto out;
+       }
+
+       /* trim pba length from start of string */
+       pba_ptr++;
+       length--;
+
+       for (offset = 0; offset < length; offset++) {
+               ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+               pba_num[offset * 2] = (u8)(nvm_data >> 8);
+               pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+       }
+       pba_num[offset * 2] = '\0';
+
+out:
+       return ret_val;
+}
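+
+/*
+ * Example (comment only): in the legacy (non-pointer) format, PBA words
+ * 0x1234 and 0x56AB would be decoded by the code above into the
+ * 10-character string "123456-0AB".
+ */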
+
+/**
+ *  e1000_read_pba_length_generic - Read device part number length
+ *  @hw: pointer to the HW structure
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number length from the EEPROM and
+ *  stores the value in pba_num_size.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+       s32 ret_val;
+       u16 nvm_data;
+       u16 pba_ptr;
+       u16 length;
+
+       DEBUGFUNC("e1000_read_pba_length_generic");
+
+       if (pba_num_size == NULL) {
+               DEBUGOUT("PBA buffer size was null\n");
+               ret_val = E1000_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+        /* if data is not ptr guard the PBA must be in legacy format */
+       if (nvm_data != NVM_PBA_PTR_GUARD) {
+               *pba_num_size = 11;
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (length == 0xFFFF || length == 0) {
+               DEBUGOUT("NVM PBA number section invalid length\n");
+               ret_val = E1000_ERR_NVM_PBA_SECTION;
+               goto out;
+       }
+
+       /*
+        * Convert from length in u16 values to u8 chars, add 1 for NULL,
+        * and subtract 2 because length field is included in length.
+        */
+       *pba_num_size = ((u32)length * 2) - 1;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from receive address register 0 (RAR0),
+ *  which the hardware loads from the EEPROM at reset, and stores it as both
+ *  the permanent and the currently programmed address.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+       u32 rar_high;
+       u32 rar_low;
+       u16 i;
+
+       rar_high = E1000_READ_REG(hw, E1000_RAH(0));
+       rar_low = E1000_READ_REG(hw, E1000_RAL(0));
+
+       for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+               hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+       for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+               hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+       for (i = 0; i < ETH_ADDR_LEN; i++)
+               hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+       for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+
+       if (checksum != (u16) NVM_SUM) {
+               DEBUGOUT("NVM Checksum Invalid\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_generic");
+
+       for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error while updating checksum.\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+       checksum = (u16) NVM_SUM - checksum;
+       ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+       if (ret_val)
+               DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+out:
+       return ret_val;
+}
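+
+/*
+ * Illustrative note (comment only): the checksum word at NVM_CHECKSUM_REG is
+ * chosen so that the 16-bit sum of all words up to and including it equals
+ * NVM_SUM (0xBABA).  A hypothetical provisioning flow would therefore be:
+ *
+ *     hw->nvm.ops.write(hw, offset, 1, &new_word);
+ *     e1000_update_nvm_checksum_generic(hw);
+ *     ret = e1000_validate_nvm_checksum_generic(hw);
+ */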
+
+/**
+ *  e1000_reload_nvm_generic - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+static void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+       u32 ctrl_ext;
+
+       DEBUGFUNC("e1000_reload_nvm_generic");
+
+       usec_delay(10);
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+       E1000_WRITE_FLUSH(hw);
+}
+
diff --git a/lib/librte_pmd_e1000/e1000/e1000_nvm.h b/lib/librte_pmd_e1000/e1000/e1000_nvm.h
new file mode 100644 (file)
index 0000000..6bba641
--- /dev/null
@@ -0,0 +1,66 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
+s32  e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+void e1000_null_nvm_generic(struct e1000_hw *hw);
+s32  e1000_null_led_default(struct e1000_hw *hw, u16 *data);
+s32  e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32  e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32  e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+                                   u32 pba_num_size);
+s32  e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
+s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                              u16 words, u16 *data);
+s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32  e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                               u16 words, u16 *data);
+s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE  0xDB00
+
+#endif
diff --git a/lib/librte_pmd_e1000/e1000/e1000_osdep.c b/lib/librte_pmd_e1000/e1000/e1000_osdep.c
new file mode 100644 (file)
index 0000000..203dcc8
--- /dev/null
@@ -0,0 +1,72 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/*
+ * NOTE: the following routines follow the e1000
+ *     naming style and are provided to the shared
+ *     code, but their implementation is OS specific
+ */
+
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       return;
+}
+
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       *value = 0;
+       return;
+}
+
+/*
+ * Read the PCI Express capabilities
+ */
+int32_t
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/*
+ * Write the PCI Express capabilities
+ */
+int32_t
+e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       return E1000_NOT_IMPLEMENTED;
+}
diff --git a/lib/librte_pmd_e1000/e1000/e1000_osdep.h b/lib/librte_pmd_e1000/e1000/e1000_osdep.h
new file mode 100644 (file)
index 0000000..cf460d5
--- /dev/null
@@ -0,0 +1,128 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+#include "../e1000_logs.h"
+
+/* Remove some compiler warnings for the files in this dir */
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:2259) /* conversion may lose significant bits */
+#pragma warning(disable:869)  /* Parameter was never referenced */
+#pragma warning(disable:181)  /* Arg incompatible with format string */
+#else
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Wformat"
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+#endif
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+#define msec_delay_irq(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F)            DEBUGOUT(F);
+#define DEBUGOUT(S, args...)    PMD_DRV_LOG(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...)   DEBUGOUT(S, ##args)
+
+#define FALSE                  0
+#define TRUE                   1
+
+typedef uint64_t       u64;
+typedef uint32_t       u32;
+typedef uint16_t       u16;
+typedef uint8_t                u8;
+typedef int64_t                s64;
+typedef int32_t                s32;
+typedef int16_t                s16;
+typedef int8_t         s8;
+typedef int            bool;
+
+#define __le16         u16
+#define __le32         u32
+#define __le64         u64
+
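+/* Reading the STATUS register back flushes any posted MMIO writes. */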
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+#define E1000_PCI_REG_WRITE(reg, value) do { \
+       E1000_PCI_REG((reg)) = (value); \
+} while (0)
+
+#define E1000_PCI_REG_ADDR(hw, reg) \
+       ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+       E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+static inline uint32_t e1000_read_addr(volatile void* addr)
+{
+       return E1000_PCI_REG(addr);
+}
+
+/* Register READ/WRITE macros */
+
+#define E1000_READ_REG(hw, reg) \
+       e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg)))
+
+#define E1000_WRITE_REG(hw, reg, value) \
+       E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define E1000_READ_REG_ARRAY(hw, reg, index) \
+       E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \
+       E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#endif /* _E1000_OSDEP_H_ */
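
The macros above wrap direct MMIO accesses to the device registers mapped at hw_addr. As a rough illustration only (not part of this patch), here is a minimal, self-contained sketch of the same access pattern, using a hypothetical demo_hw struct with just the hw_addr field that E1000_PCI_REG_ADDR relies on:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct e1000_hw: only the hw_addr member that
 * the register macros above use. */
struct demo_hw {
	uint8_t *hw_addr;	/* base of the mapped BAR0 region */
};

/* Same pattern as E1000_PCI_REG/E1000_PCI_REG_ADDR: add the register offset
 * to the base address and access it as a volatile 32-bit word. */
#define DEMO_REG(hw, off) (*(volatile uint32_t *)((hw)->hw_addr + (off)))

int main(void)
{
	static uint8_t bar0[0x100];	/* fake MMIO window for the demo */
	struct demo_hw hw = { .hw_addr = bar0 };

	DEMO_REG(&hw, 0x08) = 0x12345678;		/* "write" a register */
	printf("0x%08x\n", DEMO_REG(&hw, 0x08));	/* read it back */
	return 0;
}
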
diff --git a/lib/librte_pmd_e1000/e1000/e1000_phy.c b/lib/librte_pmd_e1000/e1000/e1000_phy.c
new file mode 100644 (file)
index 0000000..aede670
--- /dev/null
@@ -0,0 +1,2988 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+       0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_m88_cable_length_table) / \
+                 sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+       6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+       26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+       44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+       66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+       87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+       100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+       124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_igp_2_cable_length_table) / \
+                 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ *  e1000_init_phy_ops_generic - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the function pointers to no-op functions
+ **/
+void e1000_init_phy_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       DEBUGFUNC("e1000_init_phy_ops_generic");
+
+       /* Initialize function pointers */
+       phy->ops.init_params = e1000_null_ops_generic;
+       phy->ops.acquire = e1000_null_ops_generic;
+       phy->ops.check_polarity = e1000_null_ops_generic;
+       phy->ops.check_reset_block = e1000_null_ops_generic;
+       phy->ops.commit = e1000_null_ops_generic;
+       phy->ops.force_speed_duplex = e1000_null_ops_generic;
+       phy->ops.get_cfg_done = e1000_null_ops_generic;
+       phy->ops.get_cable_length = e1000_null_ops_generic;
+       phy->ops.get_info = e1000_null_ops_generic;
+       phy->ops.read_reg = e1000_null_read_reg;
+       phy->ops.read_reg_locked = e1000_null_read_reg;
+       phy->ops.release = e1000_null_phy_generic;
+       phy->ops.reset = e1000_null_ops_generic;
+       phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
+       phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
+       phy->ops.write_reg = e1000_null_write_reg;
+       phy->ops.write_reg_locked = e1000_null_write_reg;
+       phy->ops.power_up = e1000_null_phy_generic;
+       phy->ops.power_down = e1000_null_phy_generic;
+}
+
+/**
+ *  e1000_null_read_reg - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       DEBUGFUNC("e1000_null_read_reg");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_phy_generic - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_phy_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_phy_generic");
+       return;
+}
+
+/**
+ *  e1000_null_lplu_state - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active)
+{
+       DEBUGFUNC("e1000_null_lplu_state");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_write_reg - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       DEBUGFUNC("e1000_null_write_reg");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+       u32 manc;
+
+       DEBUGFUNC("e1000_check_reset_block");
+
+       manc = E1000_READ_REG(hw, E1000_MANC);
+
+       return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+              E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_id;
+
+       DEBUGFUNC("e1000_get_phy_id");
+
+       if (!(phy->ops.read_reg))
+               goto out;
+
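+       /*
+        * The PHY ID is split across two registers: PHY_ID1 supplies the
+        * upper 16 bits and PHY_ID2 supplies the remaining ID bits, with
+        * the revision in its low nibble.
+        */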
+       ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+       if (ret_val)
+               goto out;
+
+       phy->id = (u32)(phy_id << 16);
+       usec_delay(20);
+       ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+       if (ret_val)
+               goto out;
+
+       phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+       phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_reset_dsp_generic - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+       if (!(hw->phy.ops.write_reg))
+               goto out;
+
+       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+       if (ret_val)
+               goto out;
+
+       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, mdic = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+       if (offset > MAX_PHY_REG_ADDRESS) {
+               DEBUGOUT1("PHY Address %d is out of range\n", offset);
+               return -E1000_ERR_PARAM;
+       }
+
+       /*
+        * Set up Op-code, Phy Address, and register offset in the MDI
+        * Control register.  The MAC will take care of interfacing with the
+        * PHY to retrieve the desired data.
+        */
+       mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+               (phy->addr << E1000_MDIC_PHY_SHIFT) |
+               (E1000_MDIC_OP_READ));
+
+       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+       /*
+        * Poll the ready bit to see if the MDI read completed.  The
+        * timeout was increased because testing showed failures with
+        * the lower timeout.
+        */
+       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+               usec_delay(50);
+               mdic = E1000_READ_REG(hw, E1000_MDIC);
+               if (mdic & E1000_MDIC_READY)
+                       break;
+       }
+       if (!(mdic & E1000_MDIC_READY)) {
+               DEBUGOUT("MDI Read did not complete\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+       if (mdic & E1000_MDIC_ERROR) {
+               DEBUGOUT("MDI Error\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+       *data = (u16) mdic;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, mdic = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+       if (offset > MAX_PHY_REG_ADDRESS) {
+               DEBUGOUT1("PHY Address %d is out of range\n", offset);
+               return -E1000_ERR_PARAM;
+       }
+
+       /*
+        * Set up Op-code, Phy Address, and register offset in the MDI
+        * Control register.  The MAC will take care of interfacing with the
+        * PHY to write the desired data.
+        */
+       mdic = (((u32)data) |
+               (offset << E1000_MDIC_REG_SHIFT) |
+               (phy->addr << E1000_MDIC_PHY_SHIFT) |
+               (E1000_MDIC_OP_WRITE));
+
+       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+       /*
+        * Poll the ready bit to see if the MDI write completed.  The
+        * timeout was increased because testing showed failures with
+        * the lower timeout.
+        */
+       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+               usec_delay(50);
+               mdic = E1000_READ_REG(hw, E1000_MDIC);
+               if (mdic & E1000_MDIC_READY)
+                       break;
+       }
+       if (!(mdic & E1000_MDIC_READY)) {
+               DEBUGOUT("MDI Write did not complete\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+       if (mdic & E1000_MDIC_ERROR) {
+               DEBUGOUT("MDI Error\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_i2c - Read PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the i2c interface and stores the
+ *  retrieved information in data.
+ **/
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, i2ccmd = 0;
+
+       DEBUGFUNC("e1000_read_phy_reg_i2c");
+
+       /*
+        * Set up Op-code, Phy Address, and register address in the I2CCMD
+        * register.  The MAC will take care of interfacing with the
+        * PHY to retrieve the desired data.
+        */
+       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+                 (E1000_I2CCMD_OPCODE_READ));
+
+       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+       /* Poll the ready bit to see if the I2C read completed */
+       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+               usec_delay(50);
+               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+               if (i2ccmd & E1000_I2CCMD_READY)
+                       break;
+       }
+       if (!(i2ccmd & E1000_I2CCMD_READY)) {
+               DEBUGOUT("I2CCMD Read did not complete\n");
+               return -E1000_ERR_PHY;
+       }
+       if (i2ccmd & E1000_I2CCMD_ERROR) {
+               DEBUGOUT("I2CCMD Error bit set\n");
+               return -E1000_ERR_PHY;
+       }
+
+       /* Need to byte-swap the 16-bit value. */
+       *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg_i2c - Write PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, i2ccmd = 0;
+       u16 phy_data_swapped;
+
+       DEBUGFUNC("e1000_write_phy_reg_i2c");
+
+       /* Swap the data bytes for the I2C interface */
+       phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+       /*
+        * Set up Op-code, Phy Address, and register address in the I2CCMD
+        * register.  The MAC will take care of interfacing with the
+        * PHY to write the desired data.
+        */
+       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+                 E1000_I2CCMD_OPCODE_WRITE |
+                 phy_data_swapped);
+
+       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+       /* Poll the ready bit to see if the I2C write completed */
+       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+               usec_delay(50);
+               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+               if (i2ccmd & E1000_I2CCMD_READY)
+                       break;
+       }
+       if (!(i2ccmd & E1000_I2CCMD_READY)) {
+               DEBUGOUT("I2CCMD Write did not complete\n");
+               return -E1000_ERR_PHY;
+       }
+       if (i2ccmd & E1000_I2CCMD_ERROR) {
+               DEBUGOUT("I2CCMD Error bit set\n");
+               return -E1000_ERR_PHY;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg_m88 - Read m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_phy_reg_m88");
+
+       if (!(hw->phy.ops.acquire))
+               goto out;
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                         data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_m88 - Write m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_phy_reg_m88");
+
+       if (!(hw->phy.ops.acquire))
+               goto out;
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                          data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  __e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+                                    bool locked)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("__e1000_read_phy_reg_igp");
+
+       if (!locked) {
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               ret_val = e1000_write_phy_reg_mdic(hw,
+                                                  IGP01E1000_PHY_PAGE_SELECT,
+                                                  (u16)offset);
+               if (ret_val)
+                       goto release;
+       }
+
+       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                         data);
+
+release:
+       if (!locked)
+               hw->phy.ops.release(hw);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores the
+ *  retrieved information in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_phy_reg_igp(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_read_phy_reg_igp_locked - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_phy_reg_igp(hw, offset, data, TRUE);
+}
+
+/**
+ *  __e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+                                     bool locked)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_phy_reg_igp");
+
+       if (!locked) {
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               ret_val = e1000_write_phy_reg_mdic(hw,
+                                                  IGP01E1000_PHY_PAGE_SELECT,
+                                                  (u16)offset);
+               if (ret_val)
+                       goto release;
+       }
+
+       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                          data);
+
+release:
+       if (!locked)
+               hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_phy_reg_igp(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_write_phy_reg_igp_locked - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_phy_reg_igp(hw, offset, data, TRUE);
+}
+
+/**
+ *  __e1000_read_kmrn_reg - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+                                 bool locked)
+{
+       u32 kmrnctrlsta;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("__e1000_read_kmrn_reg");
+
+       if (!locked) {
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
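+       /*
+        * Writing the offset with E1000_KMRNCTRLSTA_REN set starts a Kumeran
+        * read; after a short delay the result is returned in the low 16
+        * bits of the KMRNCTRLSTA register.
+        */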
+       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+                      E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+       usec_delay(2);
+
+       kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+       *data = (u16)kmrnctrlsta;
+
+       if (!locked)
+               hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_kmrn_reg_generic -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset using the
+ *  kumeran interface.  The information retrieved is stored in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_kmrn_reg(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_read_kmrn_reg_locked -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the kumeran interface.  The
+ *  information retrieved is stored in data.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_kmrn_reg(hw, offset, data, TRUE);
+}
+
+/**
+ *  __e1000_write_kmrn_reg - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then writes the data to the PHY register
+ *  at the offset using the kumeran interface.  Release any acquired semaphores
+ *  before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+                                  bool locked)
+{
+       u32 kmrnctrlsta;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+       if (!locked) {
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
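+       /*
+        * For a write, the 16-bit data is placed in the low half of the
+        * KMRNCTRLSTA value along with the target offset; no ready polling
+        * is done, only a short delay after the register write.
+        */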
+       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+                      E1000_KMRNCTRLSTA_OFFSET) | data;
+       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+       usec_delay(2);
+
+       if (!locked)
+               hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_kmrn_reg_generic -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to the PHY register at the offset
+ *  using the kumeran interface.  Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_kmrn_reg(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_write_kmrn_reg_locked -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Write the data to PHY register at the offset using the kumeran interface.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_kmrn_reg(hw, offset, data, TRUE);
+}
+
+/**
+ *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_copper_link_setup_82577");
+
+       if (hw->phy.reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       if (hw->phy.type == e1000_phy_82580) {
+               ret_val = hw->phy.ops.reset(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error resetting the PHY.\n");
+                       goto out;
+               }
+       }
+
+       /* Enable CRS on Tx. This must be set for half-duplex operation. */
+       ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+       /* Enable downshift */
+       phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+       ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
+ *  and downshift values are set also.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_copper_link_setup_m88");
+
+       if (phy->reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /* Enable CRS on Tx. This must be set for half-duplex operation. */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+       /*
+        * Options:
+        *   MDI/MDI-X = 0 (default)
+        *   0 - Auto for all speeds
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+        */
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+       switch (phy->mdix) {
+       case 1:
+               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+               break;
+       case 2:
+               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+               break;
+       case 3:
+               phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+               break;
+       case 0:
+       default:
+               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+               break;
+       }
+
+       /*
+        * Options:
+        *   disable_polarity_correction = 0 (default)
+        *       Automatic Correction for Reversed Cable Polarity
+        *   0 - Disabled
+        *   1 - Enabled
+        */
+       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+       if (phy->disable_polarity_correction == 1)
+               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       if (phy->revision < E1000_REVISION_4) {
+               /*
+                * Force TX_CLK in the Extended PHY Specific Control Register
+                * to 25MHz clock.
+                */
+               ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                            &phy_data);
+               if (ret_val)
+                       goto out;
+
+               phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+               if ((phy->revision == E1000_REVISION_2) &&
+                   (phy->id == M88E1111_I_PHY_ID)) {
+                       /* 82573L PHY - set the downshift counter to 5x. */
+                       phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+                       phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+               } else {
+                       /* Configure Master and Slave downshift values */
+                       phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+                       phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+               }
+               ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                            phy_data);
+               if (ret_val)
+                       goto out;
+       }
+
+       /* Commit the changes. */
+       ret_val = phy->ops.commit(hw);
+       if (ret_val) {
+               DEBUGOUT("Error committing the PHY changes\n");
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88_gen2 - Setup m88 PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
+ *  Also enables and sets the downshift parameters.
+ **/
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
+
+       if (phy->reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /* Enable CRS on Tx. This must be set for half-duplex operation. */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Options:
+        *   MDI/MDI-X = 0 (default)
+        *   0 - Auto for all speeds
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+        */
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+       switch (phy->mdix) {
+       case 1:
+               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+               break;
+       case 2:
+               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+               break;
+       case 3:
+               /* M88E1112 does not support this mode; fall through to auto */
+               if (phy->id != M88E1112_E_PHY_ID) {
+                       phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+                       break;
+               }
+       case 0:
+       default:
+               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+               break;
+       }
+
+       /*
+        * Options:
+        *   disable_polarity_correction = 0 (default)
+        *       Automatic Correction for Reversed Cable Polarity
+        *   0 - Disabled
+        *   1 - Enabled
+        */
+       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+       if (phy->disable_polarity_correction == 1)
+               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+       /* Enable downshift and set it to 6X */
+       phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+       phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+       phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Commit the changes. */
+       ret_val = phy->ops.commit(hw);
+       if (ret_val) {
+               DEBUGOUT("Error committing the PHY changes\n");
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_igp - Setup igp PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHYs.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+
+       DEBUGFUNC("e1000_copper_link_setup_igp");
+
+       if (phy->reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.reset(hw);
+       if (ret_val) {
+               DEBUGOUT("Error resetting the PHY.\n");
+               goto out;
+       }
+
+       /*
+        * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+        * timeout issues when LFS is enabled.
+        */
+       msec_delay(100);
+
+       /* disable lplu d0 during driver init */
+       if (hw->phy.ops.set_d0_lplu_state) {
+               ret_val = hw->phy.ops.set_d0_lplu_state(hw, FALSE);
+               if (ret_val) {
+                       DEBUGOUT("Error Disabling LPLU D0\n");
+                       goto out;
+               }
+       }
+       /* Configure mdi-mdix settings */
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+       if (ret_val)
+               goto out;
+
+       data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+       switch (phy->mdix) {
+       case 1:
+               data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+               break;
+       case 2:
+               data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+               break;
+       case 0:
+       default:
+               data |= IGP01E1000_PSCR_AUTO_MDIX;
+               break;
+       }
+       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+       if (ret_val)
+               goto out;
+
+       /* set auto-master slave resolution settings */
+       if (hw->mac.autoneg) {
+               /*
+                * when autonegotiation advertisement is only 1000Mbps then we
+                * should disable SmartSpeed and enable Auto MasterSlave
+                * resolution as hardware default.
+                */
+               if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+                       /* Disable SmartSpeed */
+                       ret_val = phy->ops.read_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+
+                       /* Set auto Master/Slave resolution process */
+                       ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~CR_1000T_MS_ENABLE;
+                       ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+                       if (ret_val)
+                               goto out;
+               }
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+               if (ret_val)
+                       goto out;
+
+               /* load defaults for future use */
+               phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+                       ((data & CR_1000T_MS_VALUE) ?
+                       e1000_ms_force_master :
+                       e1000_ms_force_slave) :
+                       e1000_ms_auto;
+
+               switch (phy->ms_type) {
+               case e1000_ms_force_master:
+                       data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+                       break;
+               case e1000_ms_force_slave:
+                       data |= CR_1000T_MS_ENABLE;
+                       data &= ~(CR_1000T_MS_VALUE);
+                       break;
+               case e1000_ms_auto:
+                       data &= ~CR_1000T_MS_ENABLE;
+               default:
+                       break;
+               }
+               ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter,
+ *  then configures the PHY to advertise the full capability.  Sets up the PHY
+ *  to autoneg and restarts the negotiation process with the link partner.  If
+ *  autoneg_wait_to_complete is set, waits for autoneg to complete before
+ *  exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_ctrl;
+
+       DEBUGFUNC("e1000_copper_link_autoneg");
+
+       /*
+        * Perform some bounds checking on the autoneg advertisement
+        * parameter.
+        */
+       phy->autoneg_advertised &= phy->autoneg_mask;
+
+       /*
+        * If autoneg_advertised is zero, we assume it was not defaulted
+        * by the calling code so we set to advertise full capability.
+        */
+       if (phy->autoneg_advertised == 0)
+               phy->autoneg_advertised = phy->autoneg_mask;
+
+       DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+       ret_val = e1000_phy_setup_autoneg(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Setting up Auto-Negotiation\n");
+               goto out;
+       }
+       DEBUGOUT("Restarting Auto-Neg\n");
+
+       /*
+        * Restart auto-negotiation by setting the Auto Neg Enable bit and
+        * the Auto Neg Restart bit in the PHY control register.
+        */
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Does the user want to wait for Auto-Neg to complete here, or
+        * check at a later time (for example, callback routine).
+        */
+       if (phy->autoneg_wait_to_complete) {
+               ret_val = hw->mac.ops.wait_autoneg(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error while waiting for "
+                                "autoneg to complete\n");
+                       goto out;
+               }
+       }
+
+       hw->mac.get_link_status = TRUE;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register.  If the PHY is already set up for auto-negotiation, then
+ *  returns successful.  Otherwise, sets up advertisement and flow control to
+ *  the appropriate values for the desired auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 mii_autoneg_adv_reg;
+       u16 mii_1000t_ctrl_reg = 0;
+
+       DEBUGFUNC("e1000_phy_setup_autoneg");
+
+       phy->autoneg_advertised &= phy->autoneg_mask;
+
+       /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+       ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+       if (ret_val)
+               goto out;
+
+       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+               /* Read the MII 1000Base-T Control Register (Address 9). */
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+                                           &mii_1000t_ctrl_reg);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Need to parse both autoneg_advertised and fc and set up
+        * the appropriate PHY registers.  First we will parse for
+        * autoneg_advertised software override.  Since we can advertise
+        * a plethora of combinations, we need to check each bit
+        * individually.
+        */
+
+       /*
+        * First we clear all the 10/100 mb speed bits in the Auto-Neg
+        * Advertisement Register (Address 4) and the 1000 mb speed bits in
+        * the  1000Base-T Control Register (Address 9).
+        */
+       mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+                                NWAY_AR_100TX_HD_CAPS |
+                                NWAY_AR_10T_FD_CAPS   |
+                                NWAY_AR_10T_HD_CAPS);
+       mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+       DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+       /* Do we want to advertise 10 Mb Half Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+               DEBUGOUT("Advertise 10mb Half duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+       }
+
+       /* Do we want to advertise 10 Mb Full Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+               DEBUGOUT("Advertise 10mb Full duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+       }
+
+       /* Do we want to advertise 100 Mb Half Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+               DEBUGOUT("Advertise 100mb Half duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+       }
+
+       /* Do we want to advertise 100 Mb Full Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+               DEBUGOUT("Advertise 100mb Full duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+       }
+
+       /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+       if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+               DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+       /* Do we want to advertise 1000 Mb Full Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+               DEBUGOUT("Advertise 1000mb Full duplex\n");
+               mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+       }
+
+       /*
+        * Check for a software override of the flow control settings, and
+        * setup the PHY advertisement registers accordingly.  If
+        * auto-negotiation is enabled, then software will have to set the
+        * "PAUSE" bits to the correct value in the Auto-Negotiation
+        * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+        * negotiation.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause frames
+        *          but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames
+        *          but we do not support receiving pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+        *  other:  No software override.  The flow control configuration
+        *          in the EEPROM is used.
+        */
+       switch (hw->fc.current_mode) {
+       case e1000_fc_none:
+               /*
+                * Flow control (Rx & Tx) is completely disabled by a
+                * software over-ride.
+                */
+               mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       case e1000_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled, and Tx Flow control is
+                * disabled, by a software over-ride.
+                *
+                * Since there really isn't a way to advertise that we are
+                * capable of Rx Pause ONLY, we will advertise that we
+                * support both symmetric and asymmetric Rx PAUSE.  Later
+                * (in e1000_config_fc_after_link_up) we will disable the
+                * hw's ability to send PAUSE frames.
+                */
+               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       case e1000_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled, by a software over-ride.
+                */
+               mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+               mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+               break;
+       case e1000_fc_full:
+               /*
+                * Flow control (both Rx and Tx) is enabled by a software
+                * over-ride.
+                */
+               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+       if (ret_val)
+               goto out;
+
+       DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+               ret_val = phy->ops.write_reg(hw,
+                                             PHY_1000T_CTRL,
+                                             mii_1000t_ctrl_reg);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_generic - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is not
+ *  established, a debug message is logged.
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       bool link;
+
+       DEBUGFUNC("e1000_setup_copper_link_generic");
+
+       if (hw->mac.autoneg) {
+               /*
+                * Setup autoneg and flow control advertisement and perform
+                * autonegotiation.
+                */
+               ret_val = e1000_copper_link_autoneg(hw);
+               if (ret_val)
+                       goto out;
+       } else {
+               /*
+                * PHY will be set to 10H, 10F, 100H or 100F
+                * depending on user settings.
+                */
+               DEBUGOUT("Forcing Speed and Duplex\n");
+               ret_val = hw->phy.ops.force_speed_duplex(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error Forcing Speed and Duplex\n");
+                       goto out;
+               }
+       }
+
+       /*
+        * Check link status. Wait up to 100 microseconds for link to become
+        * valid.
+        */
+       ret_val = e1000_phy_has_link_generic(hw,
+                                            COPPER_LINK_UP_LIMIT,
+                                            10,
+                                            &link);
+       if (ret_val)
+               goto out;
+
+       if (link) {
+               DEBUGOUT("Valid link established!!!\n");
+               e1000_config_collision_dist_generic(hw);
+               ret_val = e1000_config_fc_after_link_up_generic(hw);
+       } else {
+               DEBUGOUT("Unable to establish link!!!\n");
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+        * forced whenever speed and duplex are forced.
+        */
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+       phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+       usec_delay(1);
+
+       if (phy->autoneg_wait_to_complete) {
+               DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+
+               if (!link)
+                       DEBUGOUT("Link taking longer than expected.\n");
+
+               /* Try once more */
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on Tx must be set.  Returns success on
+ *  completion, else the corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+       /*
+        * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+        * forced whenever speed and duplex are forced.
+        */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Reset the phy to commit changes. */
+       ret_val = hw->phy.ops.commit(hw);
+       if (ret_val)
+               goto out;
+
+       if (phy->autoneg_wait_to_complete) {
+               DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+                                                    100000, &link);
+               if (ret_val)
+                       goto out;
+
+               if (!link) {
+                       if (hw->phy.type != e1000_phy_m88 ||
+                           hw->phy.id == I347AT4_E_PHY_ID ||
+                           hw->phy.id == M88E1340M_E_PHY_ID ||
+                           hw->phy.id == M88E1112_E_PHY_ID) {
+                               DEBUGOUT("Link taking longer than expected.\n");
+                       } else {
+                               /*
+                                * We didn't get link.
+                                * Reset the DSP and cross our fingers.
+                                */
+                               ret_val = phy->ops.write_reg(hw,
+                                               M88E1000_PHY_PAGE_SELECT,
+                                               0x001d);
+                               if (ret_val)
+                                       goto out;
+                               ret_val = e1000_phy_reset_dsp_generic(hw);
+                               if (ret_val)
+                                       goto out;
+                       }
+               }
+
+               /* Try once more */
+               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+                                                    100000, &link);
+               if (ret_val)
+                       goto out;
+       }
+
+       if (hw->phy.type != e1000_phy_m88 ||
+           hw->phy.id == I347AT4_E_PHY_ID ||
+           hw->phy.id == M88E1340M_E_PHY_ID ||
+           hw->phy.id == M88E1112_E_PHY_ID)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Resetting the phy means we need to re-force TX_CLK in the
+        * Extended PHY Specific Control Register to 25MHz clock from
+        * the reset value of 2.5MHz.
+        */
+       phy_data |= M88E1000_EPSCR_TX_CLK_25;
+       ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * In addition, we must re-enable CRS on Tx for both half and full
+        * duplex.
+        */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+       if (ret_val)
+               goto out;
+
+       e1000_phy_force_speed_duplex_setup(hw, &data);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+       if (ret_val)
+               goto out;
+
+       /* Disable MDI-X support for 10/100 */
+       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+       if (ret_val)
+               goto out;
+
+       data &= ~IFE_PMC_AUTO_MDIX;
+       data &= ~IFE_PMC_FORCE_MDIX;
+
+       ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+       if (ret_val)
+               goto out;
+
+       DEBUGOUT1("IFE PMC: %X\n", data);
+
+       usec_delay(1);
+
+       if (phy->autoneg_wait_to_complete) {
+               DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+
+               if (!link)
+                       DEBUGOUT("Link taking longer than expected.\n");
+
+               /* Try once more */
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+       /* Turn off flow control when forcing speed/duplex */
+       hw->fc.current_mode = e1000_fc_none;
+
+       /* Force speed/duplex on the mac */
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       ctrl &= ~E1000_CTRL_SPD_SEL;
+
+       /* Disable Auto Speed Detection */
+       ctrl &= ~E1000_CTRL_ASDE;
+
+       /* Disable autoneg on the phy */
+       *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+       /* Forcing Full or Half Duplex? */
+       if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+               ctrl &= ~E1000_CTRL_FD;
+               *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       } else {
+               ctrl |= E1000_CTRL_FD;
+               *phy_ctrl |= MII_CR_FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       }
+
+       /* Forcing 10mb or 100mb? */
+       if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+               ctrl |= E1000_CTRL_SPD_100;
+               *phy_ctrl |= MII_CR_SPEED_100;
+               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+               DEBUGOUT("Forcing 100mb\n");
+       } else {
+               ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+               *phy_ctrl |= MII_CR_SPEED_10;
+               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+               DEBUGOUT("Forcing 10mb\n");
+       }
+
+       e1000_config_collision_dist_generic(hw);
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+/**
+ *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
+ *  and enable SmartSpeed.  LPLU and SmartSpeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+       if (!(hw->phy.ops.read_reg))
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+       if (ret_val)
+               goto out;
+
+       if (!active) {
+               data &= ~IGP02E1000_PM_D3_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                            data);
+               if (ret_val)
+                       goto out;
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                   IGP01E1000_PHY_PORT_CONFIG,
+                                                   &data);
+                       if (ret_val)
+                               goto out;
+
+                       data |= IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               }
+       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+               data |= IGP02E1000_PM_D3_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                             data);
+               if (ret_val)
+                       goto out;
+
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                            &data);
+               if (ret_val)
+                       goto out;
+
+               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                             data);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, offset, mask;
+
+       DEBUGFUNC("e1000_check_downshift_generic");
+
+       switch (phy->type) {
+       case e1000_phy_m88:
+       case e1000_phy_gg82563:
+               offset  = M88E1000_PHY_SPEC_STATUS;
+               mask    = M88E1000_PSSR_DOWNSHIFT;
+               break;
+       case e1000_phy_igp_2:
+       case e1000_phy_igp_3:
+               offset  = IGP01E1000_PHY_LINK_HEALTH;
+               mask    = IGP01E1000_PLHR_SS_DOWNGRADE;
+               break;
+       default:
+               /* speed downshift not supported */
+               phy->speed_downgraded = FALSE;
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+       if (!ret_val)
+               phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+
+       DEBUGFUNC("e1000_check_polarity_m88");
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+       if (!ret_val)
+               phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+                                     ? e1000_rev_polarity_reversed
+                                     : e1000_rev_polarity_normal;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data, offset, mask;
+
+       DEBUGFUNC("e1000_check_polarity_igp");
+
+       /*
+        * Polarity is determined based on the speed of
+        * our connection.
+        */
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+       if (ret_val)
+               goto out;
+
+       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+               offset  = IGP01E1000_PHY_PCS_INIT_REG;
+               mask    = IGP01E1000_PHY_POLARITY_MASK;
+       } else {
+               /*
+                * This really only applies to 10Mbps since
+                * there is no polarity for 100Mbps (always 0).
+                */
+               offset  = IGP01E1000_PHY_PORT_STATUS;
+               mask    = IGP01E1000_PSSR_POLARITY_REVERSED;
+       }
+
+       ret_val = phy->ops.read_reg(hw, offset, &data);
+
+       if (!ret_val)
+               phy->cable_polarity = (data & mask)
+                                     ? e1000_rev_polarity_reversed
+                                     : e1000_rev_polarity_normal;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on whether the polarity reversal feature
+ *  is enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, offset, mask;
+
+       DEBUGFUNC("e1000_check_polarity_ife");
+
+       /*
+        * Polarity is determined based on the reversal feature being enabled.
+        */
+       if (phy->polarity_correction) {
+               offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+               mask = IFE_PESC_POLARITY_REVERSED;
+       } else {
+               offset = IFE_PHY_SPECIAL_CONTROL;
+               mask = IFE_PSC_FORCE_POLARITY;
+       }
+
+       ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+       if (!ret_val)
+               phy->cable_polarity = (phy_data & mask)
+                                      ? e1000_rev_polarity_reversed
+                                      : e1000_rev_polarity_normal;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg_generic - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 i, phy_status;
+
+       DEBUGFUNC("e1000_wait_autoneg_generic");
+
+       if (!(hw->phy.ops.read_reg))
+               return E1000_SUCCESS;
+
+       /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+       for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
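+               /*
+                * Read PHY_STATUS twice; latched status bits only reflect
+                * the current state on the second read.
+                */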
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       break;
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       break;
+               if (phy_status & MII_SR_AUTONEG_COMPLETE)
+                       break;
+               msec_delay(100);
+       }
+
+       /*
+        * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+        * has completed.
+        */
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                               u32 usec_interval, bool *success)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 i, phy_status;
+
+       DEBUGFUNC("e1000_phy_has_link_generic");
+
+       if (!(hw->phy.ops.read_reg))
+               return E1000_SUCCESS;
+
+       for (i = 0; i < iterations; i++) {
+               /*
+                * Some PHYs require the PHY_STATUS register to be read
+                * twice due to the link bit being sticky.  No harm doing
+                * it across the board.
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       /*
+                        * If the first read fails, another entity may have
+                        * ownership of the resources, wait and try again to
+                        * see if they have relinquished the resources yet.
+                        */
+                       usec_delay(usec_interval);
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       break;
+               if (phy_status & MII_SR_LINK_STATUS)
+                       break;
+               if (usec_interval >= 1000)
+                       msec_delay_irq(usec_interval/1000);
+               else
+                       usec_delay(usec_interval);
+       }
+
+       *success = (i < iterations) ? TRUE : FALSE;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *     Register Value          Cable Length
+ *     0                       < 50 meters
+ *     1                       50 - 80 meters
+ *     2                       80 - 110 meters
+ *     3                       110 - 140 meters
+ *     4                       > 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, index;
+
+       DEBUGFUNC("e1000_get_cable_length_m88");
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+       if (ret_val)
+               goto out;
+
+       index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+               M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+       if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
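+       /*
+        * The table entries at 'index' and 'index + 1' bound the cable
+        * length; report their midpoint as the estimate.
+        */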
+       phy->min_cable_length = e1000_m88_cable_length_table[index];
+       phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+       return ret_val;
+}
+
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, phy_data2, index, default_page, is_cm;
+
+       DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+       switch (hw->phy.id) {
+       case M88E1340M_E_PHY_ID:
+       case I347AT4_E_PHY_ID:
+               /* Remember the original page select and set it to 7 */
+               ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+                                           &default_page);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+               if (ret_val)
+                       goto out;
+
+               /* Get cable length from PHY Cable Diagnostics Control Reg */
+               ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+                                           &phy_data);
+               if (ret_val)
+                       goto out;
+
+               /* Check if the unit of cable length is meters or cm */
+               ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+               if (ret_val)
+                       goto out;
+
+               is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+               /* Populate the phy structure with cable length in meters */
+               phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+               phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+               phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+               /* Reset the page select to its original value */
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+                                            default_page);
+               if (ret_val)
+                       goto out;
+               break;
+       case M88E1112_E_PHY_ID:
+               /* Remember the original page select and set it to 5 */
+               ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+                                           &default_page);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+                                           &phy_data);
+               if (ret_val)
+                       goto out;
+
+               index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+               if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+               }
+
+               phy->min_cable_length = e1000_m88_cable_length_table[index];
+               phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+               phy->cable_length = (phy->min_cable_length +
+                                    phy->max_cable_length) / 2;
+
+               /* Reset the page select to its original value */
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+                                            default_page);
+               if (ret_val)
+                       goto out;
+
+               break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain values, the result can be used
+ *  as an index into a lookup table to obtain the approximate cable
+ *  length for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_data, i, agc_value = 0;
+       u16 cur_agc_index, max_agc_index = 0;
+       u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+       static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+              IGP02E1000_PHY_AGC_A,
+              IGP02E1000_PHY_AGC_B,
+              IGP02E1000_PHY_AGC_C,
+              IGP02E1000_PHY_AGC_D
+       };
+
+       DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+       /* Read the AGC registers for all channels */
+       for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+               ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+               if (ret_val)
+                       goto out;
+
+               /*
+                * Getting bits 15:9, which represent the combination of
+                * coarse and fine gain values.  The result is a number
+                * that can be put into the lookup table to obtain the
+                * approximate cable length.
+                */
+               cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                               IGP02E1000_AGC_LENGTH_MASK;
+
+               /* Array index bound check. */
+               if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+                   (cur_agc_index == 0)) {
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+               }
+
+               /* Remove min & max AGC values from calculation. */
+               if (e1000_igp_2_cable_length_table[min_agc_index] >
+                   e1000_igp_2_cable_length_table[cur_agc_index])
+                       min_agc_index = cur_agc_index;
+               if (e1000_igp_2_cable_length_table[max_agc_index] <
+                   e1000_igp_2_cable_length_table[cur_agc_index])
+                       max_agc_index = cur_agc_index;
+
+               agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+       }
+
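+       /*
+        * Discard the shortest and longest channel readings and average
+        * the two remaining channels.
+        */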
+       agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+                     e1000_igp_2_cable_length_table[max_agc_index]);
+       agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+       /* Calculate cable length with the error range of +/- 10 meters. */
+       phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                                (agc_value - IGP02E1000_AGC_RANGE) : 0;
+       phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32  ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_m88");
+
+       if (phy->media_type != e1000_media_type_copper) {
+               DEBUGOUT("Phy info is only valid for copper media\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+                                  ? TRUE : FALSE;
+
+       ret_val = e1000_check_polarity_m88(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+       if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+               ret_val = hw->phy.ops.get_cable_length(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+               if (ret_val)
+                       goto out;
+
+               phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+                               ? e1000_1000t_rx_status_ok
+                               : e1000_1000t_rx_status_not_ok;
+
+               phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+                                ? e1000_1000t_rx_status_ok
+                                : e1000_1000t_rx_status_not_ok;
+       } else {
+               /* Set values to "undefined" */
+               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+               phy->local_rx = e1000_1000t_rx_status_undefined;
+               phy->remote_rx = e1000_1000t_rx_status_undefined;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_igp");
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       phy->polarity_correction = TRUE;
+
+       ret_val = e1000_check_polarity_igp(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+               ret_val = phy->ops.get_cable_length(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+               if (ret_val)
+                       goto out;
+
+               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+                               ? e1000_1000t_rx_status_ok
+                               : e1000_1000t_rx_status_not_ok;
+
+               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+                                ? e1000_1000t_rx_status_ok
+                                : e1000_1000t_rx_status_not_ok;
+       } else {
+               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+               phy->local_rx = e1000_1000t_rx_status_undefined;
+               phy->remote_rx = e1000_1000t_rx_status_undefined;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_ife");
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+       if (ret_val)
+               goto out;
+       phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+                                  ? FALSE : TRUE;
+
+       if (phy->polarity_correction) {
+               ret_val = e1000_check_polarity_ife(hw);
+               if (ret_val)
+                       goto out;
+       } else {
+               /* Polarity is forced */
+               phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+                                     ? e1000_rev_polarity_reversed
+                                     : e1000_rev_polarity_normal;
+       }
+
+       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+
+       /* The following parameters are undefined for 10/100 operation. */
+       phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+       phy->local_rx = e1000_1000t_rx_status_undefined;
+       phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_sw_reset_generic - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register and
+ *  setting the reset bit, then writing the control register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_ctrl;
+
+       DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+       if (!(hw->phy.ops.read_reg))
+               goto out;
+
+       ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       phy_ctrl |= MII_CR_RESET;
+       ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       usec_delay(1);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the PHY reset bit in the
+ *  device control register.  Wait the appropriate delay time for the device
+ *  to reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+       ret_val = phy->ops.check_reset_block(hw);
+       if (ret_val) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       ret_val = phy->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
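+       /*
+        * Assert the PHY reset bit in device control, hold it for the
+        * PHY's reset delay, then clear it and let the PHY settle.
+        */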
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+       E1000_WRITE_FLUSH(hw);
+
+       usec_delay(phy->reset_delay_us);
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+       E1000_WRITE_FLUSH(hw);
+
+       usec_delay(150);
+
+       phy->ops.release(hw);
+
+       ret_val = phy->ops.get_cfg_done(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_generic - Generic configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Generic function to wait 10 milliseconds for configuration to complete
+ *  and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_get_cfg_done_generic");
+
+       msec_delay_irq(10);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+       DEBUGOUT("Running IGP 3 PHY init script\n");
+
+       /* PHY init IGP 3 */
+       /* Enable rise/fall, 10-mode work in class-A */
+       hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+       /* Remove all caps from Replica path filter */
+       hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+       /* Bias trimming for ADC, AFE and Driver (Default) */
+       hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+       /* Increase Hybrid poly bias */
+       hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+       /* Add 4% to Tx amplitude in Gig mode */
+       hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+       /* Disable trimming (TTT) */
+       hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+       /* Poly DC correction to 94.6% + 2% for all channels */
+       hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+       /* ABS DC correction to 95.9% */
+       hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+       /* BG temp curve trim */
+       hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+       /* Increasing ADC OPAMP stage 1 currents to max */
+       hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+       /* Force 1000 (required for enabling PHY regs configuration) */
+       hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+       /* Set upd_freq to 6 */
+       hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+       /* Disable NPDFE */
+       hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+       /* Disable adaptive fixed FFE (Default) */
+       hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+       /* Enable FFE hysteresis */
+       hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+       /* Fixed FFE for short cable lengths */
+       hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+       /* Fixed FFE for medium cable lengths */
+       hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+       /* Fixed FFE for long cable lengths */
+       hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+       /* Enable Adaptive Clip Threshold */
+       hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+       /* AHT reset limit to 1 */
+       hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+       /* Set AHT master delay to 127 msec */
+       hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+       /* Set scan bits for AHT */
+       hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+       /* Set AHT Preset bits */
+       hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+       /* Change integ_factor of channel A to 3 */
+       hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+       /* Change prop_factor of channels BCD to 8 */
+       hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+       /* Change cg_icount + enable integbp for channels BCD */
+       hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+       /*
+        * Change cg_icount + enable integbp + change prop_factor_master
+        * to 8 for channel A
+        */
+       hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+       /* Disable AHT in Slave mode on channel A */
+       hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+       /*
+        * Enable LPLU and disable AN to 1000 in non-D0a states,
+        * Enable SPD+B2B
+        */
+       hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+       /* Enable restart AN on an1000_dis change */
+       hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+       /* Enable wh_fifo read clock in 10/100 modes */
+       hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+       /* Restart AN, Speed selection is 1000 */
+       hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_type_from_id - Get PHY type from id
+ *  @phy_id: phy_id read from the phy
+ *
+ *  Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+       enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+       switch (phy_id) {
+       case M88E1000_I_PHY_ID:
+       case M88E1000_E_PHY_ID:
+       case M88E1111_I_PHY_ID:
+       case M88E1011_I_PHY_ID:
+       case I347AT4_E_PHY_ID:
+       case M88E1112_E_PHY_ID:
+       case M88E1340M_E_PHY_ID:
+               phy_type = e1000_phy_m88;
+               break;
+       case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+               phy_type = e1000_phy_igp_2;
+               break;
+       case GG82563_E_PHY_ID:
+               phy_type = e1000_phy_gg82563;
+               break;
+       case IGP03E1000_E_PHY_ID:
+               phy_type = e1000_phy_igp_3;
+               break;
+       case IFE_E_PHY_ID:
+       case IFE_PLUS_E_PHY_ID:
+       case IFE_C_E_PHY_ID:
+               phy_type = e1000_phy_ife;
+               break;
+       case I82580_I_PHY_ID:
+               phy_type = e1000_phy_82580;
+               break;
+       default:
+               phy_type = e1000_phy_unknown;
+               break;
+       }
+       return phy_type;
+}
+
+/**
+ *  e1000_determine_phy_address - Determines PHY address.
+ *  @hw: pointer to the HW structure
+ *
+ *  This uses a trial and error method to loop through possible PHY
+ *  addresses. It tests each by reading the PHY ID registers and
+ *  checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+       s32 ret_val = -E1000_ERR_PHY_TYPE;
+       u32 phy_addr = 0;
+       u32 i;
+       enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+       hw->phy.id = phy_type;
+
+       for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+               hw->phy.addr = phy_addr;
+               i = 0;
+
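+               /* Probe this address up to 10 times, 1 ms apart. */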
+               do {
+                       e1000_get_phy_id(hw);
+                       phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+                       /*
+                        * If phy_type is valid, break - we found our
+                        * PHY address
+                        */
+                       if (phy_type != e1000_phy_unknown) {
+                               ret_val = E1000_SUCCESS;
+                               goto out;
+                       }
+                       msec_delay(1);
+                       i++;
+               } while (i < 10);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+       u16 mii_reg = 0;
+
+       /* The PHY will retain its settings across a power down/up cycle */
+       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+       mii_reg &= ~MII_CR_POWER_DOWN;
+       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Powers down the PHY to save power or to turn off link during a driver
+ * unload when wake on LAN is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+       u16 mii_reg = 0;
+
+       /* The PHY will retain its settings across a power down/up cycle */
+       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+       mii_reg |= MII_CR_POWER_DOWN;
+       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+       msec_delay(1);
+}
+
+/**
+ *  e1000_check_polarity_82577 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+
+       DEBUGFUNC("e1000_check_polarity_82577");
+
+       ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+       if (!ret_val)
+               phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+                                     ? e1000_rev_polarity_reversed
+                                     : e1000_rev_polarity_normal;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+       if (ret_val)
+               goto out;
+
+       usec_delay(1);
+
+       if (phy->autoneg_wait_to_complete) {
+               DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+
+               if (!link)
+                       DEBUGOUT("Link taking longer than expected.\n");
+
+               /* Try once more */
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_82577");
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       phy->polarity_correction = TRUE;
+
+       ret_val = e1000_check_polarity_82577(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? TRUE : FALSE;
+
+       if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+           I82577_PHY_STATUS2_SPEED_1000MBPS) {
+               ret_val = hw->phy.ops.get_cable_length(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+               if (ret_val)
+                       goto out;
+
+               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+                               ? e1000_1000t_rx_status_ok
+                               : e1000_1000t_rx_status_not_ok;
+
+               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+                                ? e1000_1000t_rx_status_ok
+                                : e1000_1000t_rx_status_not_ok;
+       } else {
+               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+               phy->local_rx = e1000_1000t_rx_status_undefined;
+               phy->remote_rx = e1000_1000t_rx_status_undefined;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies the result is valid before
+ * placing it in the cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, length;
+
+       DEBUGFUNC("e1000_get_cable_length_82577");
+
+       ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+       if (ret_val)
+               goto out;
+
+       length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+                I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+       if (length == E1000_CABLE_LENGTH_UNDEFINED)
+               ret_val = -E1000_ERR_PHY;
+
+       phy->cable_length = length;
+
+out:
+       return ret_val;
+}
diff --git a/lib/librte_pmd_e1000/e1000/e1000_phy.h b/lib/librte_pmd_e1000/e1000/e1000_phy.h
new file mode 100644 (file)
index 0000000..1b21430
--- /dev/null
@@ -0,0 +1,217 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+void e1000_init_phy_ops_generic(struct e1000_hw *hw);
+s32  e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+void e1000_null_phy_generic(struct e1000_hw *hw);
+s32  e1000_null_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_check_downshift_generic(struct e1000_hw *hw);
+s32  e1000_check_polarity_m88(struct e1000_hw *hw);
+s32  e1000_check_polarity_igp(struct e1000_hw *hw);
+s32  e1000_check_polarity_ife(struct e1000_hw *hw);
+s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32  e1000_get_phy_id(struct e1000_hw *hw);
+s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32  e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32  e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32  e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
+s32  e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
+s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                                u32 usec_interval, bool *success);
+s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+s32  e1000_determine_phy_address(struct e1000_hw *hw);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32  e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32  e1000_check_polarity_82577(struct e1000_hw *hw);
+s32  e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32  e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR                4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
+#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT                22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT                    5
+#define PHY_REG_MASK                      0x1F
+
+#define HV_INTC_FC_PAGE_START             768
+#define I82578_ADDR_REG                   29
+#define I82577_ADDR_REG                   16
+#define I82577_CFG_REG                    22
+#define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG                   23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2            18
+#define I82577_PHY_LBK_CTRL          19
+#define I82577_PHY_STATUS_2          26
+#define I82577_PHY_DIAG_STATUS       31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82577_PHY_STATUS2_MDIX           0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82577_PHY_STATUS2_SPEED_100MBPS  0x0100
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT        0xE14
+#define E1000_82580_PM_SPD                0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU            0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU            0x0004 /* For all other states */
+
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP01E1000_GMII_FLEX_SPD          0x0010
+#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0800
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define IGP03E1000_PHY_MISC_CTRL          0x1B
+#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
+#define E1000_KMRNCTRLSTA_REN             0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS        0x4    /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM    0x9    /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE   0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED    0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
+#define IFE_PSC_FORCE_POLARITY             0x0020
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE            0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#endif
diff --git a/lib/librte_pmd_e1000/e1000/e1000_regs.h b/lib/librte_pmd_e1000/e1000/e1000_regs.h
new file mode 100644 (file)
index 0000000..6b902ea
--- /dev/null
@@ -0,0 +1,574 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_MDICNFG  0x00E04  /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE        0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2  0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_BARCTRL                  0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE           0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE          0x2000 /* BAR ctrl CSR size */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FEXT     0x0002C  /* Future Extended - RW */
+#define E1000_FEXTNVM4 0x00024  /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* Rx Control - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* Tx Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* Rx Configuration Word - RO */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE     0x01514  /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL     0x00400  /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended Tx Control - RW */
+#define E1000_TIPG     0x00410  /* Tx Inter-packet gap -RW */
+#define E1000_TBT      0x00448  /* Tx Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
+#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_VPDDIAG  0x01060  /* VPD Diagnostic - RO */
+#define E1000_ICR_V2   0x01500  /* Interrupt Cause - new location - RC */
+#define E1000_ICS_V2   0x01504  /* Interrupt Cause Set - new location - WO */
+#define E1000_IMS_V2   0x01508  /* Interrupt Mask Set/Read - new location - RW */
+#define E1000_IMC_V2   0x0150C  /* Interrupt Mask Clear - new location - WO */
+#define E1000_IAM_V2   0x01510  /* Interrupt Ack Auto Mask - new location - RW */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDFPCQ(_n)  (0x02430 + (0x4 * (_n)))
+#define E1000_PBRTH    0x02458  /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB   0x025CC  /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD   0x025D0  /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD   0x025D4  /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD   0x025D8  /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL  0x025DC  /* DMA Rx Descriptor uC Control - RW */
+#define E1000_PBDIAG   0x02458  /* Packet Buffer Diagnostic - RW */
+#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer adapters - RW */
+#define E1000_PBRWAC   0x024E8 /* Rx packet buffer wrap around counter - RO */
+#define E1000_RDTR     0x02820  /* Rx Delay Timer - RW */
+#define E1000_RADV     0x0282C  /* Rx Interrupt Absolute Delay Timer - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)      ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+                                         (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)      ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+                                         (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)      ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+                                         (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)     ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+                                         (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)        ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+                                         (0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n)      ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+                                         (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_RDT(_n)        ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+                                         (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)     ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+                                         (0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n)      ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+                                         (0x0C030 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)      ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+                                         (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)      ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+                                         (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)      ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+                                         (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)        ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+                                         (0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n)      ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+                                         (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDT(_n)        ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+                                         (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)     ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+                                         (0x0E028 + ((_n) * 0x40)))
+#define E1000_TDWBAL(_n)     ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
+                                         (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n)     ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
+                                         (0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n)                   (0x03840 + ((_n) * 0x100))
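+/*
+ * Worked example of the split addressing above: for queue 2, E1000_RDBAL(2)
+ * resolves to 0x02800 + (2 * 0x100) = 0x02A00, while for queue 5 it resolves
+ * to 0x0C000 + (5 * 0x40) = 0x0C140 in the extended register block.
+ */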
+#define E1000_RSRPD    0x02C00  /* Rx Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* Tx DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                                       (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                                       (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i)         (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i)         (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC   0x03100  /* Packet Buffer Slave Access Control */
+#define E1000_PBSLAD(_n)  (0x03110 + (0x4 * (_n)))  /* Packet Buffer DWORD (_n) */
+#define E1000_TXPBS    0x03404  /* Tx Packet Buffer Size - RW */
+#define E1000_ITPBS   0x03404   /* Same as TXPBS, renamed for newer adapters - RW */
+#define E1000_TDFH     0x03410  /* Tx Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB   0x0357C  /* DMA Tx Descriptor uC Mail Box - RW */
+#define E1000_TDPUAD   0x03580  /* DMA Tx Descriptor uC Addr Command - RW */
+#define E1000_TDPUWD   0x03584  /* DMA Tx Descriptor uC Data Write - RW */
+#define E1000_TDPURD   0x03588  /* DMA Tx Descriptor uC Data  Read  - RW */
+#define E1000_TDPUCTL  0x0358C  /* DMA Tx Descriptor uC Control - RW */
+#define E1000_DTXCTL   0x03590  /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+#define E1000_DTXMXSZRQ  0x03540 /* DMA Tx Max Total Allow Size Requests - RW */
+#define E1000_TIDV     0x03820  /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV     0x0382C  /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* Tx-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON Rx Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* Rx No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* Rx Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* Rx Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* Rx Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets Tx Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets Rx Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets Rx High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets Tx High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets Rx - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets Tx - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+
+#define E1000_VFGPRC   0x00F10
+#define E1000_VFGORC   0x00F18
+#define E1000_VFMPRC   0x00F3C
+#define E1000_VFGPTC   0x00F14
+#define E1000_VFGOTC   0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
+/* Virtualization statistical counters */
+#define E1000_PFVFGPRC(_n)   (0x010010 + (0x100 * (_n)))
+#define E1000_PFVFGPTC(_n)   (0x010014 + (0x100 * (_n)))
+#define E1000_PFVFGORC(_n)   (0x010018 + (0x100 * (_n)))
+#define E1000_PFVFGOTC(_n)   (0x010034 + (0x100 * (_n)))
+#define E1000_PFVFMPRC(_n)   (0x010038 + (0x100 * (_n)))
+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
+
+#define E1000_LSECTXUT        0x04300  /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
+#define E1000_LSECTXPKTE      0x04304  /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
+#define E1000_LSECTXPKTP      0x04308  /* LinkSec Protected Tx Packet Count - OutPktsProtected */
+#define E1000_LSECTXOCTE      0x0430C  /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */
+#define E1000_LSECTXOCTP      0x04310  /* LinkSec Protected Tx Octets Count - OutOctetsProtected */
+#define E1000_LSECRXUT        0x04314  /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */
+#define E1000_LSECRXOCTD      0x0431C  /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */
+#define E1000_LSECRXOCTV      0x04320  /* LinkSec Rx Octets Validated - InOctetsValidated */
+#define E1000_LSECRXBAD       0x04324  /* LinkSec Rx Bad Tag - InPktsBadTag */
+#define E1000_LSECRXNOSCI     0x04328  /* LinkSec Rx Packet No SCI Count - InPktsNoSci */
+#define E1000_LSECRXUNSCI     0x0432C  /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */
+#define E1000_LSECRXUNCH      0x04330  /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */
+#define E1000_LSECRXDELAY     0x04340  /* LinkSec Rx Delayed Packet Count - InPktsDelayed */
+#define E1000_LSECRXLATE      0x04350  /* LinkSec Rx Late Packets Count - InPktsLate */
+#define E1000_LSECRXOK(_n)    (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */
+#define E1000_LSECRXINV(_n)   (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */
+#define E1000_LSECRXNV(_n)    (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */
+#define E1000_LSECRXUNSA      0x043C0  /* LinkSec Rx Unused SA Count - InPktsUnusedSa */
+#define E1000_LSECRXNUSA      0x043D0  /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */
+#define E1000_LSECTXCAP       0x0B000  /* LinkSec Tx Capabilities Register - RO */
+#define E1000_LSECRXCAP       0x0B300  /* LinkSec Rx Capabilities Register - RO */
+#define E1000_LSECTXCTRL      0x0B004  /* LinkSec Tx Control - RW */
+#define E1000_LSECRXCTRL      0x0B304  /* LinkSec Rx Control - RW */
+#define E1000_LSECTXSCL       0x0B008  /* LinkSec Tx SCI Low - RW */
+#define E1000_LSECTXSCH       0x0B00C  /* LinkSec Tx SCI High - RW */
+#define E1000_LSECTXSA        0x0B010  /* LinkSec Tx SA0 - RW */
+#define E1000_LSECTXPN0       0x0B018  /* LinkSec Tx SA PN 0 - RW */
+#define E1000_LSECTXPN1       0x0B01C  /* LinkSec Tx SA PN 1 - RW */
+#define E1000_LSECRXSCL       0x0B3D0  /* LinkSec Rx SCI Low - RW */
+#define E1000_LSECRXSCH       0x0B3E0  /* LinkSec Rx SCI High - RW */
+#define E1000_LSECTXKEY0(_n)  (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */
+#define E1000_LSECTXKEY1(_n)  (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */
+#define E1000_LSECRXSA(_n)    (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
+#define E1000_LSECRXPN(_n)    (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
+/*
+ * LinkSec Rx Keys  - where _n is the SA no. and _m the 4 dwords of the 128 bit
+ * key - RW.
+ */
+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
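+/*
+ * For example, E1000_LSECRXKEY(1, 2) is 0x0B350 + 0x10 + 0x08 = 0x0B368,
+ * i.e. dword 2 of the 128-bit key for SA 1.
+ */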
+
+#define E1000_SSVPC             0x041A0  /* Switch Security Violation Packet Count */
+#define E1000_IPSCTRL           0xB430   /* IpSec Control Register */
+#define E1000_IPSRXCMD          0x0B408  /* IPSec Rx Command Register - RW */
+#define E1000_IPSRXIDX          0x0B400  /* IPSec Rx Index - RW */
+#define E1000_IPSRXIPADDR(_n)   (0x0B420+ (0x04 * (_n)))  /* IPSec Rx IPv4/v6 Address - RW */
+#define E1000_IPSRXKEY(_n)      (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */
+#define E1000_IPSRXSALT         0x0B404  /* IPSec Rx Salt - RW */
+#define E1000_IPSRXSPI          0x0B40C  /* IPSec Rx SPI - RW */
+#define E1000_IPSTXKEY(_n)      (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */
+#define E1000_IPSTXSALT         0x0B454  /* IPSec Tx Salt - RW */
+#define E1000_IPSTXIDX          0x0B450  /* IPSec Tx SA IDX - RW */
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC      0x04044  /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC     0x04124  /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
+#define E1000_RXCSUM   0x05000  /* Rx Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* Rx Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control */
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
+#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL    0x05B68  /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+#define E1000_FHFT(_n)  (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */
+#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */
+
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA      0x0003C /* PHY address - RW */
+#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
+#define E1000_MDEF(_n)    (0x05890 + (4 * (_n))) /* Mngmt Decision Filters */
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2        0x05B64 /* PCI-Ex Control #2 */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_SWSM2     0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_DCA_ID    0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE     0x05B78 /* UFUSE - RO */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+#define E1000_FWSTS     0x08F0C /* FW Status */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
+#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register
+                                                    * (_i) - RW */
+#define E1000_MSIXTADD(_i)  (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr
+                                                       * low reg - RW */
+#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr
+                                                       * upper reg - RW */
+#define E1000_MSIXTMSG(_i)  (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry
+                                                       * message reg - RW */
+#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry
+                                                       * vector ctrl reg - RW */
+#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
+#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+/* VT Registers */
+#define E1000_SWPBS     0x03004 /* Switch Packet Buffer Size - RW */
+#define E1000_MBVFICR   0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR   0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE     0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE      0x00C8C /* VF Receive Enables */
+#define E1000_VFTE      0x00C90 /* VF Transmit Enables */
+#define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR      0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL    0x05BBC /* IOV Control Register */
+#define E1000_VMRCTL    0x05D80 /* Virtual Mirror Rule Control */
+#define E1000_VMRVLAN   0x05D90 /* Virtual Mirror Rule VLAN */
+#define E1000_VMRVM     0x05DA0 /* Virtual Mirror Rule VM */
+#define E1000_MDFB      0x03558 /* Malicious Driver free block */
+#define E1000_LVMMC     0x03548 /* Last VM Misbehavior cause */
+#define E1000_TXSWC     0x05ACC /* Tx Switch Control */
+#define E1000_SCCRL     0x05DB0 /* Storm Control Control */
+#define E1000_BSCTRH    0x05DB8 /* Broadcast Storm Control Threshold */
+#define E1000_MSCTRH    0x05DBC /* Multicast Storm Control Threshold */
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
+#define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
+#define E1000_VFVMBMEM(_n)     (0x00800 + (_n))
+#define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
+                                                       * Filter - RW */
+#define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
+#define E1000_DVMOLR(_n)       (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
+/* Time Sync */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n)  (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define E1000_DAQF(_n)  (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define E1000_SPQF(_n)  (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define E1000_FTQF(_n)  (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n)  (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RTTDCS            0x3600  /* Reedtown Tx Desc plane control and status */
+#define E1000_RTTPCS            0x3474  /* Reedtown Tx Packet Plane control and status */
+#define E1000_RTRPCS            0x2474  /* Rx packet plane control and status */
+#define E1000_RTRUP2TC          0x05AC4 /* Rx User Priority to Traffic Class */
+#define E1000_RTTUP2TC          0x0418  /* Transmit User Priority to Traffic Class */
+#define E1000_RTTDTCRC(_n)      (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */
+#define E1000_RTTPTCRC(_n)      (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTRPTCRC(_n)      (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTTDTCRS(_n)      (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */
+#define E1000_RTTDTCRM(_n)      (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */
+#define E1000_RTTPTCRS(_n)      (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTTPTCRM(_n)      (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */
+#define E1000_RTRPTCRS(_n)      (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTRPTCRM(_n)      (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */
+#define E1000_RTTDVMRM(_n)      (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/
+#define E1000_RTTBCNRM(_n)      (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */
+#define E1000_RTTDQSEL          0x3604  /* Tx Desc Plane Queue Select */
+#define E1000_RTTDVMRC          0x3608  /* Tx Desc Plane VM Rate-Scheduler Config */
+#define E1000_RTTDVMRS          0x360C  /* Tx Desc Plane VM Rate-Scheduler Status */
+#define E1000_RTTBCNRC          0x36B0  /* Tx BCN Rate-Scheduler Config */
+#define E1000_RTTBCNRS          0x36B4  /* Tx BCN Rate-Scheduler Status */
+#define E1000_RTTBCNCR          0xB200  /* Tx BCN Control Register */
+#define E1000_RTTBCNTG          0x35A4  /* Tx BCN Tagging */
+#define E1000_RTTBCNCP          0xB208  /* Tx BCN Congestion point */
+#define E1000_RTRBCNCR          0xB20C  /* Rx BCN Control Register */
+#define E1000_RTTBCNRD          0x36B8  /* Tx BCN Rate Drift */
+#define E1000_PFCTOP            0x1080  /* Priority Flow Control Type and Opcode */
+#define E1000_RTTBCNIDX         0xB204  /* Tx BCN Congestion Point */
+#define E1000_RTTBCNACH         0x0B214 /* Tx BCN Control High */
+#define E1000_RTTBCNACL         0x0B210 /* Tx BCN Control Low */
+
+/* DMA Coalescing registers */
+#define E1000_DMACR             0x02508 /* Control Register */
+#define E1000_DMCTXTH           0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX            0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH           0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT            0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC             0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS        0x05BA8
+
+#define E1000_PROXYS            0x5F64 /* Proxying Status */
+#define E1000_PROXYFC           0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT             0x08100 /* Junction Temperature */
+#define E1000_THLOWTC           0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC           0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC          0x0810C /* High Threshold Control */
+#define E1000_THSTAT            0x08110 /* Thermal Sensor Status */
+
+/* Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG            0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC              0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER              0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define E1000_EEE_SU            0x0E34 /* EEE Setup */
+#define E1000_TLPIC             0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC             0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC            0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC           0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC           0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC            0x0415C /* OS2BMC packets transmitted by host */
+
+#endif
diff --git a/lib/librte_pmd_e1000/e1000/e1000_vf.c b/lib/librte_pmd_e1000/e1000/e1000_vf.c
new file mode 100644 (file)
index 0000000..8b81e4b
--- /dev/null
@@ -0,0 +1,574 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "e1000_api.h"
+
+
+static s32       e1000_init_phy_params_vf(struct e1000_hw *hw);
+static s32       e1000_init_nvm_params_vf(struct e1000_hw *hw);
+static void      e1000_release_vf(struct e1000_hw *hw);
+static s32       e1000_acquire_vf(struct e1000_hw *hw);
+static s32       e1000_setup_link_vf(struct e1000_hw *hw);
+static s32       e1000_get_bus_info_pcie_vf(struct e1000_hw *hw);
+static s32       e1000_init_mac_params_vf(struct e1000_hw *hw);
+static s32       e1000_check_for_link_vf(struct e1000_hw *hw);
+static s32       e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex);
+static s32       e1000_init_hw_vf(struct e1000_hw *hw);
+static s32       e1000_reset_hw_vf(struct e1000_hw *hw);
+static void      e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32);
+static void      e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
+static s32       e1000_read_mac_addr_vf(struct e1000_hw *);
+
+/**
+ *  e1000_init_phy_params_vf - Inits PHY params
+ *  @hw: pointer to the HW structure
+ *
+ *  Doesn't do much - there's no PHY available to the VF.
+ **/
+static s32 e1000_init_phy_params_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_phy_params_vf");
+       hw->phy.type = e1000_phy_vf;
+       hw->phy.ops.acquire = e1000_acquire_vf;
+       hw->phy.ops.release = e1000_release_vf;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_nvm_params_vf - Inits NVM params
+ *  @hw: pointer to the HW structure
+ *
+ *  Doesn't do much - there's no NVM available to the VF.
+ **/
+static s32 e1000_init_nvm_params_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_nvm_params_vf");
+       hw->nvm.type = e1000_nvm_none;
+       hw->nvm.ops.acquire = e1000_acquire_vf;
+       hw->nvm.ops.release = e1000_release_vf;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_vf - Inits MAC params
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("e1000_init_mac_params_vf");
+
+       /* Set media type */
+       /*
+        * Virtual functions don't care what their media type is, as they
+        * have no direct access to the PHY or the media; that is handled
+        * by the physical function driver.
+        */
+       hw->phy.media_type = e1000_media_type_unknown;
+
+       /* No ASF features for the VF driver */
+       mac->asf_firmware_present = FALSE;
+       /* ARC subsystem not supported */
+       mac->arc_subsystem_valid = FALSE;
+       /* Disable adaptive IFS mode so the generic funcs don't do anything */
+       mac->adaptive_ifs = FALSE;
+       /* VFs have no MTA Registers - PF feature only */
+       mac->mta_reg_count = 128;
+       /* VFs have no access to RAR entries */
+       mac->rar_entry_count = 1;
+
+       /* Function pointers */
+       /* link setup */
+       mac->ops.setup_link = e1000_setup_link_vf;
+       /* bus type/speed/width */
+       mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf;
+       /* reset */
+       mac->ops.reset_hw = e1000_reset_hw_vf;
+       /* hw initialization */
+       mac->ops.init_hw = e1000_init_hw_vf;
+       /* check for link */
+       mac->ops.check_for_link = e1000_check_for_link_vf;
+       /* link info */
+       mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
+       /* multicast address update */
+       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
+       /* set mac address */
+       mac->ops.rar_set = e1000_rar_set_vf;
+       /* read mac address */
+       mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_function_pointers_vf - Inits function pointers
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_init_function_pointers_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_function_pointers_vf");
+
+       hw->mac.ops.init_params = e1000_init_mac_params_vf;
+       hw->nvm.ops.init_params = e1000_init_nvm_params_vf;
+       hw->phy.ops.init_params = e1000_init_phy_params_vf;
+       hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
+}
+
+/**
+ *  e1000_acquire_vf - Acquire rights to access PHY or NVM.
+ *  @hw: pointer to the HW structure
+ *
+ *  There is no PHY or NVM so we want all attempts to acquire these to fail.
+ *  In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ *  even want any SW to attempt to use them.
+ **/
+static s32 e1000_acquire_vf(struct e1000_hw *hw)
+{
+       return -E1000_ERR_PHY;
+}
+
+/**
+ *  e1000_release_vf - Release PHY or NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  There is no PHY or NVM so we want all attempts to acquire these to fail.
+ *  In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ *  even want any SW to attempt to use them.
+ **/
+static void e1000_release_vf(struct e1000_hw *hw)
+{
+       return;
+}
+
+/**
+ *  e1000_setup_link_vf - Sets up link.
+ *  @hw: pointer to the HW structure
+ *
+ *  Virtual functions cannot change link.
+ **/
+static s32 e1000_setup_link_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_setup_link_vf");
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_bus_info_pcie_vf - Gets the bus info.
+ *  @hw: pointer to the HW structure
+ *
+ *  Virtual functions are not really on their own bus.
+ **/
+static s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+
+       DEBUGFUNC("e1000_get_bus_info_pcie_vf");
+
+       /* Don't set type to PCI-Express, so the master-disable logic never runs */
+       bus->type = e1000_bus_type_reserved;
+       bus->speed = e1000_bus_speed_2500;
+
+       return 0;
+}
+
+/**
+ *  e1000_get_link_up_info_vf - Gets link info.
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to 16 bit value to store link speed.
+ *  @duplex: pointer to 16 bit value to store duplex.
+ *
+ *  Since we cannot read the PHY and get accurate link info, we must rely upon
+ *  the status register's data which is often stale and inaccurate.
+ **/
+static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+                                     u16 *duplex)
+{
+       s32 status;
+
+       DEBUGFUNC("e1000_get_link_up_info_vf");
+
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       if (status & E1000_STATUS_SPEED_1000) {
+               *speed = SPEED_1000;
+               DEBUGOUT("1000 Mbs, ");
+       } else if (status & E1000_STATUS_SPEED_100) {
+               *speed = SPEED_100;
+               DEBUGOUT("100 Mbs, ");
+       } else {
+               *speed = SPEED_10;
+               DEBUGOUT("10 Mbs, ");
+       }
+
+       if (status & E1000_STATUS_FD) {
+               *duplex = FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       } else {
+               *duplex = HALF_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_hw_vf - Resets the HW
+ *  @hw: pointer to the HW structure
+ *
+ *  VFs provide a function-level reset. This is done using bit 26 of ctrl_reg.
+ *  This is all the reset we can perform on a VF.
+ **/
+static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 timeout = E1000_VF_INIT_TIMEOUT;
+       s32 ret_val = -E1000_ERR_MAC_INIT;
+       u32 ctrl, msgbuf[3];
+       u8 *addr = (u8 *)(&msgbuf[1]);
+
+       DEBUGFUNC("e1000_reset_hw_vf");
+
+       DEBUGOUT("Issuing a function level reset to MAC\n");
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+       /* we cannot reset while the RSTI / RSTD bits are asserted */
+       while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+               timeout--;
+               usec_delay(5);
+       }
+
+       if (timeout) {
+               /* mailbox timeout can now become active */
+               mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
+
+               msgbuf[0] = E1000_VF_RESET;
+               mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+               msec_delay(10);
+
+               /* set our "perm_addr" based on info provided by PF */
+               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+               if (!ret_val) {
+                       if (msgbuf[0] == (E1000_VF_RESET |
+                                               E1000_VT_MSGTYPE_ACK))
+                               memcpy(hw->mac.perm_addr, addr, 6);
+                       else
+                               ret_val = -E1000_ERR_MAC_INIT;
+               }
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_init_hw_vf - Inits the HW
+ *  @hw: pointer to the HW structure
+ *
+ *  Not much to do here except clear the PF Reset indication if there is one.
+ **/
+static s32 e1000_init_hw_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_hw_vf");
+
+       /* attempt to set and restore our mac address */
+       e1000_rar_set_vf(hw, hw->mac.addr, 0);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_rar_set_vf - set device MAC address
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ **/
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[3];
+       u8 *msg_addr = (u8 *)(&msgbuf[1]);
+       s32 ret_val;
+
+       memset(msgbuf, 0, 12);
+       msgbuf[0] = E1000_VF_SET_MAC_ADDR;
+       memcpy(msg_addr, addr, 6);
+       ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+       if (!ret_val)
+               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+       msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+       /* if nacked the address was rejected, use "perm_addr" */
+       if (!ret_val &&
+           (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
+               e1000_read_mac_addr_vf(hw);
+}
+
+/**
+ *  e1000_hash_mc_addr_vf - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.
+ **/
+static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
+{
+       u32 hash_value, hash_mask;
+       u8 bit_shift = 0;
+
+       DEBUGFUNC("e1000_hash_mc_addr_vf");
+
+       /* Register count multiplied by bits per register */
+       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+       /*
+        * The bit_shift is the number of left-shifts
+        * where 0xFF would still fall within the hash mask.
+        */
+       while (hash_mask >> bit_shift != 0xFF)
+               bit_shift++;
+
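+       /*
+        * For the VF, mta_reg_count is 128, so hash_mask is 0xFFF and
+        * bit_shift works out to 4: the hash is the low 12 bits of
+        * ((mc_addr[4] >> 4) | (mc_addr[5] << 4)).
+        */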
+       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+                                 (((u16) mc_addr[5]) << bit_shift)));
+
+       return hash_value;
+}
+
+/**
+ *  e1000_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates the Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+                                  u8 *mc_addr_list, u32 mc_addr_count)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[E1000_VFMAILBOX_SIZE];
+       u16 *hash_list = (u16 *)&msgbuf[1];
+       u32 hash_value;
+       u32 i;
+
+       DEBUGFUNC("e1000_update_mc_addr_list_vf");
+
+       /* Each entry in the list uses one 16-bit word.  Once the first
+        * 32-bit word of the HW msg buffer is reserved for the msg type,
+        * 30 16-bit words remain, i.e. room for 30 hash values if we pack
+        * them tightly.  If there are more than 30 MC addresses to add,
+        * punt the extras for now and add code to handle more than 30
+        * later.  It would be unusual for a server to request that many
+        * multicast addresses except in large enterprise network
+        * environments.
+        */
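+
+       /*
+        * Illustrative layout (hypothetical values): with two addresses
+        * hashing to 0x341 and 0x0A2, msgbuf[0] ends up holding
+        * E1000_VF_SET_MULTICAST | (2 << E1000_VT_MSGINFO_SHIFT), and on a
+        * little-endian host both 12-bit hash values are packed into
+        * msgbuf[1].
+        */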
+
+       DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+       /* set the msg type first so the overflow flag below is not clobbered */
+       msgbuf[0] = E1000_VF_SET_MULTICAST;
+
+       if (mc_addr_count > 30) {
+               msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
+               mc_addr_count = 30;
+       }
+
+       msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
+
+       for (i = 0; i < mc_addr_count; i++) {
+               hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
+               DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
+               hash_list[i] = hash_value & 0x0FFF;
+               mc_addr_list += ETH_ADDR_LEN;
+       }
+
+       mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ *  e1000_vfta_set_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vid: determines the vfta register and bit to set/unset
+ *  @set: if TRUE then set bit, else clear bit
+ **/
+void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[2];
+
+       msgbuf[0] = E1000_VF_SET_VLAN;
+       msgbuf[1] = vid;
+       /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+       if (set)
+               msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
+
+       mbx->ops.write_posted(hw, msgbuf, 2, 0);
+}
+
+/**
+ *  e1000_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[2];
+
+       msgbuf[0] = E1000_VF_SET_LPE;
+       msgbuf[1] = max_size;
+
+       mbx->ops.write_posted(hw, msgbuf, 2, 0);
+}
+
+/**
+ *  e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc
+ *  @hw: pointer to the HW structure
+ *  @type: promiscuous mode to set (unicast, multicast, both or disabled)
+ **/
+s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf = E1000_VF_SET_PROMISC;
+       s32 ret_val;
+
+       switch (type) {
+       case e1000_promisc_multicast:
+               msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+               break;
+       case e1000_promisc_enabled:
+               msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
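+               /* FALLTHROUGH: "enabled" sets both multicast and unicast */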
+       case e1000_promisc_unicast:
+               msgbuf |= E1000_VF_SET_PROMISC_UNICAST;
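+               /* FALLTHROUGH to the break below */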
+       case e1000_promisc_disabled:
+               break;
+       default:
+               return -E1000_ERR_MAC_INIT;
+       }
+
+        ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0);
+
+       if (!ret_val)
+               ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0);
+
+       if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK))
+               ret_val = -E1000_ERR_MAC_INIT;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
+{
+       int i;
+
+       for (i = 0; i < ETH_ADDR_LEN; i++)
+               hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_for_link_vf - Check for link for a virtual interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the underlying PF is still talking to the VF and
+ *  if it is then it reports the link state to the hardware, otherwise
+ *  it reports link down and returns an error.
+ **/
+static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val = E1000_SUCCESS;
+       u32 in_msg = 0;
+
+       DEBUGFUNC("e1000_check_for_link_vf");
+
+       /*
+        * We only want to run this if a reset has been asserted; in that
+        * case it could mean a link change, a device reset, or a virtual
+        * function reset.
+        */
+
+       /* If we were hit with a reset or timeout drop the link */
+       if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+               mac->get_link_status = TRUE;
+
+       if (!mac->get_link_status)
+               goto out;
+
+       /* if link status is down no point in checking to see if pf is up */
+       if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+               goto out;
+
+       /* if the read failed it could just be a mailbox collision, best wait
+        * until we are called again and don't report an error */
+       if (mbx->ops.read(hw, &in_msg, 1, 0))
+               goto out;
+
+       /* if incoming message isn't clear to send we are waiting on response */
+       if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
+               /* message is not CTS; if it is a NACK we have lost CTS status */
+               if (in_msg & E1000_VT_MSGTYPE_NACK)
+                       ret_val = -E1000_ERR_MAC_INIT;
+               goto out;
+       }
+
+       /* at this point we know the PF is talking to us; check whether we
+        * are still within the mailbox timeout or had a timeout failure.
+        * if we failed then we will need to reinit */
+       if (!mbx->timeout) {
+               ret_val = -E1000_ERR_MAC_INIT;
+               goto out;
+       }
+
+       /* if we passed all the tests above then the link is up and we no
+        * longer need to check for link */
+       mac->get_link_status = FALSE;
+
+out:
+       return ret_val;
+}
+
diff --git a/lib/librte_pmd_e1000/e1000/e1000_vf.h b/lib/librte_pmd_e1000/e1000/e1000_vf.h
new file mode 100644 (file)
index 0000000..b2fd8a1
--- /dev/null
@@ -0,0 +1,294 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_VF_H_
+#define _E1000_VF_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576_VF                 0x10CA
+#define E1000_DEV_ID_I350_VF                  0x1520
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Additional Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
+#define E1000_SRRCTL_DROP_EN                            0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
+
+/* Interrupt Defines */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + ((_n) << 2))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_IVAR_VALID        0x80
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+       struct {
+               u64 pkt_addr;             /* Packet buffer address */
+               u64 hdr_addr;             /* Header buffer address */
+       } read;
+       struct {
+               struct {
+                       union {
+                               u32 data;
+                               struct {
+                                       /* RSS type, Packet type */
+                                       u16 pkt_info;
+                                       /* Split Header, header buffer len */
+                                       u16 hdr_info;
+                               } hs_rss;
+                       } lo_dword;
+                       union {
+                               u32 rss;          /* RSS Hash */
+                               struct {
+                                       u16 ip_id;    /* IP id */
+                                       u16 csum;     /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       u32 status_error;     /* ext status/error */
+                       u16 length;           /* Packet length */
+                       u16 vlan;             /* VLAN tag */
+               } upper;
+       } wb;  /* writeback */
+};
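+
+/*
+ * Usage sketch (illustrative): software fills the "read" format with DMA
+ * addresses before posting the descriptor; after hardware writeback it
+ * typically checks wb.upper.status_error for the DD status bit (e.g.
+ * E1000_RXD_STAT_DD from e1000_defines.h) before trusting
+ * wb.upper.length and wb.upper.vlan.
+ */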
+
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+       struct {
+               u64 buffer_addr;    /* Address of descriptor's data buf */
+               u32 cmd_type_len;
+               u32 olinfo_status;
+       } read;
+       struct {
+               u64 rsvd;       /* Reserved */
+               u32 nxtseq_seed;
+               u32 status;
+       } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+       u32 vlan_macip_lens;
+       u32 seqnum_seed;
+       u32 type_tucmd_mlhl;
+       u32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+
+enum e1000_mac_type {
+       e1000_undefined = 0,
+       e1000_vfadapt,
+       e1000_vfadapt_i350,
+       e1000_num_macs  /* List is 1-based, so subtract 1 for TRUE count. */
+};
+
+struct e1000_vf_stats {
+       u64 base_gprc;
+       u64 base_gptc;
+       u64 base_gorc;
+       u64 base_gotc;
+       u64 base_mprc;
+       u64 base_gotlbc;
+       u64 base_gptlbc;
+       u64 base_gorlbc;
+       u64 base_gprlbc;
+
+       u32 last_gprc;
+       u32 last_gptc;
+       u32 last_gorc;
+       u32 last_gotc;
+       u32 last_mprc;
+       u32 last_gotlbc;
+       u32 last_gptlbc;
+       u32 last_gorlbc;
+       u32 last_gprlbc;
+
+       u64 gprc;
+       u64 gptc;
+       u64 gorc;
+       u64 gotc;
+       u64 mprc;
+       u64 gotlbc;
+       u64 gptlbc;
+       u64 gorlbc;
+       u64 gprlbc;
+};
+
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+       /* Function pointers for the MAC. */
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*check_for_link)(struct e1000_hw *);
+       void (*clear_vfta)(struct e1000_hw *);
+       s32  (*get_bus_info)(struct e1000_hw *);
+       s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+       void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+       s32  (*reset_hw)(struct e1000_hw *);
+       s32  (*init_hw)(struct e1000_hw *);
+       s32  (*setup_link)(struct e1000_hw *);
+       void (*write_vfta)(struct e1000_hw *, u32, u32);
+       void (*rar_set)(struct e1000_hw *, u8*, u32);
+       s32  (*read_mac_addr)(struct e1000_hw *);
+};
+
+struct e1000_mac_info {
+       struct e1000_mac_operations ops;
+       u8 addr[6];
+       u8 perm_addr[6];
+
+       enum e1000_mac_type type;
+
+       u16 mta_reg_count;
+       u16 rar_entry_count;
+
+       bool get_link_status;
+};
+
+struct e1000_mbx_operations {
+       s32 (*init_params)(struct e1000_hw *hw);
+       s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
+       s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+       s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
+       s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+       s32 (*check_for_msg)(struct e1000_hw *, u16);
+       s32 (*check_for_ack)(struct e1000_hw *, u16);
+       s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+       u32 msgs_tx;
+       u32 msgs_rx;
+
+       u32 acks;
+       u32 reqs;
+       u32 rsts;
+};
+
+struct e1000_mbx_info {
+       struct e1000_mbx_operations ops;
+       struct e1000_mbx_stats stats;
+       u32 timeout;
+       u32 usec_delay;
+       u16 size;
+};
+
+struct e1000_dev_spec_vf {
+       u32 vf_number;
+       u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+       void *back;
+
+       u8 *hw_addr;
+       u8 *flash_address;
+       unsigned long io_base;
+
+       struct e1000_mac_info  mac;
+       struct e1000_mbx_info mbx;
+
+       union {
+               struct e1000_dev_spec_vf vf;
+       } dev_spec;
+
+       u16 device_id;
+       u16 subsystem_vendor_id;
+       u16 subsystem_device_id;
+       u16 vendor_id;
+
+       u8  revision_id;
+};
+
+enum e1000_promisc_type {
+       e1000_promisc_disabled = 0,   /* all promisc modes disabled */
+       e1000_promisc_unicast = 1,    /* unicast promiscuous enabled */
+       e1000_promisc_multicast = 2,  /* multicast promiscuous enabled */
+       e1000_promisc_enabled = 3,    /* both uni and multicast promisc */
+       e1000_num_promisc_types
+};
+
+/* These functions must be implemented by drivers */
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
+#endif /* _E1000_VF_H_ */
diff --git a/lib/librte_pmd_e1000/e1000/if_igb.c b/lib/librte_pmd_e1000/e1000/if_igb.c
new file mode 100644 (file)
index 0000000..4aa08f6
--- /dev/null
@@ -0,0 +1,5567 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#include "opt_inet.h"
+#include "opt_altq.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
+#include <sys/buf_ring.h>
+#endif
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/eventhandler.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+#include <dev/led/led.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+#include "if_igb.h"
+
+/*********************************************************************
+ *  Set this to one to display debug statistics
+ *********************************************************************/
+int    igb_display_debug_stats = 0;
+
+/*********************************************************************
+ *  Driver version:
+ *********************************************************************/
+char igb_driver_version[] = "version - 2.2.3";
+
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select devices to load on
+ *  Last field stores an index into e1000_strings
+ *  Last entry must be all 0s
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static igb_vendor_info_t igb_vendor_info_array[] =
+{
+       { 0x8086, E1000_DEV_ID_82575EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576,           PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_NS,        PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_FIBER,     PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_SERDES,    PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82576_VF,        PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82580_COPPER,    PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82580_FIBER,     PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82580_SERDES,    PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82580_SGMII,     PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII,  PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_DH89XXCC_SFP,    PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE,
+                                               PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_I350_COPPER,     PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_I350_FIBER,      PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_I350_SERDES,     PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_I350_SGMII,      PCI_ANY_ID, PCI_ANY_ID, 0},
+       { 0x8086, E1000_DEV_ID_I350_VF,         PCI_ANY_ID, PCI_ANY_ID, 0},
+       /* required last entry */
+       { 0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings for all supported NICs.
+ *********************************************************************/
+
+static char *igb_strings[] = {
+       "Intel(R) PRO/1000 Network Connection"
+};
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int     igb_probe(device_t);
+static int     igb_attach(device_t);
+static int     igb_detach(device_t);
+static int     igb_shutdown(device_t);
+static int     igb_suspend(device_t);
+static int     igb_resume(device_t);
+static void    igb_start(struct ifnet *);
+static void    igb_start_locked(struct tx_ring *, struct ifnet *ifp);
+#if __FreeBSD_version >= 800000
+static int     igb_mq_start(struct ifnet *, struct mbuf *);
+static int     igb_mq_start_locked(struct ifnet *,
+                   struct tx_ring *, struct mbuf *);
+static void    igb_qflush(struct ifnet *);
+#endif
+static int     igb_ioctl(struct ifnet *, u_long, caddr_t);
+static void    igb_init(void *);
+static void    igb_init_locked(struct adapter *);
+static void    igb_stop(void *);
+static void    igb_media_status(struct ifnet *, struct ifmediareq *);
+static int     igb_media_change(struct ifnet *);
+static void    igb_identify_hardware(struct adapter *);
+static int     igb_allocate_pci_resources(struct adapter *);
+static int     igb_allocate_msix(struct adapter *);
+static int     igb_allocate_legacy(struct adapter *);
+static int     igb_setup_msix(struct adapter *);
+static void    igb_free_pci_resources(struct adapter *);
+static void    igb_local_timer(void *);
+static void    igb_reset(struct adapter *);
+static int     igb_setup_interface(device_t, struct adapter *);
+static int     igb_allocate_queues(struct adapter *);
+static void    igb_configure_queues(struct adapter *);
+
+static int     igb_allocate_transmit_buffers(struct tx_ring *);
+static void    igb_setup_transmit_structures(struct adapter *);
+static void    igb_setup_transmit_ring(struct tx_ring *);
+static void    igb_initialize_transmit_units(struct adapter *);
+static void    igb_free_transmit_structures(struct adapter *);
+static void    igb_free_transmit_buffers(struct tx_ring *);
+
+static int     igb_allocate_receive_buffers(struct rx_ring *);
+static int     igb_setup_receive_structures(struct adapter *);
+static int     igb_setup_receive_ring(struct rx_ring *);
+static void    igb_initialize_receive_units(struct adapter *);
+static void    igb_free_receive_structures(struct adapter *);
+static void    igb_free_receive_buffers(struct rx_ring *);
+static void    igb_free_receive_ring(struct rx_ring *);
+
+static void    igb_enable_intr(struct adapter *);
+static void    igb_disable_intr(struct adapter *);
+static void    igb_update_stats_counters(struct adapter *);
+static bool    igb_txeof(struct tx_ring *);
+
+static __inline void igb_rx_discard(struct rx_ring *, int);
+static __inline void igb_rx_input(struct rx_ring *,
+                   struct ifnet *, struct mbuf *, u32);
+
+static bool    igb_rxeof(struct igb_queue *, int, int *);
+static void    igb_rx_checksum(u32, struct mbuf *, u32);
+static int     igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+static bool    igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static void    igb_set_promisc(struct adapter *);
+static void    igb_disable_promisc(struct adapter *);
+static void    igb_set_multi(struct adapter *);
+static void    igb_update_link_status(struct adapter *);
+static void    igb_refresh_mbufs(struct rx_ring *, int);
+
+static void    igb_register_vlan(void *, struct ifnet *, u16);
+static void    igb_unregister_vlan(void *, struct ifnet *, u16);
+static void    igb_setup_vlan_hw_support(struct adapter *);
+
+static int     igb_xmit(struct tx_ring *, struct mbuf **);
+static int     igb_dma_malloc(struct adapter *, bus_size_t,
+                   struct igb_dma_alloc *, int);
+static void    igb_dma_free(struct adapter *, struct igb_dma_alloc *);
+static int     igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
+static void    igb_print_nvm_info(struct adapter *);
+static int     igb_is_valid_ether_addr(u8 *);
+static void     igb_add_hw_stats(struct adapter *);
+
+static void    igb_vf_init_stats(struct adapter *);
+static void    igb_update_vf_stats_counters(struct adapter *);
+
+/* Management and WOL Support */
+static void    igb_init_manageability(struct adapter *);
+static void    igb_release_manageability(struct adapter *);
+static void     igb_get_hw_control(struct adapter *);
+static void     igb_release_hw_control(struct adapter *);
+static void     igb_enable_wakeup(device_t);
+static void     igb_led_func(void *, int);
+
+static int     igb_irq_fast(void *);
+static void    igb_msix_que(void *);
+static void    igb_msix_link(void *);
+static void    igb_handle_que(void *context, int pending);
+static void    igb_handle_link(void *context, int pending);
+
+static void    igb_set_sysctl_value(struct adapter *, const char *,
+                   const char *, int *, int);
+static int     igb_set_flowcntl(SYSCTL_HANDLER_ARGS);
+
+#ifdef DEVICE_POLLING
+static poll_handler_t igb_poll;
+#endif /* DEVICE_POLLING */
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t igb_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe, igb_probe),
+       DEVMETHOD(device_attach, igb_attach),
+       DEVMETHOD(device_detach, igb_detach),
+       DEVMETHOD(device_shutdown, igb_shutdown),
+       DEVMETHOD(device_suspend, igb_suspend),
+       DEVMETHOD(device_resume, igb_resume),
+       {0, 0}
+};
+
+static driver_t igb_driver = {
+       "igb", igb_methods, sizeof(struct adapter),
+};
+
+static devclass_t igb_devclass;
+DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
+MODULE_DEPEND(igb, pci, 1, 1, 1);
+MODULE_DEPEND(igb, ether, 1, 1, 1);
+
+/*********************************************************************
+ *  Tunable default values.
+ *********************************************************************/
+
+/* Descriptor defaults */
+static int igb_rxd = IGB_DEFAULT_RXD;
+static int igb_txd = IGB_DEFAULT_TXD;
+TUNABLE_INT("hw.igb.rxd", &igb_rxd);
+TUNABLE_INT("hw.igb.txd", &igb_txd);
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int igb_enable_aim = TRUE;
+TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim);
+
+/*
+ * MSIX should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */         
+static int igb_enable_msix = 1;
+TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
+
+/*
+** Tunable interrupt rate
+*/
+static int igb_max_interrupt_rate = 8000;
+TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate);
+
+/*
+** Header split causes the packet header to
+** be DMA'd to a separate mbuf from the payload.
+** This can have memory alignment benefits.  Another
+** plus is that small packets often fit into the
+** header and thus use no cluster.  It's a very
+** workload-dependent feature.
+*/
+static bool igb_header_split = FALSE;
+TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
+
+/*
+** This will autoconfigure based on
+** the number of CPUs if left at 0.
+*/
+static int igb_num_queues = 0;
+TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
+
+/* How many packets rxeof tries to clean at a time */
+static int igb_rx_process_limit = 100;
+TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
+
+/* Flow control setting - default to FULL */
+static int igb_fc_setting = e1000_fc_full;
+TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
+
+/* Energy Efficient Ethernet - default to off */
+static int igb_eee_disabled = TRUE;
+TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
+
+/*
+** DMA Coalescing, only for i350 - default to off,
+** this feature is for power savings
+*/
+static int igb_dma_coalesce = FALSE;
+TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  igb_probe determines whether the driver should be loaded on an
+ *  adapter based on the PCI vendor/device ID of the adapter.
+ *
+ *  return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+igb_probe(device_t dev)
+{
+       char            adapter_name[60];
+       uint16_t        pci_vendor_id = 0;
+       uint16_t        pci_device_id = 0;
+       uint16_t        pci_subvendor_id = 0;
+       uint16_t        pci_subdevice_id = 0;
+       igb_vendor_info_t *ent;
+
+       INIT_DEBUGOUT("igb_probe: begin");
+
+       pci_vendor_id = pci_get_vendor(dev);
+       if (pci_vendor_id != IGB_VENDOR_ID)
+               return (ENXIO);
+
+       pci_device_id = pci_get_device(dev);
+       pci_subvendor_id = pci_get_subvendor(dev);
+       pci_subdevice_id = pci_get_subdevice(dev);
+
+       ent = igb_vendor_info_array;
+       while (ent->vendor_id != 0) {
+               if ((pci_vendor_id == ent->vendor_id) &&
+                   (pci_device_id == ent->device_id) &&
+
+                   ((pci_subvendor_id == ent->subvendor_id) ||
+                   (ent->subvendor_id == PCI_ANY_ID)) &&
+
+                   ((pci_subdevice_id == ent->subdevice_id) ||
+                   (ent->subdevice_id == PCI_ANY_ID))) {
+                       sprintf(adapter_name, "%s %s",
+                               igb_strings[ent->index],
+                               igb_driver_version);
+                       device_set_desc_copy(dev, adapter_name);
+                       return (BUS_PROBE_DEFAULT);
+               }
+               ent++;
+       }
+
+       return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+igb_attach(device_t dev)
+{
+       struct adapter  *adapter;
+       int             error = 0;
+       u16             eeprom_data;
+
+       INIT_DEBUGOUT("igb_attach: begin");
+
+       adapter = device_get_softc(dev);
+       adapter->dev = adapter->osdep.dev = dev;
+       IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+
+       /* SYSCTL stuff */
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+           OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
+           igb_sysctl_nvm_info, "I", "NVM Information");
+
+       SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+           OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+           &igb_enable_aim, 1, "Interrupt Moderation");
+
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+           OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
+           adapter, 0, igb_set_flowcntl, "I", "Flow Control");
+
+       callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
+
+       /* Determine hardware and mac info */
+       igb_identify_hardware(adapter);
+
+       /* Setup PCI resources */
+       if (igb_allocate_pci_resources(adapter)) {
+               device_printf(dev, "Allocation of PCI resources failed\n");
+               error = ENXIO;
+               goto err_pci;
+       }
+
+       /* Do Shared Code initialization */
+       if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
+               device_printf(dev, "Setup of Shared code failed\n");
+               error = ENXIO;
+               goto err_pci;
+       }
+
+       e1000_get_bus_info(&adapter->hw);
+
+       /* Sysctl for limiting the amount of work done in the taskqueue */
+       igb_set_sysctl_value(adapter, "rx_processing_limit",
+           "max number of rx packets to process", &adapter->rx_process_limit,
+           igb_rx_process_limit);
+
+       /*
+        * Validate number of transmit and receive descriptors. It
+        * must not exceed hardware maximum, and must be multiple
+        * of E1000_DBA_ALIGN.
+        */
+       if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
+           (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
+               device_printf(dev, "Using %d TX descriptors instead of %d!\n",
+                   IGB_DEFAULT_TXD, igb_txd);
+               adapter->num_tx_desc = IGB_DEFAULT_TXD;
+       } else
+               adapter->num_tx_desc = igb_txd;
+       if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
+           (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
+               device_printf(dev, "Using %d RX descriptors instead of %d!\n",
+                   IGB_DEFAULT_RXD, igb_rxd);
+               adapter->num_rx_desc = IGB_DEFAULT_RXD;
+       } else
+               adapter->num_rx_desc = igb_rxd;
+
+       adapter->hw.mac.autoneg = DO_AUTO_NEG;
+       adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+       adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+
+       /* Copper options */
+       if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+               adapter->hw.phy.mdix = AUTO_ALL_MODES;
+               adapter->hw.phy.disable_polarity_correction = FALSE;
+               adapter->hw.phy.ms_type = IGB_MASTER_SLAVE;
+       }
+
+       /*
+        * Set the frame limits assuming
+        * standard ethernet sized frames.
+        */
+       adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
+       adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
+
+       /*
+       ** Allocate and Setup Queues
+       */
+       if (igb_allocate_queues(adapter)) {
+               error = ENOMEM;
+               goto err_pci;
+       }
+
+       /* Allocate the appropriate stats memory */
+       if (adapter->vf_ifp) {
+               adapter->stats =
+                   (struct e1000_vf_stats *)malloc(sizeof \
+                   (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
+               igb_vf_init_stats(adapter);
+       } else
+               adapter->stats =
+                   (struct e1000_hw_stats *)malloc(sizeof \
+                   (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (adapter->stats == NULL) {
+               device_printf(dev, "Can not allocate stats memory\n");
+               error = ENOMEM;
+               goto err_late;
+       }
+
+       /* Allocate multicast array memory. */
+       adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
+           MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
+       if (adapter->mta == NULL) {
+               device_printf(dev, "Can not allocate multicast setup array\n");
+               error = ENOMEM;
+               goto err_late;
+       }
+
+       /* Some adapter-specific advanced features */
+       if (adapter->hw.mac.type >= e1000_i350) {
+               igb_set_sysctl_value(adapter, "dma_coalesce",
+                   "configure dma coalesce",
+                   &adapter->dma_coalesce, igb_dma_coalesce);
+               igb_set_sysctl_value(adapter, "eee_disabled",
+                   "enable Energy Efficient Ethernet",
+                   &adapter->hw.dev_spec._82575.eee_disable,
+                   igb_eee_disabled);
+               e1000_set_eee_i350(&adapter->hw);
+       }
+
+       /*
+       ** Start from a known state; this is
+       ** important for reading the nvm and
+       ** mac address afterwards.
+       */
+       e1000_reset_hw(&adapter->hw);
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+               /*
+               ** Some PCI-E parts fail the first check due to
+               ** the link being in a sleep state; call it again, and
+               ** if it fails a second time it's a real issue.
+               */
+               if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+                       device_printf(dev,
+                           "The EEPROM Checksum Is Not Valid\n");
+                       error = EIO;
+                       goto err_late;
+               }
+       }
+
+       /*
+       ** Copy the permanent MAC address out of the EEPROM
+       */
+       if (e1000_read_mac_addr(&adapter->hw) < 0) {
+               device_printf(dev, "EEPROM read error while reading MAC"
+                   " address\n");
+               error = EIO;
+               goto err_late;
+       }
+       /* Check its sanity */
+       if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) {
+               device_printf(dev, "Invalid MAC address\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* 
+       ** Configure Interrupts
+       */
+       if ((adapter->msix > 1) && (igb_enable_msix))
+               error = igb_allocate_msix(adapter);
+       else /* MSI or Legacy */
+               error = igb_allocate_legacy(adapter);
+       if (error)
+               goto err_late;
+
+       /* Setup OS specific network interface */
+       if (igb_setup_interface(dev, adapter) != 0)
+               goto err_late;
+
+       /* Now get a good starting state */
+       igb_reset(adapter);
+
+       /* Initialize statistics */
+       igb_update_stats_counters(adapter);
+
+       adapter->hw.mac.get_link_status = 1;
+       igb_update_link_status(adapter);
+
+       /* Indicate SOL/IDER usage */
+       if (e1000_check_reset_block(&adapter->hw))
+               device_printf(dev,
+                   "PHY reset is blocked due to SOL/IDER session.\n");
+
+       /* Determine if we have to control management hardware */
+       adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
+
+       /*
+        * Setup Wake-on-Lan
+        */
+       /* APME bit in EEPROM is mapped to WUC.APME */
+       eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME;
+       if (eeprom_data)
+               adapter->wol = E1000_WUFC_MAG;
+
+       /* Register for VLAN events */
+       adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+            igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+       adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+            igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+       igb_add_hw_stats(adapter);
+
+       /* Tell the stack that the interface is not active */
+       adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       adapter->led_dev = led_create(igb_led_func, adapter,
+           device_get_nameunit(dev));
+
+       INIT_DEBUGOUT("igb_attach: end");
+
+       return (0);
+
+err_late:
+       igb_free_transmit_structures(adapter);
+       igb_free_receive_structures(adapter);
+       igb_release_hw_control(adapter);
+       if (adapter->ifp != NULL)
+               if_free(adapter->ifp);
+err_pci:
+       igb_free_pci_resources(adapter);
+       free(adapter->mta, M_DEVBUF);
+       IGB_CORE_LOCK_DESTROY(adapter);
+
+       return (error);
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+igb_detach(device_t dev)
+{
+       struct adapter  *adapter = device_get_softc(dev);
+       struct ifnet    *ifp = adapter->ifp;
+
+       INIT_DEBUGOUT("igb_detach: begin");
+
+       /* Make sure VLANS are not using driver */
+       if (adapter->ifp->if_vlantrunk != NULL) {
+               device_printf(dev,"Vlan in use, detach first\n");
+               return (EBUSY);
+       }
+
+       if (adapter->led_dev != NULL)
+               led_destroy(adapter->led_dev);
+
+#ifdef DEVICE_POLLING
+       if (ifp->if_capenable & IFCAP_POLLING)
+               ether_poll_deregister(ifp);
+#endif
+
+       IGB_CORE_LOCK(adapter);
+       adapter->in_detach = 1;
+       igb_stop(adapter);
+       IGB_CORE_UNLOCK(adapter);
+
+       e1000_phy_hw_reset(&adapter->hw);
+
+       /* Give control back to firmware */
+       igb_release_manageability(adapter);
+       igb_release_hw_control(adapter);
+
+       if (adapter->wol) {
+               E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+               E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+               igb_enable_wakeup(dev);
+       }
+
+       /* Unregister VLAN events */
+       if (adapter->vlan_attach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+       if (adapter->vlan_detach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+
+       ether_ifdetach(adapter->ifp);
+
+       callout_drain(&adapter->timer);
+
+       igb_free_pci_resources(adapter);
+       bus_generic_detach(dev);
+       if_free(ifp);
+
+       igb_free_transmit_structures(adapter);
+       igb_free_receive_structures(adapter);
+       free(adapter->mta, M_DEVBUF);
+
+       IGB_CORE_LOCK_DESTROY(adapter);
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+igb_shutdown(device_t dev)
+{
+       return igb_suspend(dev);
+}
+
+/*
+ * Suspend/resume device methods.
+ */
+static int
+igb_suspend(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+
+       IGB_CORE_LOCK(adapter);
+
+       igb_stop(adapter);
+
+        igb_release_manageability(adapter);
+       igb_release_hw_control(adapter);
+
+        if (adapter->wol) {
+                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+                igb_enable_wakeup(dev);
+        }
+
+       IGB_CORE_UNLOCK(adapter);
+
+       return bus_generic_suspend(dev);
+}
+
+static int
+igb_resume(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       struct ifnet *ifp = adapter->ifp;
+
+       IGB_CORE_LOCK(adapter);
+       igb_init_locked(adapter);
+       igb_init_manageability(adapter);
+
+       if ((ifp->if_flags & IFF_UP) &&
+           (ifp->if_drv_flags & IFF_DRV_RUNNING))
+               igb_start(ifp);
+
+       IGB_CORE_UNLOCK(adapter);
+
+       return bus_generic_resume(dev);
+}
+
+
+/*********************************************************************
+ *  Transmit entry point
+ *
+ *  igb_start is called by the stack to initiate a transmit.
+ *  The driver will remain in this routine as long as there are
+ *  packets to transmit and transmit resources are available.
+ *  In case resources are not available, the stack is notified and
+ *  the packet is requeued.
+ **********************************************************************/
+
+static void
+igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct mbuf     *m_head;
+
+       IGB_TX_LOCK_ASSERT(txr);
+
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING)
+               return;
+       if (!adapter->link_active)
+               return;
+
+       /* Call cleanup if number of TX descriptors low */
+       if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
+               igb_txeof(txr);
+
+       while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+               if (txr->tx_avail <= IGB_MAX_SCATTER) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+               IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+               if (m_head == NULL)
+                       break;
+               /*
+                *  Encapsulation can modify our pointer, and/or make it
+                *  NULL on failure.  In that event, we can't requeue.
+                */
+               if (igb_xmit(txr, &m_head)) {
+                       if (m_head == NULL)
+                               break;
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+                       break;
+               }
+
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, m_head);
+
+               /* Set watchdog on */
+               txr->watchdog_time = ticks;
+               txr->queue_status = IGB_QUEUE_WORKING;
+       }
+}
+/*
+ * Legacy TX driver routine, called from the
+ * stack, always uses tx[0], and spins for it.
+ * Should not be used with multiqueue tx
+ */
+static void
+igb_start(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               IGB_TX_LOCK(txr);
+               igb_start_locked(txr, ifp);
+               IGB_TX_UNLOCK(txr);
+       }
+       return;
+}
+
+#if __FreeBSD_version >= 800000
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+igb_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+       struct adapter          *adapter = ifp->if_softc;
+       struct igb_queue        *que;
+       struct tx_ring          *txr;
+       int                     i = 0, err = 0;
+
+       /* Which queue to use */
+       if ((m->m_flags & M_FLOWID) != 0)
+               i = m->m_pkthdr.flowid % adapter->num_queues;
+
+       txr = &adapter->tx_rings[i];
+       que = &adapter->queues[i];
+
+       if (IGB_TX_TRYLOCK(txr)) {
+               err = igb_mq_start_locked(ifp, txr, m);
+               IGB_TX_UNLOCK(txr);
+       } else {
+               err = drbr_enqueue(ifp, txr->br, m);
+               taskqueue_enqueue(que->tq, &que->que_task);
+       }
+
+       return (err);
+}
+
+static int
+igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+       struct adapter  *adapter = txr->adapter;
+        struct mbuf     *next;
+        int             err = 0, enq;
+
+       IGB_TX_LOCK_ASSERT(txr);
+
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING || adapter->link_active == 0) {
+               if (m != NULL)
+                       err = drbr_enqueue(ifp, txr->br, m);
+               return (err);
+       }
+
+       enq = 0;
+       if (m == NULL) {
+               next = drbr_dequeue(ifp, txr->br);
+       } else if (drbr_needs_enqueue(ifp, txr->br)) {
+               if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+                       return (err);
+               next = drbr_dequeue(ifp, txr->br);
+       } else
+               next = m;
+
+       /* Process the queue */
+       while (next != NULL) {
+               if ((err = igb_xmit(txr, &next)) != 0) {
+                       if (next != NULL)
+                               err = drbr_enqueue(ifp, txr->br, next);
+                       break;
+               }
+               enq++;
+               drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+               ETHER_BPF_MTAP(ifp, next);
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
+                       igb_txeof(txr);
+               if (txr->tx_avail <= IGB_MAX_SCATTER) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+               next = drbr_dequeue(ifp, txr->br);
+       }
+       if (enq > 0) {
+               /* Set the watchdog */
+               txr->queue_status = IGB_QUEUE_WORKING;
+               txr->watchdog_time = ticks;
+       }
+       return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+igb_qflush(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct mbuf     *m;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IGB_TX_LOCK(txr);
+               while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+                       m_freem(m);
+               IGB_TX_UNLOCK(txr);
+       }
+       if_qflush(ifp);
+}
+#endif /* __FreeBSD_version >= 800000 */
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  igb_ioctl is called when the user wants to configure the
+ *  interface.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ifreq *ifr = (struct ifreq *)data;
+#ifdef INET
+       struct ifaddr *ifa = (struct ifaddr *)data;
+#endif
+       int error = 0;
+
+       if (adapter->in_detach)
+               return (error);
+
+       switch (command) {
+       case SIOCSIFADDR:
+#ifdef INET
+               if (ifa->ifa_addr->sa_family == AF_INET) {
+                       /*
+                        * XXX
+                        * Since resetting hardware takes a very long time
+                        * and results in link renegotiation, we only
+                        * initialize the hardware when it is absolutely
+                        * required.
+                        */
+                       ifp->if_flags |= IFF_UP;
+                       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+                               IGB_CORE_LOCK(adapter);
+                               igb_init_locked(adapter);
+                               IGB_CORE_UNLOCK(adapter);
+                       }
+                       if (!(ifp->if_flags & IFF_NOARP))
+                               arp_ifinit(ifp, ifa);
+               } else
+#endif
+                       error = ether_ioctl(ifp, command, data);
+               break;
+       case SIOCSIFMTU:
+           {
+               int max_frame_size;
+
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
+
+               IGB_CORE_LOCK(adapter);
+               max_frame_size = 9234;
+               if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
+                   ETHER_CRC_LEN) {
+                       IGB_CORE_UNLOCK(adapter);
+                       error = EINVAL;
+                       break;
+               }
+
+               ifp->if_mtu = ifr->ifr_mtu;
+               adapter->max_frame_size =
+                   ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+               igb_init_locked(adapter);
+               IGB_CORE_UNLOCK(adapter);
+               break;
+           }
+       case SIOCSIFFLAGS:
+               IOCTL_DEBUGOUT("ioctl rcv'd:\
+                   SIOCSIFFLAGS (Set Interface Flags)");
+               IGB_CORE_LOCK(adapter);
+               if (ifp->if_flags & IFF_UP) {
+                       if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+                               if ((ifp->if_flags ^ adapter->if_flags) &
+                                   (IFF_PROMISC | IFF_ALLMULTI)) {
+                                       igb_disable_promisc(adapter);
+                                       igb_set_promisc(adapter);
+                               }
+                       } else
+                               igb_init_locked(adapter);
+               } else
+                       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+                               igb_stop(adapter);
+               adapter->if_flags = ifp->if_flags;
+               IGB_CORE_UNLOCK(adapter);
+               break;
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       IGB_CORE_LOCK(adapter);
+                       igb_disable_intr(adapter);
+                       igb_set_multi(adapter);
+#ifdef DEVICE_POLLING
+                       if (!(ifp->if_capenable & IFCAP_POLLING))
+#endif
+                               igb_enable_intr(adapter);
+                       IGB_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFMEDIA:
+               /*
+               ** As the speed/duplex settings are being
+               ** changed, we need to reset the PHY.
+               */
+               adapter->hw.phy.reset_disable = FALSE;
+               /* Check SOL/IDER usage */
+               IGB_CORE_LOCK(adapter);
+               if (e1000_check_reset_block(&adapter->hw)) {
+                       IGB_CORE_UNLOCK(adapter);
+                       device_printf(adapter->dev, "Media change is"
+                           " blocked due to SOL/IDER session.\n");
+                       break;
+               }
+               IGB_CORE_UNLOCK(adapter);
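+               /* FALLTHROUGH: share the media ioctl handling below */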
+       case SIOCGIFMEDIA:
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
+               error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+               break;
+       case SIOCSIFCAP:
+           {
+               int mask, reinit;
+
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
+               reinit = 0;
+               mask = ifr->ifr_reqcap ^ ifp->if_capenable;
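+               /* mask now holds only the capability bits the caller toggled */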
+#ifdef DEVICE_POLLING
+               if (mask & IFCAP_POLLING) {
+                       if (ifr->ifr_reqcap & IFCAP_POLLING) {
+                               error = ether_poll_register(igb_poll, ifp);
+                               if (error)
+                                       return (error);
+                               IGB_CORE_LOCK(adapter);
+                               igb_disable_intr(adapter);
+                               ifp->if_capenable |= IFCAP_POLLING;
+                               IGB_CORE_UNLOCK(adapter);
+                       } else {
+                               error = ether_poll_deregister(ifp);
+                               /* Enable interrupt even in error case */
+                               IGB_CORE_LOCK(adapter);
+                               igb_enable_intr(adapter);
+                               ifp->if_capenable &= ~IFCAP_POLLING;
+                               IGB_CORE_UNLOCK(adapter);
+                       }
+               }
+#endif
+               if (mask & IFCAP_HWCSUM) {
+                       ifp->if_capenable ^= IFCAP_HWCSUM;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_TSO4) {
+                       ifp->if_capenable ^= IFCAP_TSO4;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_VLAN_HWTAGGING) {
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_VLAN_HWFILTER) {
+                       ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_LRO) {
+                       ifp->if_capenable ^= IFCAP_LRO;
+                       reinit = 1;
+               }
+               if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
+                       igb_init(adapter);
+               VLAN_CAPABILITIES(ifp);
+               break;
+           }
+
+       default:
+               error = ether_ioctl(ifp, command, data);
+               break;
+       }
+
+       return (error);
+}
+
+
+/*********************************************************************
+ *  Init entry point
+ *
+ *  This routine is used in two ways. It is used by the stack as
+ *  init entry point in network interface structure. It is also used
+ *  by the driver as a hw/sw initialization routine to get to a
+ *  consistent state.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static void
+igb_init_locked(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       device_t        dev = adapter->dev;
+
+       INIT_DEBUGOUT("igb_init: begin");
+
+       IGB_CORE_LOCK_ASSERT(adapter);
+
+       igb_disable_intr(adapter);
+       callout_stop(&adapter->timer);
+
+       /* Get the latest MAC address; the user may have set a LAA */
+       bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
+           ETHER_ADDR_LEN);
+
+       /* Put the address into the Receive Address Array */
+       e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+       igb_reset(adapter);
+       igb_update_link_status(adapter);
+
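+       /* Program the VLAN ethertype (0x8100) into the VET register */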
+       E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+
+       /* Set hardware offload abilities */
+       ifp->if_hwassist = 0;
+       if (ifp->if_capenable & IFCAP_TXCSUM) {
+               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+               if (adapter->hw.mac.type == e1000_82576)
+                       ifp->if_hwassist |= CSUM_SCTP;
+#endif
+       }
+
+       if (ifp->if_capenable & IFCAP_TSO4)
+               ifp->if_hwassist |= CSUM_TSO;
+
+       /* Configure for OS presence */
+       igb_init_manageability(adapter);
+
+       /* Prepare transmit descriptors and buffers */
+       igb_setup_transmit_structures(adapter);
+       igb_initialize_transmit_units(adapter);
+
+       /* Setup Multicast table */
+       igb_set_multi(adapter);
+
+       /*
+       ** Figure out the desired mbuf pool
+       ** for doing jumbo/packetsplit
+       */
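+       /*
+       ** MCLBYTES is a 2K cluster, MJUMPAGESIZE a page-sized
+       ** (typically 4K) cluster, and MJUM9BYTES a 9K cluster.
+       */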
+       if (adapter->max_frame_size <= 2048)
+               adapter->rx_mbuf_sz = MCLBYTES;
+       else if (adapter->max_frame_size <= 4096)
+               adapter->rx_mbuf_sz = MJUMPAGESIZE;
+       else
+               adapter->rx_mbuf_sz = MJUM9BYTES;
+
+       /* Prepare receive descriptors and buffers */
+       if (igb_setup_receive_structures(adapter)) {
+               device_printf(dev, "Could not setup receive structures\n");
+               return;
+       }
+       igb_initialize_receive_units(adapter);
+
+        /* Enable VLAN support */
+       if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+               igb_setup_vlan_hw_support(adapter);
+                                
+       /* Don't lose promiscuous settings */
+       igb_set_promisc(adapter);
+
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+       callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
+       e1000_clear_hw_cntrs_base_generic(&adapter->hw);
+
+       if (adapter->msix > 1) /* Set up queue routing */
+               igb_configure_queues(adapter);
+
+       /* this clears any pending interrupts */
+       E1000_READ_REG(&adapter->hw, E1000_ICR);
+#ifdef DEVICE_POLLING
+       /*
+        * Only enable interrupts if we are not polling, make sure
+        * they are off otherwise.
+        */
+       if (ifp->if_capenable & IFCAP_POLLING)
+               igb_disable_intr(adapter);
+       else
+#endif /* DEVICE_POLLING */
+       {
+               igb_enable_intr(adapter);
+               E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+       }
+
+       /* Set Energy Efficient Ethernet */
+       e1000_set_eee_i350(&adapter->hw);
+
+       /* Don't reset the phy next time init gets called */
+       adapter->hw.phy.reset_disable = TRUE;
+}
+
+static void
+igb_init(void *arg)
+{
+       struct adapter *adapter = arg;
+
+       IGB_CORE_LOCK(adapter);
+       igb_init_locked(adapter);
+       IGB_CORE_UNLOCK(adapter);
+}
+
+
+static void
+igb_handle_que(void *context, int pending)
+{
+       struct igb_queue *que = context;
+       struct adapter *adapter = que->adapter;
+       struct tx_ring *txr = que->txr;
+       struct ifnet    *ifp = adapter->ifp;
+
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               bool    more;
+
+               more = igb_rxeof(que, -1, NULL);
+
+               IGB_TX_LOCK(txr);
+               if (igb_txeof(txr))
+                       more = TRUE;
+#if __FreeBSD_version >= 800000
+               if (!drbr_empty(ifp, txr->br))
+                       igb_mq_start_locked(ifp, txr, NULL);
+#else
+               igb_start_locked(txr, ifp);
+#endif
+               IGB_TX_UNLOCK(txr);
+               if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
+                       taskqueue_enqueue(que->tq, &que->que_task);
+                       return;
+               }
+       }
+
+#ifdef DEVICE_POLLING
+       if (ifp->if_capenable & IFCAP_POLLING)
+               return;
+#endif
+       /* Reenable this interrupt */
+       if (que->eims)
+               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
+       else
+               igb_enable_intr(adapter);
+}
+
+/* Deal with link in a sleepable context */
+static void
+igb_handle_link(void *context, int pending)
+{
+       struct adapter *adapter = context;
+
+       adapter->hw.mac.get_link_status = 1;
+       igb_update_link_status(adapter);
+}
+
+/*********************************************************************
+ *
+ *  MSI/Legacy Deferred
+ *  Interrupt Service routine  
+ *
+ *********************************************************************/
+static int
+igb_irq_fast(void *arg)
+{
+       struct adapter          *adapter = arg;
+       struct igb_queue        *que = adapter->queues;
+       u32                     reg_icr;
+
+
+       reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+       /* Hot eject?  */
+       if (reg_icr == 0xffffffff)
+               return FILTER_STRAY;
+
+       /* Definitely not our interrupt.  */
+       if (reg_icr == 0x0)
+               return FILTER_STRAY;
+
+       if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
+               return FILTER_STRAY;
+
+       /*
+        * Mask interrupts until the taskqueue is finished running.  This is
+        * cheap, just assume that it is needed.  This also works around the
+        * MSI message reordering errata on certain systems.
+        */
+       igb_disable_intr(adapter);
+       taskqueue_enqueue(que->tq, &que->que_task);
+
+       /* Link status change */
+       if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
+               taskqueue_enqueue(que->tq, &adapter->link_task);
+
+       if (reg_icr & E1000_ICR_RXO)
+               adapter->rx_overruns++;
+       return FILTER_HANDLED;
+}
+
+#ifdef DEVICE_POLLING
+/*********************************************************************
+ *
+ *  Legacy polling routine : if using this code you MUST be sure that
+ *  multiqueue is not in use, i.e., igb_num_queues is set to 1.
+ *
+ *********************************************************************/
+#if __FreeBSD_version >= 800000
+#define POLL_RETURN_COUNT(a) (a)
+static int
+#else
+#define POLL_RETURN_COUNT(a)
+static void
+#endif
+igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+       struct adapter          *adapter = ifp->if_softc;
+       struct igb_queue        *que = adapter->queues;
+       struct tx_ring          *txr = adapter->tx_rings;
+       u32                     reg_icr, rx_done = 0;
+       u32                     loop = IGB_MAX_LOOP;
+       bool                    more;
+
+       IGB_CORE_LOCK(adapter);
+       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+               IGB_CORE_UNLOCK(adapter);
+               return POLL_RETURN_COUNT(rx_done);
+       }
+
+       if (cmd == POLL_AND_CHECK_STATUS) {
+               reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+               /* Link status change */
+               if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
+                       igb_handle_link(adapter, 0);
+
+               if (reg_icr & E1000_ICR_RXO)
+                       adapter->rx_overruns++;
+       }
+       IGB_CORE_UNLOCK(adapter);
+
+       igb_rxeof(que, count, &rx_done);
+
+       IGB_TX_LOCK(txr);
+       do {
+               more = igb_txeof(txr);
+       } while (loop-- && more);
+#if __FreeBSD_version >= 800000
+       if (!drbr_empty(ifp, txr->br))
+               igb_mq_start_locked(ifp, txr, NULL);
+#else
+       igb_start_locked(txr, ifp);
+#endif
+       IGB_TX_UNLOCK(txr);
+       return POLL_RETURN_COUNT(rx_done);
+}
+#endif /* DEVICE_POLLING */
+
+/*********************************************************************
+ *
+ *  MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+static void
+igb_msix_que(void *arg)
+{
+       struct igb_queue *que = arg;
+       struct adapter *adapter = que->adapter;
+       struct tx_ring *txr = que->txr;
+       struct rx_ring *rxr = que->rxr;
+       u32             newitr = 0;
+       bool            more_tx, more_rx;
+
+       E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
+       ++que->irqs;
+
+       IGB_TX_LOCK(txr);
+       more_tx = igb_txeof(txr);
+       IGB_TX_UNLOCK(txr);
+
+       more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);
+
+       if (igb_enable_aim == FALSE)
+               goto no_calc;
+       /*
+       ** Do Adaptive Interrupt Moderation:
+       **  - Write out the last calculated setting
+       **  - Calculate a new value based on the average
+       **    packet size over the last interval.
+       */
+        if (que->eitr_setting)
+                E1000_WRITE_REG(&adapter->hw,
+                    E1000_EITR(que->msix), que->eitr_setting);
+        que->eitr_setting = 0;
+
+        /* Idle, do nothing */
+        if ((txr->bytes == 0) && (rxr->bytes == 0))
+                goto no_calc;
+                                
+        /* Use half the default if sub-gig */
+        if (adapter->link_speed != 1000)
+                newitr = IGB_DEFAULT_ITR / 2;
+        else {
+               if ((txr->bytes) && (txr->packets))
+                       newitr = txr->bytes/txr->packets;
+               if ((rxr->bytes) && (rxr->packets))
+                       newitr = max(newitr,
+                           (rxr->bytes / rxr->packets));
+                newitr += 24; /* account for hardware frame, crc */
+               /* set an upper boundary */
+               newitr = min(newitr, 3000);
+               /* Be nice to the mid range */
+                if ((newitr > 300) && (newitr < 1200))
+                        newitr = (newitr / 3);
+                else
+                        newitr = (newitr / 2);
+        }
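+        /*
+         * Example: at 1Gbps with ~600-byte average frames this gives
+         * 600 + 24 = 624, which lands in the 300-1200 mid range and
+         * is scaled down to 208 before the mask below is applied.
+         */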
+        newitr &= 0x7FFC;  /* Mask invalid bits */
+        if (adapter->hw.mac.type == e1000_82575)
+                newitr |= newitr << 16;
+        else
+                newitr |= E1000_EITR_CNT_IGNR;
+                 
+        /* save for next interrupt */
+        que->eitr_setting = newitr;
+
+        /* Reset state */
+        txr->bytes = 0;
+        txr->packets = 0;
+        rxr->bytes = 0;
+        rxr->packets = 0;
+
+no_calc:
+       /* Schedule a clean task if needed */
+       if (more_tx || more_rx ||
+           (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE))
+               taskqueue_enqueue(que->tq, &que->que_task);
+       else
+               /* Reenable this interrupt */
+               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX Link Interrupt Service routine
+ *
+ **********************************************************************/
+
+static void
+igb_msix_link(void *arg)
+{
+       struct adapter  *adapter = arg;
+       u32             icr;
+
+       ++adapter->link_irq;
+       icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+       if (!(icr & E1000_ICR_LSC))
+               goto spurious;
+       igb_handle_link(adapter, 0);
+
+spurious:
+       /* Rearm */
+       E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
+       E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+       struct adapter *adapter = ifp->if_softc;
+       u_char fiber_type = IFM_1000_SX;
+
+       INIT_DEBUGOUT("igb_media_status: begin");
+
+       IGB_CORE_LOCK(adapter);
+       igb_update_link_status(adapter);
+
+       ifmr->ifm_status = IFM_AVALID;
+       ifmr->ifm_active = IFM_ETHER;
+
+       if (!adapter->link_active) {
+               IGB_CORE_UNLOCK(adapter);
+               return;
+       }
+
+       ifmr->ifm_status |= IFM_ACTIVE;
+
+       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
+           (adapter->hw.phy.media_type == e1000_media_type_internal_serdes))
+               ifmr->ifm_active |= fiber_type | IFM_FDX;
+       else {
+               switch (adapter->link_speed) {
+               case 10:
+                       ifmr->ifm_active |= IFM_10_T;
+                       break;
+               case 100:
+                       ifmr->ifm_active |= IFM_100_TX;
+                       break;
+               case 1000:
+                       ifmr->ifm_active |= IFM_1000_T;
+                       break;
+               }
+               if (adapter->link_duplex == FULL_DUPLEX)
+                       ifmr->ifm_active |= IFM_FDX;
+               else
+                       ifmr->ifm_active |= IFM_HDX;
+       }
+       IGB_CORE_UNLOCK(adapter);
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+igb_media_change(struct ifnet *ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct ifmedia  *ifm = &adapter->media;
+
+       INIT_DEBUGOUT("igb_media_change: begin");
+
+       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+               return (EINVAL);
+
+       IGB_CORE_LOCK(adapter);
+       switch (IFM_SUBTYPE(ifm->ifm_media)) {
+       case IFM_AUTO:
+               adapter->hw.mac.autoneg = DO_AUTO_NEG;
+               adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+               break;
+       case IFM_1000_LX:
+       case IFM_1000_SX:
+       case IFM_1000_T:
+               adapter->hw.mac.autoneg = DO_AUTO_NEG;
+               adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+               break;
+       case IFM_100_TX:
+               adapter->hw.mac.autoneg = FALSE;
+               adapter->hw.phy.autoneg_advertised = 0;
+               if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
+               else
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
+               break;
+       case IFM_10_T:
+               adapter->hw.mac.autoneg = FALSE;
+               adapter->hw.phy.autoneg_advertised = 0;
+               if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
+               else
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
+               break;
+       default:
+               device_printf(adapter->dev, "Unsupported media type\n");
+       }
+
+       igb_init_locked(adapter);
+       IGB_CORE_UNLOCK(adapter);
+
+       return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  This routine maps the mbufs to the Advanced TX descriptor
+ *  format used by the 82575 family of adapters.
+ *
+ **********************************************************************/
+
+static int
+igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+       struct adapter          *adapter = txr->adapter;
+       bus_dma_segment_t       segs[IGB_MAX_SCATTER];
+       bus_dmamap_t            map;
+       struct igb_tx_buffer    *tx_buffer, *tx_buffer_mapped;
+       union e1000_adv_tx_desc *txd = NULL;
+       struct mbuf             *m_head;
+       u32                     olinfo_status = 0, cmd_type_len = 0;
+       int                     nsegs, i, j, error, first, last = 0;
+       u32                     hdrlen = 0;
+
+       m_head = *m_headp;
+
+
+       /* Set basic descriptor constants */
+       cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
+       cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+       if (m_head->m_flags & M_VLANTAG)
+               cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+
+       /*
+         * Map the packet for DMA.
+        *
+        * Capture the first descriptor index;
+        * this descriptor will have the index
+        * of the EOP, which is the only one that
+        * now gets a DONE bit writeback.
+        */
+       first = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[first];
+       tx_buffer_mapped = tx_buffer;
+       map = tx_buffer->map;
+
+       error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+           *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+       if (error == EFBIG) {
+               struct mbuf *m;
+
+               m = m_defrag(*m_headp, M_DONTWAIT);
+               if (m == NULL) {
+                       adapter->mbuf_defrag_failed++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (ENOBUFS);
+               }
+               *m_headp = m;
+
+               /* Try it again */
+               error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+                   *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+               if (error == ENOMEM) {
+                       adapter->no_tx_dma_setup++;
+                       return (error);
+               } else if (error != 0) {
+                       adapter->no_tx_dma_setup++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (error);
+               }
+       } else if (error == ENOMEM) {
+               adapter->no_tx_dma_setup++;
+               return (error);
+       } else if (error != 0) {
+               adapter->no_tx_dma_setup++;
+               m_freem(*m_headp);
+               *m_headp = NULL;
+               return (error);
+       }
+
+       /* Check again to be sure we have enough descriptors */
+        if (nsegs > (txr->tx_avail - 2)) {
+                txr->no_desc_avail++;
+               bus_dmamap_unload(txr->txtag, map);
+               return (ENOBUFS);
+        }
+       m_head = *m_headp;
+
+        /*
+         * Set up the context descriptor:
+         * used when any hardware offload is done.
+        * This includes CSUM, VLAN, and TSO. It
+        * will use the first descriptor.
+         */
+        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+               if (igb_tso_setup(txr, m_head, &hdrlen)) {
+                       cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+                       olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+                       olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+               } else
+                       return (ENXIO); 
+       } else if (igb_tx_ctx_setup(txr, m_head))
+               olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+       /* Calculate payload length */
+       olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
+           << E1000_ADVTXD_PAYLEN_SHIFT);
+
+       /* 82575 needs the queue index added */
+       if (adapter->hw.mac.type == e1000_82575)
+               olinfo_status |= txr->me << 4;
+
+       /* Set up our transmit descriptors */
+       i = txr->next_avail_desc;
+       for (j = 0; j < nsegs; j++) {
+               bus_size_t seg_len;
+               bus_addr_t seg_addr;
+
+               tx_buffer = &txr->tx_buffers[i];
+               txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
+               seg_addr = segs[j].ds_addr;
+               seg_len  = segs[j].ds_len;
+
+               txd->read.buffer_addr = htole64(seg_addr);
+               txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
+               txd->read.olinfo_status = htole32(olinfo_status);
+               last = i;
+               if (++i == adapter->num_tx_desc)
+                       i = 0;
+               tx_buffer->m_head = NULL;
+               tx_buffer->next_eop = -1;
+       }
+
+       txr->next_avail_desc = i;
+       txr->tx_avail -= nsegs;
+
+        tx_buffer->m_head = m_head;
+       tx_buffer_mapped->map = tx_buffer->map;
+       tx_buffer->map = map;
+        bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+        /*
+         * Last Descriptor of Packet
+        * needs End Of Packet (EOP)
+        * and Report Status (RS)
+         */
+        txd->read.cmd_type_len |=
+           htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS);
+       /*
+        * Keep track in the first buffer which
+        * descriptor will be written back
+        */
+       tx_buffer = &txr->tx_buffers[first];
+       tx_buffer->next_eop = last;
+       txr->watchdog_time = ticks;
+
+       /*
+        * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
+        * that this frame is available to transmit.
+        */
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
+       ++txr->tx_packets;
+
+       return (0);
+
+}
+
+static void
+igb_set_promisc(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct e1000_hw *hw = &adapter->hw;
+       u32             reg;
+
+       if (adapter->vf_ifp) {
+               e1000_promisc_set_vf(hw, e1000_promisc_enabled);
+               return;
+       }
+
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       if (ifp->if_flags & IFF_PROMISC) {
+               reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+               E1000_WRITE_REG(hw, E1000_RCTL, reg);
+       } else if (ifp->if_flags & IFF_ALLMULTI) {
+               reg |= E1000_RCTL_MPE;
+               reg &= ~E1000_RCTL_UPE;
+               E1000_WRITE_REG(hw, E1000_RCTL, reg);
+       }
+}
+
+static void
+igb_disable_promisc(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32             reg;
+
+       if (adapter->vf_ifp) {
+               e1000_promisc_set_vf(hw, e1000_promisc_disabled);
+               return;
+       }
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg &=  (~E1000_RCTL_UPE);
+       reg &=  (~E1000_RCTL_MPE);
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+
+/*********************************************************************
+ *  Multicast Update
+ *
+ *  This routine is called whenever multicast address list is updated.
+ *
+ **********************************************************************/
+
+static void
+igb_set_multi(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct ifmultiaddr *ifma;
+       u32 reg_rctl = 0;
+       u8  *mta;
+
+       int mcnt = 0;
+
+       IOCTL_DEBUGOUT("igb_set_multi: begin");
+
+       mta = adapter->mta;
+       bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN *
+           MAX_NUM_MULTICAST_ADDRESSES);
+
+#if __FreeBSD_version < 800000
+       IF_ADDR_LOCK(ifp);
+#else
+       if_maddr_rlock(ifp);
+#endif
+       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+               if (ifma->ifma_addr->sa_family != AF_LINK)
+                       continue;
+
+               if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
+                       break;
+
+               bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+                   &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
+               mcnt++;
+       }
+#if __FreeBSD_version < 800000
+       IF_ADDR_UNLOCK(ifp);
+#else
+       if_maddr_runlock(ifp);
+#endif
+
+       if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
+               reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+               reg_rctl |= E1000_RCTL_MPE;
+               E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
+       } else
+               e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
+}
+
+
+/*********************************************************************
+ *  Timer routine:
+ *     This routine checks for link status,
+ *     updates statistics, and does the watchdog.
+ *
+ **********************************************************************/
+
+static void
+igb_local_timer(void *arg)
+{
+       struct adapter          *adapter = arg;
+       device_t                dev = adapter->dev;
+       struct tx_ring          *txr = adapter->tx_rings;
+
+
+       IGB_CORE_LOCK_ASSERT(adapter);
+
+       igb_update_link_status(adapter);
+       igb_update_stats_counters(adapter);
+
+       /* 
+       ** If flow control has paused us since the last check,
+       ** it invalidates the watchdog timing, so don't run it.
+       */
+       if (adapter->pause_frames) {
+               adapter->pause_frames = 0;
+               goto out;
+       }
+
+        /*
+        ** Watchdog: check for time since any descriptor was cleaned
+        */
+       for (int i = 0; i < adapter->num_queues; i++, txr++)
+               if (txr->queue_status == IGB_QUEUE_HUNG) 
+                       goto timeout;
+out:
+       callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
+#ifndef DEVICE_POLLING
+       /* Schedule all queue interrupts - deadlock protection */
+       E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask);
+#endif
+       return;
+
+timeout:
+       device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+       device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+            E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
+            E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
+       device_printf(dev, "TX(%d) desc avail = %d, "
+            "Next TX to Clean = %d\n",
+            txr->me, txr->tx_avail, txr->next_to_clean);
+       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+       adapter->watchdog_events++;
+       igb_init_locked(adapter);
+}
+
+static void
+igb_update_link_status(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct ifnet *ifp = adapter->ifp;
+       device_t dev = adapter->dev;
+       struct tx_ring *txr = adapter->tx_rings;
+       u32 link_check, thstat, ctrl;
+
+       link_check = thstat = ctrl = 0;
+
+       /* Get the cached link value or read for real */
+        switch (hw->phy.media_type) {
+        case e1000_media_type_copper:
+                if (hw->mac.get_link_status) {
+                       /* Do the work to read phy */
+                        e1000_check_for_link(hw);
+                        link_check = !hw->mac.get_link_status;
+                } else
+                        link_check = TRUE;
+                break;
+        case e1000_media_type_fiber:
+                e1000_check_for_link(hw);
+                link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+                                 E1000_STATUS_LU);
+                break;
+        case e1000_media_type_internal_serdes:
+                e1000_check_for_link(hw);
+                link_check = adapter->hw.mac.serdes_has_link;
+                break;
+       /* VF device is type_unknown */
+        case e1000_media_type_unknown:
+                e1000_check_for_link(hw);
+               link_check = !hw->mac.get_link_status;
+               /* Fall thru */
+        default:
+                break;
+        }
+
+       /* Check for thermal downshift or shutdown */
+       if (hw->mac.type == e1000_i350) {
+               thstat = E1000_READ_REG(hw, E1000_THSTAT);
+               ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       }
+
+       /* Now we check if a transition has happened */
+       if (link_check && (adapter->link_active == 0)) {
+               e1000_get_speed_and_duplex(&adapter->hw, 
+                   &adapter->link_speed, &adapter->link_duplex);
+               if (bootverbose)
+                       device_printf(dev, "Link is up %d Mbps %s\n",
+                           adapter->link_speed,
+                           ((adapter->link_duplex == FULL_DUPLEX) ?
+                           "Full Duplex" : "Half Duplex"));
+               adapter->link_active = 1;
+               ifp->if_baudrate = adapter->link_speed * 1000000;
+               if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
+                   (thstat & E1000_THSTAT_LINK_THROTTLE))
+                       device_printf(dev, "Link: thermal downshift\n");
+               /* This can sleep */
+               if_link_state_change(ifp, LINK_STATE_UP);
+       } else if (!link_check && (adapter->link_active == 1)) {
+               ifp->if_baudrate = adapter->link_speed = 0;
+               adapter->link_duplex = 0;
+               if (bootverbose)
+                       device_printf(dev, "Link is Down\n");
+               if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
+                   (thstat & E1000_THSTAT_PWR_DOWN))
+                       device_printf(dev, "Link: thermal shutdown\n");
+               adapter->link_active = 0;
+               /* This can sleep */
+               if_link_state_change(ifp, LINK_STATE_DOWN);
+               /* Turn off watchdogs */
+               for (int i = 0; i < adapter->num_queues; i++, txr++)
+                       txr->queue_status = IGB_QUEUE_IDLE;
+       }
+}
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+igb_stop(void *arg)
+{
+       struct adapter  *adapter = arg;
+       struct ifnet    *ifp = adapter->ifp;
+       struct tx_ring *txr = adapter->tx_rings;
+
+       IGB_CORE_LOCK_ASSERT(adapter);
+
+       INIT_DEBUGOUT("igb_stop: begin");
+
+       igb_disable_intr(adapter);
+
+       callout_stop(&adapter->timer);
+
+       /* Tell the stack that the interface is no longer active */
+       ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       /* Unarm watchdog timer. */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IGB_TX_LOCK(txr);
+               txr->queue_status = IGB_QUEUE_IDLE;
+               IGB_TX_UNLOCK(txr);
+       }
+
+       e1000_reset_hw(&adapter->hw);
+       E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+
+       e1000_led_off(&adapter->hw);
+       e1000_cleanup_led(&adapter->hw);
+}
+
+
+/*********************************************************************
+ *
+ *  Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+igb_identify_hardware(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+
+       /* Make sure our PCI config space has the necessary stuff set */
+       adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+       if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
+           (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
+               INIT_DEBUGOUT("Memory Access and/or Bus Master "
+                   "bits were not set!\n");
+               adapter->hw.bus.pci_cmd_word |=
+               (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+               pci_write_config(dev, PCIR_COMMAND,
+                   adapter->hw.bus.pci_cmd_word, 2);
+       }
+
+       /* Save off the information about this board */
+       adapter->hw.vendor_id = pci_get_vendor(dev);
+       adapter->hw.device_id = pci_get_device(dev);
+       adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+       adapter->hw.subsystem_vendor_id =
+           pci_read_config(dev, PCIR_SUBVEND_0, 2);
+       adapter->hw.subsystem_device_id =
+           pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+       /* Set MAC type early for PCI setup */
+       e1000_set_mac_type(&adapter->hw);
+
+       /* Are we a VF device? */
+       if ((adapter->hw.mac.type == e1000_vfadapt) ||
+           (adapter->hw.mac.type == e1000_vfadapt_i350))
+               adapter->vf_ifp = 1;
+       else
+               adapter->vf_ifp = 0;
+}
+
+static int
+igb_allocate_pci_resources(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       int             rid;
+
+       rid = PCIR_BAR(0);
+       adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+           &rid, RF_ACTIVE);
+       if (adapter->pci_mem == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: memory\n");
+               return (ENXIO);
+       }
+       adapter->osdep.mem_bus_space_tag =
+           rman_get_bustag(adapter->pci_mem);
+       adapter->osdep.mem_bus_space_handle =
+           rman_get_bushandle(adapter->pci_mem);
+       adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
+
+       adapter->num_queues = 1; /* Defaults for Legacy or MSI */
+
+       /* This will setup either MSI/X or MSI */
+       adapter->msix = igb_setup_msix(adapter);
+       adapter->hw.back = &adapter->osdep;
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Setup the Legacy or MSI Interrupt handler
+ *
+ **********************************************************************/
+static int
+igb_allocate_legacy(struct adapter *adapter)
+{
+       device_t                dev = adapter->dev;
+       struct igb_queue        *que = adapter->queues;
+       int                     error, rid = 0;
+
+       /* Turn off all interrupts */
+       E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
+
+       /* MSI RID is 1 */
+       if (adapter->msix == 1)
+               rid = 1;
+
+       /* We allocate a single interrupt resource */
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (adapter->res == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: "
+                   "interrupt\n");
+               return (ENXIO);
+       }
+
+       /*
+        * Try allocating a fast interrupt and the associated deferred
+        * processing contexts.
+        */
+       TASK_INIT(&que->que_task, 0, igb_handle_que, que);
+       /* Make tasklet for deferred link handling */
+       TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter);
+       que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
+           taskqueue_thread_enqueue, &que->tq);
+       taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq",
+           device_get_nameunit(adapter->dev));
+       if ((error = bus_setup_intr(dev, adapter->res,
+           INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL,
+           adapter, &adapter->tag)) != 0) {
+               device_printf(dev, "Failed to register fast interrupt "
+                           "handler: %d\n", error);
+               taskqueue_free(que->tq);
+               que->tq = NULL;
+               return (error);
+       }
+
+       return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  Setup the MSIX Queue Interrupt handlers: 
+ *
+ **********************************************************************/
+static int
+igb_allocate_msix(struct adapter *adapter)
+{
+       device_t                dev = adapter->dev;
+       struct igb_queue        *que = adapter->queues;
+       int                     error, rid, vector = 0;
+
+
+       for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+               rid = vector +1;
+               que->res = bus_alloc_resource_any(dev,
+                   SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+               if (que->res == NULL) {
+                       device_printf(dev,
+                           "Unable to allocate bus resource: "
+                           "MSIX Queue Interrupt\n");
+                       return (ENXIO);
+               }
+               error = bus_setup_intr(dev, que->res,
+                   INTR_TYPE_NET | INTR_MPSAFE, NULL,
+                   igb_msix_que, que, &que->tag);
+               if (error) {
+                       que->res = NULL;
+                       device_printf(dev, "Failed to register Queue handler\n");
+                       return (error);
+               }
+#if __FreeBSD_version >= 800504
+               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+               que->msix = vector;
+               if (adapter->hw.mac.type == e1000_82575)
+                       que->eims = E1000_EICR_TX_QUEUE0 << i;
+               else
+                       que->eims = 1 << vector;
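+               /*
+               ** que->eims records this queue's bit in the EIMS/EIMC/EICS
+               ** registers: the 82575 uses fixed per-queue bits, later
+               ** MACs one bit per MSIX vector.
+               */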
+               /*
+               ** Bind the msix vector, and thus the
+               ** rings to the corresponding cpu.
+               */
+               if (adapter->num_queues > 1)
+                       bus_bind_intr(dev, que->res, i);
+               /* Make tasklet for deferred handling */
+               TASK_INIT(&que->que_task, 0, igb_handle_que, que);
+               que->tq = taskqueue_create_fast("igb_que", M_NOWAIT,
+                   taskqueue_thread_enqueue, &que->tq);
+               taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+                   device_get_nameunit(adapter->dev));
+       }
+
+       /* And Link */
+       rid = vector + 1;
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (adapter->res == NULL) {
+               device_printf(dev,
+                   "Unable to allocate bus resource: "
+                   "MSIX Link Interrupt\n");
+               return (ENXIO);
+       }
+       if ((error = bus_setup_intr(dev, adapter->res,
+           INTR_TYPE_NET | INTR_MPSAFE, NULL,
+           igb_msix_link, adapter, &adapter->tag)) != 0) {
+               device_printf(dev, "Failed to register Link handler\n");
+               return (error);
+       }
+#if __FreeBSD_version >= 800504
+       bus_describe_intr(dev, adapter->res, adapter->tag, "link");
+#endif
+       adapter->linkvec = vector;
+
+       return (0);
+}
+
+
+static void
+igb_configure_queues(struct adapter *adapter)
+{
+       struct  e1000_hw        *hw = &adapter->hw;
+       struct  igb_queue       *que;
+       u32                     tmp, ivar = 0, newitr = 0;
+
+       /* First turn on RSS capability */
+       if (adapter->hw.mac.type != e1000_82575)
+               E1000_WRITE_REG(hw, E1000_GPIE,
+                   E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
+                   E1000_GPIE_PBA | E1000_GPIE_NSICR);
+
+       /* Turn on MSIX */
+       switch (adapter->hw.mac.type) {
+       case e1000_82580:
+       case e1000_i350:
+       case e1000_vfadapt:
+       case e1000_vfadapt_i350:
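+               /*
+               ** Each 32-bit IVAR register covers an even/odd queue pair:
+               ** bytes 0 and 2 carry the RX vectors, bytes 1 and 3 the TX
+               ** vectors, as the masks below show.
+               */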
+               /* RX entries */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i >> 1;
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i & 1) {
+                               ivar &= 0xFF00FFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 16;
+                       } else {
+                               ivar &= 0xFFFFFF00;
+                               ivar |= que->msix | E1000_IVAR_VALID;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+               }
+               /* TX entries */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i >> 1;
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i & 1) {
+                               ivar &= 0x00FFFFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 24;
+                       } else {
+                               ivar &= 0xFFFF00FF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 8;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+                       adapter->que_mask |= que->eims;
+               }
+
+               /* And for the link interrupt */
+               ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
+               adapter->link_mask = 1 << adapter->linkvec;
+               E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
+               break;
+       case e1000_82576:
+               /* RX entries */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i & 0x7; /* Each IVAR has two entries */
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i < 8) {
+                               ivar &= 0xFFFFFF00;
+                               ivar |= que->msix | E1000_IVAR_VALID;
+                       } else {
+                               ivar &= 0xFF00FFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 16;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+                       adapter->que_mask |= que->eims;
+               }
+               /* TX entries */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i & 0x7; /* Each IVAR has two entries */
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i < 8) {
+                               ivar &= 0xFFFF00FF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 8;
+                       } else {
+                               ivar &= 0x00FFFFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 24;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+                       adapter->que_mask |= que->eims;
+               }
+
+               /* And for the link interrupt */
+               ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
+               adapter->link_mask = 1 << adapter->linkvec;
+               E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
+               break;
+
+       case e1000_82575:
+               /* Enable MSI-X support */
+               tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               tmp |= E1000_CTRL_EXT_PBA_CLR;
+               /* Auto-Mask interrupts upon ICR read. */
+               tmp |= E1000_CTRL_EXT_EIAME;
+               tmp |= E1000_CTRL_EXT_IRCA;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+
+               /* Queues */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       que = &adapter->queues[i];
+                       tmp = E1000_EICR_RX_QUEUE0 << i;
+                       tmp |= E1000_EICR_TX_QUEUE0 << i;
+                       que->eims = tmp;
+                       E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
+                           i, que->eims);
+                       adapter->que_mask |= que->eims;
+               }
+
+               /* Link */
+               E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
+                   E1000_EIMS_OTHER);
+               adapter->link_mask |= E1000_EIMS_OTHER;
+       default:
+               break;
+       }
+
+       /* Set the starting interrupt rate */
+       if (igb_max_interrupt_rate > 0)
+               newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC;
+
+        if (hw->mac.type == e1000_82575)
+                newitr |= newitr << 16;
+        else
+                newitr |= E1000_EITR_CNT_IGNR;
+
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr);
+       }
+
+       return;
+}
+
+
+static void
+igb_free_pci_resources(struct adapter *adapter)
+{
+       struct          igb_queue *que = adapter->queues;
+       device_t        dev = adapter->dev;
+       int             rid;
+
+       /*
+       ** There is a slight possibility of a failure mode
+       ** in attach that will result in entering this function
+       ** before interrupt resources have been initialized, and
+       ** in that case we do not want to execute the loops below.
+       ** We can detect this reliably by the state of the adapter's
+       ** res pointer.
+       */
+       if (adapter->res == NULL)
+               goto mem;
+
+       /*
+        * First release all the interrupt resources:
+        */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               rid = que->msix + 1;
+               if (que->tag != NULL) {
+                       bus_teardown_intr(dev, que->res, que->tag);
+                       que->tag = NULL;
+               }
+               if (que->res != NULL)
+                       bus_release_resource(dev,
+                           SYS_RES_IRQ, rid, que->res);
+       }
+
+       /* Clean the Legacy or Link interrupt last */
+       if (adapter->linkvec) /* we are doing MSIX */
+               rid = adapter->linkvec + 1;
+       else
+               rid = (adapter->msix != 0) ? 1 : 0;
+
+       if (adapter->tag != NULL) {
+               bus_teardown_intr(dev, adapter->res, adapter->tag);
+               adapter->tag = NULL;
+       }
+       if (adapter->res != NULL)
+               bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+       if (adapter->msix)
+               pci_release_msi(dev);
+
+       if (adapter->msix_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
+
+       if (adapter->pci_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(0), adapter->pci_mem);
+
+}
+
+/*
+ * Setup Either MSI/X or MSI
+ */
+static int
+igb_setup_msix(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       int rid, want, queues, msgs;
+
+       /* tuneable override */
+       if (igb_enable_msix == 0)
+               goto msi;
+
+       /* First try MSI/X */
+       rid = PCIR_BAR(IGB_MSIX_BAR);
+       adapter->msix_mem = bus_alloc_resource_any(dev,
+           SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       if (!adapter->msix_mem) {
+               /* May not be enabled */
+               device_printf(adapter->dev,
+                   "Unable to map MSIX table \n");
+               goto msi;
+       }
+
+       msgs = pci_msix_count(dev); 
+       if (msgs == 0) { /* system has msix disabled */
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
+               adapter->msix_mem = NULL;
+               goto msi;
+       }
+
+       /* Figure out a reasonable auto config value */
+       queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+
+       /* Manual override */
+       if (igb_num_queues != 0)
+               queues = igb_num_queues;
+       if (queues > 8)  /* max queues */
+               queues = 8;
+
+       /* Can have max of 4 queues on 82575 */
+       if ((adapter->hw.mac.type == e1000_82575) && (queues > 4))
+               queues = 4;
+
+       /* Limit the VF devices to one queue */
+       if (adapter->vf_ifp)
+               queues = 1;
+
+       /*
+       ** One vector (RX/TX pair) per queue
+       ** plus an additional for Link interrupt
+       */
+       want = queues + 1;
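+       /*
+       ** Example: a 4-core box with 10 MSIX messages available ends up
+       ** with queues = 4 and want = 5 (4 queue vectors + 1 link vector).
+       */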
+       if (msgs >= want)
+               msgs = want;
+       else {
+               device_printf(adapter->dev,
+                   "MSIX Configuration Problem, "
+                   "%d vectors configured, but %d queues wanted!\n",
+                   msgs, want);
+               return (ENXIO);
+       }
+       if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+               device_printf(adapter->dev,
+                   "Using MSIX interrupts with %d vectors\n", msgs);
+               adapter->num_queues = queues;
+               return (msgs);
+       }
+msi:
+       msgs = pci_msi_count(dev);
+       if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+               device_printf(adapter->dev, "Using MSI interrupt\n");
+       return (msgs);
+}
+
+/*********************************************************************
+ *
+ *  Set up a fresh starting state
+ *
+ **********************************************************************/
+static void
+igb_reset(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_fc_info *fc = &hw->fc;
+       struct ifnet    *ifp = adapter->ifp;
+       u32             pba = 0;
+       u16             hwm;
+
+       INIT_DEBUGOUT("igb_reset: begin");
+
+       /* Let the firmware know the OS is in control */
+       igb_get_hw_control(adapter);
+
+       /*
+        * Packet Buffer Allocation (PBA)
+        * Writing PBA sets the receive portion of the buffer
+        * the remainder is used for the transmit buffer.
+        */
+       switch (hw->mac.type) {
+       case e1000_82575:
+               pba = E1000_PBA_32K;
+               break;
+       case e1000_82576:
+       case e1000_vfadapt:
+               pba = E1000_READ_REG(hw, E1000_RXPBS);
+               pba &= E1000_RXPBS_SIZE_MASK_82576;
+               break;
+       case e1000_82580:
+       case e1000_i350:
+       case e1000_vfadapt_i350:
+               pba = E1000_READ_REG(hw, E1000_RXPBS);
+               pba = e1000_rxpbs_adjust_82580(pba);
+               break;
+       default:
+               pba = E1000_PBA_35K;
+               break;
+       }
+
+       /* Special needs in case of Jumbo frames */
+       if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
+               u32 tx_space, min_tx, min_rx;
+               pba = E1000_READ_REG(hw, E1000_PBA);
+               tx_space = pba >> 16;
+               pba &= 0xffff;
+               min_tx = (adapter->max_frame_size +
+                   sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
+               min_tx = roundup2(min_tx, 1024);
+               min_tx >>= 10;
+                min_rx = adapter->max_frame_size;
+                min_rx = roundup2(min_rx, 1024);
+                min_rx >>= 10;
+               if (tx_space < min_tx &&
+                   ((min_tx - tx_space) < pba)) {
+                       pba = pba - (min_tx - tx_space);
+                       /*
+                         * if short on rx space, rx wins
+                         * and must trump tx adjustment
+                        */
+                        if (pba < min_rx)
+                                pba = min_rx;
+               }
+               E1000_WRITE_REG(hw, E1000_PBA, pba);
+       }
+
+       INIT_DEBUGOUT1("igb_reset: pba=%dK", pba);
+
+       /*
+        * These parameters control the automatic generation (Tx) and
+        * response (Rx) to Ethernet PAUSE frames.
+        * - High water mark should allow for at least two frames to be
+        *   received after sending an XOFF.
+        * - Low water mark works best when it is very near the high water mark.
+        *   This allows the receiver to restart by sending XON when it has
+        *   drained a bit.
+        */
+       hwm = min(((pba << 10) * 9 / 10),
+           ((pba << 10) - 2 * adapter->max_frame_size));
+
+       if (hw->mac.type < e1000_82576) {
+               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
+               fc->low_water = fc->high_water - 8;
+       } else {
+               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
+               fc->low_water = fc->high_water - 16;
+       }
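+       /*
+        * Example (82575, assuming E1000_PBA_32K is 32): hwm =
+        * min(32768 * 9 / 10, 32768 - 2 * 1518) = 29491, giving an
+        * 8-byte aligned high_water of 29488 and a low_water of 29480.
+        */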
+
+       fc->pause_time = IGB_FC_PAUSE_TIME;
+       fc->send_xon = TRUE;
+
+       /* Issue a global reset */
+       e1000_reset_hw(hw);
+       E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+       if (e1000_init_hw(hw) < 0)
+               device_printf(dev, "Hardware Initialization Failed\n");
+
+       /* Setup DMA Coalescing */
+       if ((hw->mac.type == e1000_i350) &&
+           (adapter->dma_coalesce == TRUE)) {
+               u32 reg;
+
+               hwm = (pba - 4) << 10;
+               reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
+                   & E1000_DMACR_DMACTHR_MASK);
+
+               /* transition to L0x or L1 if available..*/
+               reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+               /* timer = +-1000 usec in 32usec intervals */
+               reg |= (1000 >> 5);
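+               /* 1000 >> 5 == 31 intervals of 32 usec, i.e. ~992 usec */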
+               E1000_WRITE_REG(hw, E1000_DMACR, reg);
+
+               /* No lower threshold */
+               E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
+
+               /* set hwm to PBA -  2 * max frame size */
+               E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
+
+               /* Set the interval before transition */
+               reg = E1000_READ_REG(hw, E1000_DMCTLX);
+               reg |= 0x800000FF; /* 255 usec */
+               E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
+
+               /* free space in tx packet buffer to wake from DMA coal */
+               E1000_WRITE_REG(hw, E1000_DMCTXTH,
+                   (20480 - (2 * adapter->max_frame_size)) >> 6);
+
+               /* make low power state decision controlled by DMA coal */
+               reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+               E1000_WRITE_REG(hw, E1000_PCIEMISC,
+                   reg | E1000_PCIEMISC_LX_DECISION);
+               device_printf(dev, "DMA Coalescing enabled\n");
+       }
+
+       E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+       e1000_get_phy_info(hw);
+       e1000_check_for_link(hw);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+igb_setup_interface(device_t dev, struct adapter *adapter)
+{
+       struct ifnet   *ifp;
+
+       INIT_DEBUGOUT("igb_setup_interface: begin");
+
+       ifp = adapter->ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL) {
+               device_printf(dev, "can not allocate ifnet structure\n");
+               return (-1);
+       }
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_mtu = ETHERMTU;
+       ifp->if_init =  igb_init;
+       ifp->if_softc = adapter;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_ioctl = igb_ioctl;
+       ifp->if_start = igb_start;
+#if __FreeBSD_version >= 800000
+       ifp->if_transmit = igb_mq_start;
+       ifp->if_qflush = igb_qflush;
+#endif
+       IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
+       ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
+       IFQ_SET_READY(&ifp->if_snd);
+
+       ether_ifattach(ifp, adapter->hw.mac.addr);
+
+       ifp->if_capabilities = ifp->if_capenable = 0;
+
+       ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+       ifp->if_capabilities |= IFCAP_TSO4;
+       ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+       ifp->if_capenable = ifp->if_capabilities;
+
+       /* Advertise LRO capability, but leave it disabled by default */
+       ifp->if_capabilities |= IFCAP_LRO;
+
+#ifdef DEVICE_POLLING
+       ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+
+       /*
+        * Tell the upper layer(s) we
+        * support full VLAN capability.
+        */
+       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+       ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+
+       /*
+       ** Don't turn this on by default; if vlans are
+       ** created on another pseudo device (e.g. lagg)
+       ** then vlan events are not passed through, breaking
+       ** operation, but with HW FILTER off it works. If
+       ** using vlans directly on this driver you can
+       ** enable this and get full hardware tag filtering.
+       */
+       ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+       /*
+        * Specify the media types supported by this adapter and register
+        * callbacks to update media and link information
+        */
+       ifmedia_init(&adapter->media, IFM_IMASK,
+           igb_media_change, igb_media_status);
+       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
+           (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
+                           0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+       } else {
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
+                           0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
+                           0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
+                           0, NULL);
+               if (adapter->hw.phy.type != e1000_phy_ife) {
+                       ifmedia_add(&adapter->media,
+                               IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+                       ifmedia_add(&adapter->media,
+                               IFM_ETHER | IFM_1000_T, 0, NULL);
+               }
+       }
+       ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+       return (0);
+}
+
+
+/*
+ * Manage DMA'able memory.
+ */
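+/*
+ * igb_dmamap_cb() is the callback handed to bus_dmamap_load(); because the
+ * load is requested with BUS_DMA_NOWAIT it effectively completes
+ * synchronously, and the callback only records the single segment's bus
+ * address into the caller's dma_paddr.
+ */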
+static void
+igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+       if (error)
+               return;
+       *(bus_addr_t *) arg = segs[0].ds_addr;
+}
+
+static int
+igb_dma_malloc(struct adapter *adapter, bus_size_t size,
+        struct igb_dma_alloc *dma, int mapflags)
+{
+       int error;
+
+       error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
+                               IGB_DBA_ALIGN, 0,       /* alignment, bounds */
+                               BUS_SPACE_MAXADDR,      /* lowaddr */
+                               BUS_SPACE_MAXADDR,      /* highaddr */
+                               NULL, NULL,             /* filter, filterarg */
+                               size,                   /* maxsize */
+                               1,                      /* nsegments */
+                               size,                   /* maxsegsize */
+                               0,                      /* flags */
+                               NULL,                   /* lockfunc */
+                               NULL,                   /* lockarg */
+                               &dma->dma_tag);
+       if (error) {
+               device_printf(adapter->dev,
+                   "%s: bus_dma_tag_create failed: %d\n",
+                   __func__, error);
+               goto fail_0;
+       }
+
+       error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
+           BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
+       if (error) {
+               device_printf(adapter->dev,
+                   "%s: bus_dmamem_alloc(%ju) failed: %d\n",
+                   __func__, (uintmax_t)size, error);
+               goto fail_2;
+       }
+
+       dma->dma_paddr = 0;
+       error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+           size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
+       if (error || dma->dma_paddr == 0) {
+               device_printf(adapter->dev,
+                   "%s: bus_dmamap_load failed: %d\n",
+                   __func__, error);
+               goto fail_3;
+       }
+
+       return (0);
+
+fail_3:
+       bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+fail_2:
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+       bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+       dma->dma_map = NULL;
+       dma->dma_tag = NULL;
+
+       return (error);
+}
+
+static void
+igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma)
+{
+       if (dma->dma_tag == NULL)
+               return;
+       if (dma->dma_map != NULL) {
+               bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+               bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+               bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+               dma->dma_map = NULL;
+       }
+       bus_dma_tag_destroy(dma->dma_tag);
+       dma->dma_tag = NULL;
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the transmit and receive rings, and then
+ *  the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+igb_allocate_queues(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       struct igb_queue        *que = NULL;
+       struct tx_ring          *txr = NULL;
+       struct rx_ring          *rxr = NULL;
+       int rsize, tsize, error = E1000_SUCCESS;
+       int txconf = 0, rxconf = 0;
+
+       /* First allocate the top level queue structs */
+       if (!(adapter->queues =
+           (struct igb_queue *) malloc(sizeof(struct igb_queue) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate queue memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       /* Next allocate the TX ring struct memory */
+       if (!(adapter->tx_rings =
+           (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate TX ring memory\n");
+               error = ENOMEM;
+               goto tx_fail;
+       }
+
+       /* Now allocate the RX */
+       if (!(adapter->rx_rings =
+           (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate RX ring memory\n");
+               error = ENOMEM;
+               goto rx_fail;
+       }
+
+       tsize = roundup2(adapter->num_tx_desc *
+           sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN);
+       /*
+        * Now set up the TX queues, txconf is needed to handle the
+        * possibility that things fail midcourse and we need to
+        * undo memory gracefully
+        */ 
+       for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+               /* Set up some basics */
+               txr = &adapter->tx_rings[i];
+               txr->adapter = adapter;
+               txr->me = i;
+
+               /* Initialize the TX lock */
+               snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+                   device_get_nameunit(dev), txr->me);
+               mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+               if (igb_dma_malloc(adapter, tsize,
+                       &txr->txdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate TX Descriptor memory\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+               txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
+               bzero((void *)txr->tx_base, tsize);
+
+               /* Now allocate transmit buffers for the ring */
+               if (igb_allocate_transmit_buffers(txr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up transmit buffers\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#if __FreeBSD_version >= 800000
+               /* Allocate a buf ring */
+               txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF,
+                   M_WAITOK, &txr->tx_mtx);
+#endif
+       }
+
+       /*
+        * Next the RX queues...
+        */ 
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
+       for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+               rxr = &adapter->rx_rings[i];
+               rxr->adapter = adapter;
+               rxr->me = i;
+
+               /* Initialize the RX lock */
+               snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+                   device_get_nameunit(dev), rxr->me);
+               mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+               if (igb_dma_malloc(adapter, rsize,
+                       &rxr->rxdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate RxDescriptor memory\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+               rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+               bzero((void *)rxr->rx_base, rsize);
+
+               /* Allocate receive buffers for the ring*/
+               if (igb_allocate_receive_buffers(rxr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up receive buffers\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+       }
+
+       /*
+       ** Finally set up the queue holding structs
+       */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               que->adapter = adapter;
+               que->txr = &adapter->tx_rings[i];
+               que->rxr = &adapter->rx_rings[i];
+       }
+
+       return (0);
+
+err_rx_desc:
+       for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+               igb_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+       for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+               igb_dma_free(adapter, &txr->txdma);
+       free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+#if __FreeBSD_version >= 800000
+       buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+       free(adapter->queues, M_DEVBUF);
+fail:
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for tx_buffer structures. The tx_buffer stores all
+ *  the information needed to transmit a packet on the wire. This is
+ *  called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+static int
+igb_allocate_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       device_t dev = adapter->dev;
+       struct igb_tx_buffer *txbuf;
+       int error, i;
+
+       /*
+        * Setup DMA descriptor areas.
+        */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+                              1, 0,                    /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,              /* filter, filterarg */
+                              IGB_TSO_SIZE,            /* maxsize */
+                              IGB_MAX_SCATTER,         /* nsegments */
+                              PAGE_SIZE,               /* maxsegsize */
+                              0,                       /* flags */
+                              NULL,                    /* lockfunc */
+                              NULL,                    /* lockfuncarg */
+                              &txr->txtag))) {
+               device_printf(dev,"Unable to allocate TX DMA tag\n");
+               goto fail;
+       }
+
+       if (!(txr->tx_buffers =
+           (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) *
+           adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate tx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+        /* Create the descriptor buffer dma maps */
+       txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+               if (error != 0) {
+                       device_printf(dev, "Unable to create TX DMA map\n");
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       /* Free everything; this handles the case where we failed part way */
+       igb_free_transmit_structures(adapter);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+igb_setup_transmit_ring(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct igb_tx_buffer *txbuf;
+       int i;
+
+       /* Clear the old descriptor contents */
+       IGB_TX_LOCK(txr);
+       bzero((void *)txr->tx_base,
+             (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc);
+       /* Reset indices */
+       txr->next_avail_desc = 0;
+       txr->next_to_clean = 0;
+
+       /* Free any existing tx buffers. */
+        txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               if (txbuf->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, txbuf->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag, txbuf->map);
+                       m_freem(txbuf->m_head);
+                       txbuf->m_head = NULL;
+               }
+               /* clear the watch index */
+               txbuf->next_eop = -1;
+        }
+
+       /* Set number of descriptors available */
+       txr->tx_avail = adapter->num_tx_desc;
+
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       IGB_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all transmit rings.
+ *
+ **********************************************************************/
+static void
+igb_setup_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++)
+               igb_setup_transmit_ring(txr);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+igb_initialize_transmit_units(struct adapter *adapter)
+{
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct e1000_hw *hw = &adapter->hw;
+       u32             tctl, txdctl;
+
+       INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
+       tctl = txdctl = 0;
+
+       /* Setup the Tx Descriptor Rings */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               u64 bus_addr = txr->txdma.dma_paddr;
+
+               E1000_WRITE_REG(hw, E1000_TDLEN(i),
+                   adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
+               E1000_WRITE_REG(hw, E1000_TDBAH(i),
+                   (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_TDBAL(i),
+                   (uint32_t)bus_addr);
+
+               /* Setup the HW Tx Head and Tail descriptor pointers */
+               E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+               E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+               HW_DEBUGOUT2("Base = %x, Length = %x\n",
+                   E1000_READ_REG(hw, E1000_TDBAL(i)),
+                   E1000_READ_REG(hw, E1000_TDLEN(i)));
+
+               txr->queue_status = IGB_QUEUE_IDLE;
+
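+               /*
+                * TXDCTL packs the prefetch, host and write-back
+                * thresholds into bits [5:0], [13:8] and [21:16]
+                * respectively, hence the shifts by 8 and 16 below.
+                */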
+               txdctl |= IGB_TX_PTHRESH;
+               txdctl |= IGB_TX_HTHRESH << 8;
+               txdctl |= IGB_TX_WTHRESH << 16;
+               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+               E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+       }
+
+       if (adapter->vf_ifp)
+               return;
+
+       e1000_config_collision_dist(hw);
+
+       /* Program the Transmit Control Register */
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+       tctl &= ~E1000_TCTL_CT;
+       tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+       /* This write will effectively turn on the transmit unit. */
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+/*********************************************************************
+ *
+ *  Free all transmit rings.
+ *
+ **********************************************************************/
+static void
+igb_free_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IGB_TX_LOCK(txr);
+               igb_free_transmit_buffers(txr);
+               igb_dma_free(adapter, &txr->txdma);
+               IGB_TX_UNLOCK(txr);
+               IGB_TX_LOCK_DESTROY(txr);
+       }
+       free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+igb_free_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct igb_tx_buffer *tx_buffer;
+       int             i;
+
+       INIT_DEBUGOUT("free_transmit_ring: begin");
+
+       if (txr->tx_buffers == NULL)
+               return;
+
+       tx_buffer = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+               if (tx_buffer->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, tx_buffer->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       m_freem(tx_buffer->m_head);
+                       tx_buffer->m_head = NULL;
+                       if (tx_buffer->map != NULL) {
+                               bus_dmamap_destroy(txr->txtag,
+                                   tx_buffer->map);
+                               tx_buffer->map = NULL;
+                       }
+               } else if (tx_buffer->map != NULL) {
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       bus_dmamap_destroy(txr->txtag,
+                           tx_buffer->map);
+                       tx_buffer->map = NULL;
+               }
+       }
+#if __FreeBSD_version >= 800000
+       if (txr->br != NULL)
+               buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       if (txr->tx_buffers != NULL) {
+               free(txr->tx_buffers, M_DEVBUF);
+               txr->tx_buffers = NULL;
+       }
+       if (txr->txtag != NULL) {
+               bus_dma_tag_destroy(txr->txtag);
+               txr->txtag = NULL;
+       }
+       return;
+}
+
+/**********************************************************************
+ *
+ *  Setup work for hardware segmentation offload (TSO)
+ *
+ **********************************************************************/
+static boolean_t
+igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
+{
+       struct adapter *adapter = txr->adapter;
+       struct e1000_adv_tx_context_desc *TXD;
+       struct igb_tx_buffer        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       u32 mss_l4len_idx = 0;
+       u16 vtag = 0;
+       int ctxd, ehdrlen, ip_hlen, tcp_hlen;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct tcphdr *th;
+
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       else
+               ehdrlen = ETHER_HDR_LEN;
+
+       /* Ensure we have at least the IP+TCP header in the first mbuf. */
+       if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
+               return FALSE;
+
+       /* Only supports IPV4 for now */
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       ip = (struct ip *)(mp->m_data + ehdrlen);
+       if (ip->ip_p != IPPROTO_TCP)
+                return FALSE;   /* 0 */
+       ip->ip_sum = 0;
+       ip_hlen = ip->ip_hl << 2;
+       th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+       th->th_sum = in_pseudo(ip->ip_src.s_addr,
+           ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+       tcp_hlen = th->th_off << 2;
+       /*
+        * Calculate header length, this is used
+        * in the transmit desc in igb_xmit
+        */
+       *hdrlen = ehdrlen + ip_hlen + tcp_hlen;
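+       /*
+        * e.g. for an untagged IPv4/TCP frame with no options this is
+        * 14 + 20 + 20 = 54 bytes.
+        */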
+
+       /* VLAN MACLEN IPLEN */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
+       }
+
+       vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
+       vlan_macip_lens |= ip_hlen;
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+
+       /* ADV DTYPE TUCMD */
+       type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+       /* MSS L4LEN IDX */
+       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
+       mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
+       /* 82575 needs the queue index added */
+       if (adapter->hw.mac.type == e1000_82575)
+               mss_l4len_idx |= txr->me << 4;
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       TXD->seqnum_seed = htole32(0);
+       tx_buffer->m_head = NULL;
+       tx_buffer->next_eop = -1;
+
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+
+       txr->tx_avail--;
+       txr->next_avail_desc = ctxd;
+       return TRUE;
+}
+
+
+/*********************************************************************
+ *
+ *  Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static bool
+igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter *adapter = txr->adapter;
+       struct e1000_adv_tx_context_desc *TXD;
+       struct igb_tx_buffer        *tx_buffer;
+       u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
+       struct ether_vlan_header *eh;
+       struct ip *ip = NULL;
+       struct ip6_hdr *ip6;
+       int  ehdrlen, ctxd, ip_hlen = 0;
+       u16     etype, vtag = 0;
+       u8      ipproto = 0;
+       bool    offload = TRUE;
+
+       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+               offload = FALSE;
+
+       vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /*
+       ** In advanced descriptors the vlan tag must 
+       ** be placed into the context descriptor, thus
+       ** we need to be here just for that setup.
+       */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
+       } else if (offload == FALSE)
+               return FALSE;
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present,
+        * helpful for QinQ too.
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               etype = ntohs(eh->evl_proto);
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       } else {
+               etype = ntohs(eh->evl_encap_proto);
+               ehdrlen = ETHER_HDR_LEN;
+       }
+
+       /* Set the ether header length */
+       vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
+
+       switch (etype) {
+               case ETHERTYPE_IP:
+                       ip = (struct ip *)(mp->m_data + ehdrlen);
+                       ip_hlen = ip->ip_hl << 2;
+                       if (mp->m_len < ehdrlen + ip_hlen) {
+                               offload = FALSE;
+                               break;
+                       }
+                       ipproto = ip->ip_p;
+                       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+                       break;
+               case ETHERTYPE_IPV6:
+                       ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+                       ip_hlen = sizeof(struct ip6_hdr);
+                       ipproto = ip6->ip6_nxt;
+                       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
+                       break;
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       vlan_macip_lens |= ip_hlen;
+       type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+       switch (ipproto) {
+               case IPPROTO_TCP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+                       break;
+               case IPPROTO_UDP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
+                       break;
+#if __FreeBSD_version >= 800000
+               case IPPROTO_SCTP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+                       break;
+#endif
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       /* 82575 needs the queue index added */
+       if (adapter->hw.mac.type == e1000_82575)
+               mss_l4len_idx = txr->me << 4;
+
+       /* Now copy bits into descriptor */
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+       TXD->seqnum_seed = htole32(0);
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       tx_buffer->m_head = NULL;
+       tx_buffer->next_eop = -1;
+
+       /* We've consumed the first desc, adjust counters */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+       txr->next_avail_desc = ctxd;
+       --txr->tx_avail;
+
+        return (offload);
+}
+
+
+/**********************************************************************
+ *
+ *  Examine each tx_buffer in the used queue. If the hardware is done
+ *  processing the packet then free associated resources. The
+ *  tx_buffer is put back on the free queue.
+ *
+ *  TRUE return means there's work in the ring to clean, FALSE it's empty.
+ **********************************************************************/
+static bool
+igb_txeof(struct tx_ring *txr)
+{
+       struct adapter  *adapter = txr->adapter;
+        int first, last, done, processed;
+        struct igb_tx_buffer *tx_buffer;
+        struct e1000_tx_desc   *tx_desc, *eop_desc;
+       struct ifnet   *ifp = adapter->ifp;
+
+       IGB_TX_LOCK_ASSERT(txr);
+
+        if (txr->tx_avail == adapter->num_tx_desc) {
+               txr->queue_status = IGB_QUEUE_IDLE;
+                return FALSE;
+       }
+
+       processed = 0;
+        first = txr->next_to_clean;
+        tx_desc = &txr->tx_base[first];
+        tx_buffer = &txr->tx_buffers[first];
+       last = tx_buffer->next_eop;
+        eop_desc = &txr->tx_base[last];
+
+       /*
+        * What this does is get the index of the
+        * first descriptor AFTER the EOP of the 
+        * first packet, that way we can do the
+        * simple comparison on the inner while loop.
+        */
+       if (++last == adapter->num_tx_desc)
+               last = 0;
+       done = last;
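+       /*
+        * e.g. if cleaning starts at descriptor 0 and that packet's EOP
+        * is descriptor 5, 'done' becomes 6 and the inner loop below
+        * cleans descriptors 0 through 5 before looking at the next EOP.
+        */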
+
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
+               /* We clean the range of the packet */
+               while (first != done) {
+                       tx_desc->upper.data = 0;
+                       tx_desc->lower.data = 0;
+                       tx_desc->buffer_addr = 0;
+                       ++txr->tx_avail;
+                       ++processed;
+
+                       if (tx_buffer->m_head) {
+                               txr->bytes +=
+                                   tx_buffer->m_head->m_pkthdr.len;
+                               bus_dmamap_sync(txr->txtag,
+                                   tx_buffer->map,
+                                   BUS_DMASYNC_POSTWRITE);
+                               bus_dmamap_unload(txr->txtag,
+                                   tx_buffer->map);
+
+                               m_freem(tx_buffer->m_head);
+                               tx_buffer->m_head = NULL;
+                       }
+                       tx_buffer->next_eop = -1;
+                       txr->watchdog_time = ticks;
+
+                       if (++first == adapter->num_tx_desc)
+                               first = 0;
+
+                       tx_buffer = &txr->tx_buffers[first];
+                       tx_desc = &txr->tx_base[first];
+               }
+               ++txr->packets;
+               ++ifp->if_opackets;
+               /* See if we can continue to the next packet */
+               last = tx_buffer->next_eop;
+               if (last != -1) {
+                       eop_desc = &txr->tx_base[last];
+                       /* Get new done point */
+                       if (++last == adapter->num_tx_desc) last = 0;
+                       done = last;
+               } else
+                       break;
+        }
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+        txr->next_to_clean = first;
+
+       /*
+       ** Watchdog calculation: we know there is work
+       ** outstanding or the first return would have
+       ** been taken, so nothing processed for too
+       ** long indicates a hang.
+       */
+       if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
+               txr->queue_status = IGB_QUEUE_HUNG;
+
+        /*
+         * If we have a minimum free, clear IFF_DRV_OACTIVE
+         * to tell the stack that it is OK to send packets.
+         */
+        if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) {                
+                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+               /* All clean, turn off the watchdog */
+                if (txr->tx_avail == adapter->num_tx_desc) {
+                       txr->queue_status = IGB_QUEUE_IDLE;
+                       return (FALSE);
+               }
+        }
+       return (TRUE);
+}
+
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings
+ *   - now keeps its own state so discards due to resource
+ *     exhaustion are unnecessary; if an mbuf cannot be obtained
+ *     it just returns, keeping its placeholder, so it can simply
+ *     be called again to retry.
+ *
+ **********************************************************************/
+static void
+igb_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+       struct adapter          *adapter = rxr->adapter;
+       bus_dma_segment_t       hseg[1];
+       bus_dma_segment_t       pseg[1];
+       struct igb_rx_buf       *rxbuf;
+       struct mbuf             *mh, *mp;
+       int                     i, j, nsegs, error;
+       bool                    refreshed = FALSE;
+
+       i = j = rxr->next_to_refresh;
+       /*
+       ** Get one descriptor beyond
+       ** our work mark to control
+       ** the loop.
+        */
+       if (++j == adapter->num_rx_desc)
+               j = 0;
+
+       while (j != limit) {
+               rxbuf = &rxr->rx_buffers[i];
+               /* No hdr mbuf used with header split off */
+               if (rxr->hdr_split == FALSE)
+                       goto no_split;
+               if (rxbuf->m_head == NULL) {
+                       mh = m_gethdr(M_DONTWAIT, MT_DATA);
+                       if (mh == NULL)
+                               goto update;
+               } else
+                       mh = rxbuf->m_head;
+
+               mh->m_pkthdr.len = mh->m_len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: hdr dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mh);
+                       rxbuf->m_head = NULL;
+                       goto update;
+               }
+               rxbuf->m_head = mh;
+               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.hdr_addr =
+                   htole64(hseg[0].ds_addr);
+no_split:
+               if (rxbuf->m_pack == NULL) {
+                       mp = m_getjcl(M_DONTWAIT, MT_DATA,
+                           M_PKTHDR, adapter->rx_mbuf_sz);
+                       if (mp == NULL)
+                               goto update;
+               } else
+                       mp = rxbuf->m_pack;
+
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: payload dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mp);
+                       rxbuf->m_pack = NULL;
+                       goto update;
+               }
+               rxbuf->m_pack = mp;
+               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.pkt_addr =
+                   htole64(pseg[0].ds_addr);
+               refreshed = TRUE; /* at least one buffer was refreshed */
+
+               i = j; /* our next is precalculated */
+               rxr->next_to_refresh = i;
+               if (++j == adapter->num_rx_desc)
+                       j = 0;
+       }
+update:
+       if (refreshed) /* update tail */
+               E1000_WRITE_REG(&adapter->hw,
+                   E1000_RDT(rxr->me), rxr->next_to_refresh);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per received packet, the maximum number of rx_buffers
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've allocated.
+ *
+ **********************************************************************/
+static int
+igb_allocate_receive_buffers(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       device_t                dev = adapter->dev;
+       struct igb_rx_buf       *rxbuf;
+       int                     i, bsize, error;
+
+       bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
+       if (!(rxr->rx_buffers =
+           (struct igb_rx_buf *) malloc(bsize,
+           M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate rx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+                                  1, 0,                /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MSIZE,               /* maxsize */
+                                  1,                   /* nsegments */
+                                  MSIZE,               /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->htag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+                                  1, 0,                /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MJUM9BYTES,          /* maxsize */
+                                  1,                   /* nsegments */
+                                  MJUM9BYTES,          /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->ptag))) {
+               device_printf(dev, "Unable to create RX payload DMA tag\n");
+               goto fail;
+       }
+
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               error = bus_dmamap_create(rxr->htag,
+                   BUS_DMA_NOWAIT, &rxbuf->hmap);
+               if (error) {
+                       device_printf(dev,
+                           "Unable to create RX head DMA maps\n");
+                       goto fail;
+               }
+               error = bus_dmamap_create(rxr->ptag,
+                   BUS_DMA_NOWAIT, &rxbuf->pmap);
+               if (error) {
+                       device_printf(dev,
+                           "Unable to create RX packet DMA maps\n");
+                       goto fail;
+               }
+       }
+
+       return (0);
+
+fail:
+       /* Frees all, but can handle partial completion */
+       igb_free_receive_structures(adapter);
+       return (error);
+}
+
+
+static void
+igb_free_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       struct igb_rx_buf       *rxbuf;
+
+
+       for (int i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               if (rxbuf->m_head != NULL) {
+                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                       rxbuf->m_head->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_head);
+               }
+               if (rxbuf->m_pack != NULL) {
+                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                       rxbuf->m_pack->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_pack);
+               }
+               rxbuf->m_head = NULL;
+               rxbuf->m_pack = NULL;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+igb_setup_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter;
+       struct  ifnet           *ifp;
+       device_t                dev;
+       struct igb_rx_buf       *rxbuf;
+       bus_dma_segment_t       pseg[1], hseg[1];
+       struct lro_ctrl         *lro = &rxr->lro;
+       int                     rsize, nsegs, error = 0;
+
+       adapter = rxr->adapter;
+       dev = adapter->dev;
+       ifp = adapter->ifp;
+
+       /* Clear the ring contents */
+       IGB_RX_LOCK(rxr);
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
+       bzero((void *)rxr->rx_base, rsize);
+
+       /*
+       ** Free current RX buffer structures and their mbufs
+       */
+       igb_free_receive_ring(rxr);
+
+       /* Configure for header split? */
+       if (igb_header_split)
+               rxr->hdr_split = TRUE;
+
+        /* Now replenish the ring mbufs */
+       for (int j = 0; j < adapter->num_rx_desc; ++j) {
+               struct mbuf     *mh, *mp;
+
+               rxbuf = &rxr->rx_buffers[j];
+               if (rxr->hdr_split == FALSE)
+                       goto skip_head;
+
+               /* First the header */
+               rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
+               if (rxbuf->m_head == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               m_adj(rxbuf->m_head, ETHER_ALIGN);
+               mh = rxbuf->m_head;
+               mh->m_len = mh->m_pkthdr.len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, rxbuf->m_head, hseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) /* Nothing elegant to do here */
+                        goto fail;
+               bus_dmamap_sync(rxr->htag,
+                   rxbuf->hmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+               /* Now the payload cluster */
+               rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
+                   M_PKTHDR, adapter->rx_mbuf_sz);
+               if (rxbuf->m_pack == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               mp = rxbuf->m_pack;
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0)
+                        goto fail;
+               bus_dmamap_sync(rxr->ptag,
+                   rxbuf->pmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+        }
+
+       /* Setup our descriptor indices */
+       rxr->next_to_check = 0;
+       rxr->next_to_refresh = adapter->num_rx_desc - 1;
+       rxr->lro_enabled = FALSE;
+       rxr->rx_split_packets = 0;
+       rxr->rx_bytes = 0;
+
+       rxr->fmp = NULL;
+       rxr->lmp = NULL;
+       rxr->discard = FALSE;
+
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       /*
+       ** Now set up the LRO interface; we also only
+       ** do header split when LRO is enabled, since
+       ** so often they are undesirable in similar
+       ** setups.
+       */
+       if (ifp->if_capenable & IFCAP_LRO) {
+               error = tcp_lro_init(lro);
+               if (error) {
+                       device_printf(dev, "LRO Initialization failed!\n");
+                       goto fail;
+               }
+               INIT_DEBUGOUT("RX LRO Initialized\n");
+               rxr->lro_enabled = TRUE;
+               lro->ifp = adapter->ifp;
+       }
+
+       IGB_RX_UNLOCK(rxr);
+       return (0);
+
+fail:
+       igb_free_receive_ring(rxr);
+       IGB_RX_UNLOCK(rxr);
+       return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+igb_setup_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr = adapter->rx_rings;
+       int i;
+
+       for (i = 0; i < adapter->num_queues; i++, rxr++)
+               if (igb_setup_receive_ring(rxr))
+                       goto fail;
+
+       return (0);
+fail:
+       /*
+        * Free RX buffers allocated so far, we will only handle
+        * the rings that completed, the failing case will have
+        * cleaned up for itself. 'i' is the endpoint.
+        */
+       for (int j = 0; j < i; ++j) {
+               rxr = &adapter->rx_rings[j];
+               IGB_RX_LOCK(rxr);
+               igb_free_receive_ring(rxr);
+               IGB_RX_UNLOCK(rxr);
+       }
+
+       return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ *  Enable receive unit.
+ *
+ **********************************************************************/
+static void
+igb_initialize_receive_units(struct adapter *adapter)
+{
+       struct rx_ring  *rxr = adapter->rx_rings;
+       struct ifnet    *ifp = adapter->ifp;
+       struct e1000_hw *hw = &adapter->hw;
+       u32             rctl, rxcsum, psize, srrctl = 0;
+
+       INIT_DEBUGOUT("igb_initialize_receive_units: begin");
+
+       /*
+        * Make sure receives are disabled while setting
+        * up the descriptor ring
+        */
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+       /*
+       ** Set up for header split
+       */
+       if (igb_header_split) {
+               /* Use a standard mbuf for the header */
+               srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+       } else
+               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+       /*
+       ** Set up for jumbo frames
+       */
+       if (ifp->if_mtu > ETHERMTU) {
+               rctl |= E1000_RCTL_LPE;
+               if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
+                       srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+                       rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+               } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
+                       srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+                       rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
+               }
+               /* Set maximum packet len */
+               psize = adapter->max_frame_size;
+               /* are we on a vlan? */
+               if (adapter->ifp->if_vlantrunk != NULL)
+                       psize += VLAN_TAG_SIZE;
+               E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
+       } else {
+               rctl &= ~E1000_RCTL_LPE;
+               srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+               rctl |= E1000_RCTL_SZ_2048;
+       }
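+       /*
+        * Note: the SRRCTL packet buffer size field is in 1KB units, so
+        * e.g. 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT programs a 4KB buffer.
+        */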
+
+       /* Setup the Base and Length of the Rx Descriptor Rings */
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               u64 bus_addr = rxr->rxdma.dma_paddr;
+               u32 rxdctl;
+
+               E1000_WRITE_REG(hw, E1000_RDLEN(i),
+                   adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
+               E1000_WRITE_REG(hw, E1000_RDBAH(i),
+                   (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_RDBAL(i),
+                   (uint32_t)bus_addr);
+               E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
+               /* Enable this Queue */
+               rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+               rxdctl &= 0xFFF00000;
+               rxdctl |= IGB_RX_PTHRESH;
+               rxdctl |= IGB_RX_HTHRESH << 8;
+               rxdctl |= IGB_RX_WTHRESH << 16;
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+       }
+
+       /*
+       ** Setup for RX MultiQueue
+       */
+       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+       if (adapter->num_queues > 1) {
+               u32 random[10], mrqc, shift = 0;
+               union igb_reta {
+                       u32 dword;
+                       u8  bytes[4];
+               } reta;
+
+               arc4rand(&random, sizeof(random), 0);
+               if (adapter->hw.mac.type == e1000_82575)
+                       shift = 6;
+               /* Populate the RSS redirection table (RETA) */
+               for (int i = 0; i < 128; i++) {
+                       reta.bytes[i & 3] =
+                           (i % adapter->num_queues) << shift;
+                       if ((i & 3) == 3)
+                               E1000_WRITE_REG(hw,
+                                   E1000_RETA(i >> 2), reta.dword);
+               }
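+               /*
+                * The redirection table holds 128 one-byte entries packed
+                * four per 32-bit RETA register; each entry selects the
+                * queue for one hash bucket. The 82575 expects the queue
+                * index in the upper bits of each entry, hence the shift
+                * of 6 above.
+                */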
+               /* Now fill in hash table */
+               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+               for (int i = 0; i < 10; i++)
+                       E1000_WRITE_REG_ARRAY(hw,
+                           E1000_RSSRK(0), i, random[i]);
+
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                   E1000_MRQC_RSS_FIELD_IPV4_TCP);
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+                   E1000_MRQC_RSS_FIELD_IPV6_TCP);
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+                   E1000_MRQC_RSS_FIELD_IPV6_UDP);
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+                   E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+               E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+               /*
+               ** NOTE: Receive Full-Packet Checksum Offload 
+               ** is mutually exclusive with Multiqueue. However
+               ** this is not the same as TCP/IP checksums which
+               ** still work.
+               */
+               rxcsum |= E1000_RXCSUM_PCSD;
+#if __FreeBSD_version >= 800000
+               /* For SCTP Offload */
+               if ((hw->mac.type == e1000_82576)
+                   && (ifp->if_capenable & IFCAP_RXCSUM))
+                       rxcsum |= E1000_RXCSUM_CRCOFL;
+#endif
+       } else {
+               /* Non RSS setup */
+               if (ifp->if_capenable & IFCAP_RXCSUM) {
+                       rxcsum |= E1000_RXCSUM_IPPCSE;
+#if __FreeBSD_version >= 800000
+                       if (adapter->hw.mac.type == e1000_82576)
+                               rxcsum |= E1000_RXCSUM_CRCOFL;
+#endif
+               } else
+                       rxcsum &= ~E1000_RXCSUM_TUOFL;
+       }
+       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+       /* Setup the Receive Control Register */
+       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+                  E1000_RCTL_RDMTS_HALF |
+                  (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+       /* Strip CRC bytes. */
+       rctl |= E1000_RCTL_SECRC;
+       /* Make sure VLAN Filters are off */
+       rctl &= ~E1000_RCTL_VFE;
+       /* Don't store bad packets */
+       rctl &= ~E1000_RCTL_SBP;
+
+       /* Enable Receives */
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+       /*
+        * Setup the HW Rx Head and Tail Descriptor Pointers
+        *   - needs to be after enable
+        */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               rxr = &adapter->rx_rings[i];
+               E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
+               E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
+       }
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free receive rings.
+ *
+ **********************************************************************/
+static void
+igb_free_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               struct lro_ctrl *lro = &rxr->lro;
+               igb_free_receive_buffers(rxr);
+               tcp_lro_free(lro);
+               igb_dma_free(adapter, &rxr->rxdma);
+       }
+
+       free(adapter->rx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free receive ring data structures.
+ *
+ **********************************************************************/
+static void
+igb_free_receive_buffers(struct rx_ring *rxr)
+{
+       struct adapter          *adapter = rxr->adapter;
+       struct igb_rx_buf       *rxbuf;
+       int i;
+
+       INIT_DEBUGOUT("free_receive_structures: begin");
+
+       /* Cleanup any existing buffers */
+       if (rxr->rx_buffers != NULL) {
+               for (i = 0; i < adapter->num_rx_desc; i++) {
+                       rxbuf = &rxr->rx_buffers[i];
+                       if (rxbuf->m_head != NULL) {
+                               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                               rxbuf->m_head->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_head);
+                       }
+                       if (rxbuf->m_pack != NULL) {
+                               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                               rxbuf->m_pack->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_pack);
+                       }
+                       rxbuf->m_head = NULL;
+                       rxbuf->m_pack = NULL;
+                       if (rxbuf->hmap != NULL) {
+                               bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
+                               rxbuf->hmap = NULL;
+                       }
+                       if (rxbuf->pmap != NULL) {
+                               bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+                               rxbuf->pmap = NULL;
+                       }
+               }
+               if (rxr->rx_buffers != NULL) {
+                       free(rxr->rx_buffers, M_DEVBUF);
+                       rxr->rx_buffers = NULL;
+               }
+       }
+
+       if (rxr->htag != NULL) {
+               bus_dma_tag_destroy(rxr->htag);
+               rxr->htag = NULL;
+       }
+       if (rxr->ptag != NULL) {
+               bus_dma_tag_destroy(rxr->ptag);
+               rxr->ptag = NULL;
+       }
+}
+
+static __inline void
+igb_rx_discard(struct rx_ring *rxr, int i)
+{
+       struct igb_rx_buf       *rbuf;
+
+       rbuf = &rxr->rx_buffers[i];
+
+       /* Partially received? Free the chain */
+       if (rxr->fmp != NULL) {
+               rxr->fmp->m_flags |= M_PKTHDR;
+               m_freem(rxr->fmp);
+               rxr->fmp = NULL;
+               rxr->lmp = NULL;
+       }
+
+       /*
+       ** With advanced descriptors the writeback
+       ** clobbers the buffer addrs, so it's easier
+       ** to just free the existing mbufs and take
+       ** the normal refresh path to get new buffers
+       ** and mapping.
+       */
+       if (rbuf->m_head) {
+               m_free(rbuf->m_head);
+               rbuf->m_head = NULL;
+       }
+
+       if (rbuf->m_pack) {
+               m_free(rbuf->m_pack);
+               rbuf->m_pack = NULL;
+       }
+
+       return;
+}
+
+static __inline void
+igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+
+       /*
+        * At the moment LRO is only for IPv4/TCP packets, and the TCP checksum
+        * of the packet should be computed by hardware. The packet should also
+        * not have a VLAN tag in the ethernet header.
+        */
+       if (rxr->lro_enabled &&
+           (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+           (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
+           (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
+           (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == 
+           (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+               /*
+                * Send to the stack if:
+                **  - LRO not enabled, or
+                **  - no LRO resources, or
+                **  - lro enqueue fails
+                */
+               if (rxr->lro.lro_cnt != 0)
+                       if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+                               return;
+       }
+       IGB_RX_UNLOCK(rxr);
+       (*ifp->if_input)(ifp, m);
+       IGB_RX_LOCK(rxr);
+}
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor ring and sends data which has been
+ *  DMA'd into host memory to the upper layer.
+ *
+ *  We loop at most count times if count is > 0, or until done if
+ *  count < 0.
+ *
+ *  Return TRUE if more to clean, FALSE otherwise
+ *********************************************************************/
+static bool
+igb_rxeof(struct igb_queue *que, int count, int *done)
+{
+       struct adapter          *adapter = que->adapter;
+       struct rx_ring          *rxr = que->rxr;
+       struct ifnet            *ifp = adapter->ifp;
+       struct lro_ctrl         *lro = &rxr->lro;
+       struct lro_entry        *queued;
+       int                     i, processed = 0, rxdone = 0;
+       u32                     ptype, staterr = 0;
+       union e1000_adv_rx_desc *cur;
+
+       IGB_RX_LOCK(rxr);
+       /* Sync the ring. */
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+       /* Main clean loop */
+       for (i = rxr->next_to_check; count != 0;) {
+               struct mbuf             *sendmp, *mh, *mp;
+               struct igb_rx_buf       *rxbuf;
+               u16                     hlen, plen, hdr, vtag;
+               bool                    eop = FALSE;
+               cur = &rxr->rx_base[i];
+               staterr = le32toh(cur->wb.upper.status_error);
+               if ((staterr & E1000_RXD_STAT_DD) == 0)
+                       break;
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               count--;
+               sendmp = mh = mp = NULL;
+               cur->wb.upper.status_error = 0;
+               rxbuf = &rxr->rx_buffers[i];
+               plen = le16toh(cur->wb.upper.length);
+               ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
+               if ((adapter->hw.mac.type == e1000_i350) &&
+                   (staterr & E1000_RXDEXT_STATERR_LB))
+                       vtag = be16toh(cur->wb.upper.vlan);
+               else
+                       vtag = le16toh(cur->wb.upper.vlan);
+               hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+               eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
+
+               /* Make sure all segments of a bad packet are discarded */
+               if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) ||
+                   (rxr->discard)) {
+                       ifp->if_ierrors++;
+                       ++rxr->rx_discarded;
+                       if (!eop) /* Catch subsequent segs */
+                               rxr->discard = TRUE;
+                       else
+                               rxr->discard = FALSE;
+                       igb_rx_discard(rxr, i);
+                       goto next_desc;
+               }
+
+               /*
+               ** The way the hardware is configured to
+               ** split, it will ONLY use the header buffer
+               ** when header split is enabled, otherwise we
+               ** get normal behavior, ie, both header and
+               ** payload are DMA'd into the payload buffer.
+               **
+               ** The fmp test is to catch the case where a
+               ** packet spans multiple descriptors, in that
+               ** case only the first header is valid.
+               */
+               if (rxr->hdr_split && rxr->fmp == NULL) {
+                       hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
+                           E1000_RXDADV_HDRBUFLEN_SHIFT;
+                       if (hlen > IGB_HDR_BUF)
+                               hlen = IGB_HDR_BUF;
+                       mh = rxr->rx_buffers[i].m_head;
+                       mh->m_len = hlen;
+                       /* clear buf pointer for refresh */
+                       rxbuf->m_head = NULL;
+                       /*
+                       ** Get the payload length; this
+                       ** could be zero if it's a small
+                       ** packet.
+                       */
+                       if (plen > 0) {
+                               mp = rxr->rx_buffers[i].m_pack;
+                               mp->m_len = plen;
+                               mh->m_next = mp;
+                               /* clear buf pointer */
+                               rxbuf->m_pack = NULL;
+                               rxr->rx_split_packets++;
+                       }
+               } else {
+                       /*
+                       ** Either no header split, or a
+                       ** secondary piece of a fragmented
+                       ** split packet.
+                       */
+                       mh = rxr->rx_buffers[i].m_pack;
+                       mh->m_len = plen;
+                       /* clear buf info for refresh */
+                       rxbuf->m_pack = NULL;
+               }
+
+               ++processed; /* So we know when to refresh */
+
+               /* Initial frame - setup */
+               if (rxr->fmp == NULL) {
+                       mh->m_pkthdr.len = mh->m_len;
+                       /* Save the head of the chain */
+                       rxr->fmp = mh;
+                       rxr->lmp = mh;
+                       if (mp != NULL) {
+                               /* Add payload if split */
+                               mh->m_pkthdr.len += mp->m_len;
+                               rxr->lmp = mh->m_next;
+                       }
+               } else {
+                       /* Chain mbuf's together */
+                       rxr->lmp->m_next = mh;
+                       rxr->lmp = rxr->lmp->m_next;
+                       rxr->fmp->m_pkthdr.len += mh->m_len;
+               }
+
+               if (eop) {
+                       rxr->fmp->m_pkthdr.rcvif = ifp;
+                       ifp->if_ipackets++;
+                       rxr->rx_packets++;
+                       /* capture data for AIM */
+                       rxr->packets++;
+                       rxr->bytes += rxr->fmp->m_pkthdr.len;
+                       rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
+
+                       if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+                               igb_rx_checksum(staterr, rxr->fmp, ptype);
+
+                       if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+                           (staterr & E1000_RXD_STAT_VP) != 0) {
+                               rxr->fmp->m_pkthdr.ether_vtag = vtag;
+                               rxr->fmp->m_flags |= M_VLANTAG;
+                       }
+#if __FreeBSD_version >= 800000
+                       rxr->fmp->m_pkthdr.flowid = que->msix;
+                       rxr->fmp->m_flags |= M_FLOWID;
+#endif
+                       sendmp = rxr->fmp;
+                       /* Make sure to set M_PKTHDR. */
+                       sendmp->m_flags |= M_PKTHDR;
+                       rxr->fmp = NULL;
+                       rxr->lmp = NULL;
+               }
+
+next_desc:
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+               /* Advance our pointers to the next descriptor. */
+               if (++i == adapter->num_rx_desc)
+                       i = 0;
+               /*
+               ** Send to the stack or LRO
+               */
+               if (sendmp != NULL) {
+                       rxr->next_to_check = i;
+                       igb_rx_input(rxr, ifp, sendmp, ptype);
+                       i = rxr->next_to_check;
+                       rxdone++;
+               }
+
+               /* Every 8 descriptors we go to refresh mbufs */
+               if (processed == 8) {
+                        igb_refresh_mbufs(rxr, i);
+                        processed = 0;
+               }
+       }
+
+       /* Catch any remainders */
+       if (igb_rx_unrefreshed(rxr))
+               igb_refresh_mbufs(rxr, i);
+
+       rxr->next_to_check = i;
+
+       /*
+        * Flush any outstanding LRO work
+        */
+       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+               SLIST_REMOVE_HEAD(&lro->lro_active, next);
+               tcp_lro_flush(lro, queued);
+       }
+
+       if (done != NULL)
+               *done = rxdone;
+
+       IGB_RX_UNLOCK(rxr);
+       return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
+}
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of the checksum so that the stack
+ *  doesn't spend time verifying it.
+ *
+ *********************************************************************/
+static void
+igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
+{
+       u16 status = (u16)staterr;
+       u8  errors = (u8) (staterr >> 24);
+       int sctp;
+
+       /* Ignore Checksum bit is set */
+       if (status & E1000_RXD_STAT_IXSM) {
+               mp->m_pkthdr.csum_flags = 0;
+               return;
+       }
+
+       if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
+               sctp = 1;
+       else
+               sctp = 0;
+       if (status & E1000_RXD_STAT_IPCS) {
+               /* Did it pass? */
+               if (!(errors & E1000_RXD_ERR_IPE)) {
+                       /* IP Checksum Good */
+                       mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+                       mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+               } else
+                       mp->m_pkthdr.csum_flags = 0;
+       }
+
+       if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
+               u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+               if (sctp) /* reassign */
+                       type = CSUM_SCTP_VALID;
+#endif
+               /* Did it pass? */
+               if (!(errors & E1000_RXD_ERR_TCPE)) {
+                       mp->m_pkthdr.csum_flags |= type;
+                       if (sctp == 0)
+                               mp->m_pkthdr.csum_data = htons(0xffff);
+               }
+       }
+       return;
+}
+
+/*
+ * This routine is run via a vlan
+ * config EVENT
+ */
+static void
+igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u32             index, bit;
+
+       if (ifp->if_softc !=  arg)   /* Not our event */
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+                return;
+
+       IGB_CORE_LOCK(adapter);
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       adapter->shadow_vfta[index] |= (1 << bit);
+       ++adapter->num_vlans;
+       /* Change hw filter setting */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+               igb_setup_vlan_hw_support(adapter);
+       IGB_CORE_UNLOCK(adapter);
+}
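+
+/*
+ * Worked example of the VFTA math above (illustrative only): for vtag 100,
+ * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so the VLAN is
+ * recorded as shadow_vfta[3] |= (1 << 4). The 128 32-bit words of the
+ * shadow table together cover all 4096 possible VLAN IDs.
+ */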
+
+/*
+ * This routine is run via a vlan
+ * unconfig EVENT
+ */
+static void
+igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u32             index, bit;
+
+       if (ifp->if_softc !=  arg)
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+                return;
+
+       IGB_CORE_LOCK(adapter);
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       adapter->shadow_vfta[index] &= ~(1 << bit);
+       --adapter->num_vlans;
+       /* Change hw filter setting */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+               igb_setup_vlan_hw_support(adapter);
+       IGB_CORE_UNLOCK(adapter);
+}
+
+static void
+igb_setup_vlan_hw_support(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct ifnet    *ifp = adapter->ifp;
+       u32             reg;
+
+       if (adapter->vf_ifp) {
+               e1000_rlpml_set_vf(hw,
+                   adapter->max_frame_size + VLAN_TAG_SIZE);
+               return;
+       }
+
+       reg = E1000_READ_REG(hw, E1000_CTRL);
+       reg |= E1000_CTRL_VME;
+       E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+       /* Enable the Filter Table */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+               reg = E1000_READ_REG(hw, E1000_RCTL);
+               reg &= ~E1000_RCTL_CFIEN;
+               reg |= E1000_RCTL_VFE;
+               E1000_WRITE_REG(hw, E1000_RCTL, reg);
+       }
+
+       /* Update the frame size */
+       E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+           adapter->max_frame_size + VLAN_TAG_SIZE);
+
+       /* Don't bother with table if no vlans */
+       if ((adapter->num_vlans == 0) ||
+           ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
+                return;
+       /*
+       ** A soft reset zeroes out the VFTA, so
+       ** we need to repopulate it now.
+       */
+       for (int i = 0; i < IGB_VFTA_SIZE; i++)
+                if (adapter->shadow_vfta[i] != 0) {
+                       if (adapter->vf_ifp)
+                               e1000_vfta_set_vf(hw,
+                                   adapter->shadow_vfta[i], TRUE);
+                       else
+                               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
+                                i, adapter->shadow_vfta[i]);
+               }
+}
+
+static void
+igb_enable_intr(struct adapter *adapter)
+{
+       /* With RSS set up what to auto clear */
+       if (adapter->msix_mem) {
+               u32 mask = (adapter->que_mask | adapter->link_mask);
+               E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
+               E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
+               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
+               E1000_WRITE_REG(&adapter->hw, E1000_IMS,
+                   E1000_IMS_LSC);
+       } else {
+               E1000_WRITE_REG(&adapter->hw, E1000_IMS,
+                   IMS_ENABLE_MASK);
+       }
+       E1000_WRITE_FLUSH(&adapter->hw);
+
+       return;
+}
+
+static void
+igb_disable_intr(struct adapter *adapter)
+{
+       if (adapter->msix_mem) {
+               E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
+               E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
+       } 
+       E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
+       E1000_WRITE_FLUSH(&adapter->hw);
+       return;
+}
+
+/*
+ * Bit of a misnomer: what this really means is
+ * to enable OS management of the system, i.e.
+ * to disable special hardware management features.
+ */
+static void
+igb_init_manageability(struct adapter *adapter)
+{
+       if (adapter->has_manage) {
+               int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
+               int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+               /* disable hardware interception of ARP */
+               manc &= ~(E1000_MANC_ARP_EN);
+
+                /* enable receiving management packets to the host */
+               manc |= E1000_MANC_EN_MNG2HOST;
+               manc2h |= 1 << 5;  /* Mng Port 623 */
+               manc2h |= 1 << 6;  /* Mng Port 664 */
+               E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
+               E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+       }
+}
+
+/*
+ * Give control back to hardware management
+ * controller if there is one.
+ */
+static void
+igb_release_manageability(struct adapter *adapter)
+{
+       if (adapter->has_manage) {
+               int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+               /* re-enable hardware interception of ARP */
+               manc |= E1000_MANC_ARP_EN;
+               manc &= ~E1000_MANC_EN_MNG2HOST;
+
+               E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+       }
+}
+
+/*
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. 
+ *
+ */
+static void
+igb_get_hw_control(struct adapter *adapter)
+{
+       u32 ctrl_ext;
+
+       if (adapter->vf_ifp)
+               return;
+
+       /* Let firmware know the driver has taken over */
+       ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+       E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+           ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ *
+ */
+static void
+igb_release_hw_control(struct adapter *adapter)
+{
+       u32 ctrl_ext;
+
+       if (adapter->vf_ifp)
+               return;
+
+       /* Let firmware take over control of h/w */
+       ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+       E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+           ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+static int
+igb_is_valid_ether_addr(uint8_t *addr)
+{
+       char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
+
+       if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
+               return (FALSE);
+       }
+
+       return (TRUE);
+}
+
+
+/*
+ * Enable PCI Wake On Lan capability
+ */
+static void
+igb_enable_wakeup(device_t dev)
+{
+       u16     cap, status;
+       u8      id;
+
+       /* First find the capabilities pointer */
+       cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
+       /* Read the PM Capabilities */
+       id = pci_read_config(dev, cap, 1);
+       if (id != PCIY_PMG)     /* Something wrong */
+               return;
+       /* OK, we have the power capabilities, so
+          now get the status register */
+       cap += PCIR_POWER_STATUS;
+       status = pci_read_config(dev, cap, 2);
+       status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+       pci_write_config(dev, cap, status, 2);
+       return;
+}
+
+static void
+igb_led_func(void *arg, int onoff)
+{
+       struct adapter  *adapter = arg;
+
+       IGB_CORE_LOCK(adapter);
+       if (onoff) {
+               e1000_setup_led(&adapter->hw);
+               e1000_led_on(&adapter->hw);
+       } else {
+               e1000_led_off(&adapter->hw);
+               e1000_cleanup_led(&adapter->hw);
+       }
+       IGB_CORE_UNLOCK(adapter);
+}
+
+/**********************************************************************
+ *
+ *  Update the board statistics counters.
+ *
+ **********************************************************************/
+static void
+igb_update_stats_counters(struct adapter *adapter)
+{
+       struct ifnet            *ifp;
+        struct e1000_hw                *hw = &adapter->hw;
+       struct e1000_hw_stats   *stats;
+
+       /* 
+       ** The virtual function adapter has only a
+       ** small controlled set of stats, so update
+       ** only those and return.
+       */
+       if (adapter->vf_ifp) {
+               igb_update_vf_stats_counters(adapter);
+               return;
+       }
+
+       stats = (struct e1000_hw_stats  *)adapter->stats;
+
+       if(adapter->hw.phy.media_type == e1000_media_type_copper ||
+          (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+               stats->symerrs +=
+                   E1000_READ_REG(hw,E1000_SYMERRS);
+               stats->sec += E1000_READ_REG(hw, E1000_SEC);
+       }
+
+       stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+       stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+       stats->scc += E1000_READ_REG(hw, E1000_SCC);
+       stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+       stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+       stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+       stats->colc += E1000_READ_REG(hw, E1000_COLC);
+       stats->dc += E1000_READ_REG(hw, E1000_DC);
+       stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+       stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+       stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+       /*
+       ** For watchdog management we need to know if we have been
+       ** paused during the last interval, so capture that here.
+       */ 
+        adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
+        stats->xoffrxc += adapter->pause_frames;
+       stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+       stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+       stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+       stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+       stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+       stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+       stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+       stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+       stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+       stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+       stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+       stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+       /* For the 64-bit byte counters the low dword must be read first. */
+       /* Both registers clear on the read of the high dword */
+
+       stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
+           ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32);
+       stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
+           ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+       stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+       stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+       stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+       stats->roc += E1000_READ_REG(hw, E1000_ROC);
+       stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+       stats->tor += E1000_READ_REG(hw, E1000_TORH);
+       stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+       stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+       stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+       stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+       stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+       stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+       stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+       stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+       stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+       stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+       stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+       /* Interrupt Counts */
+
+       stats->iac += E1000_READ_REG(hw, E1000_IAC);
+       stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+       stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+       stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+       stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+       stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+       stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+       stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+       stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+       /* Host to Card Statistics */
+
+       stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+       stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+       stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+       stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+       stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+       stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+       stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+       stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
+           ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32));
+       stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
+           ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
+       stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+       stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+       stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+       stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+       stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+       stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+       stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+       stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+       stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+       ifp = adapter->ifp;
+       ifp->if_collisions = stats->colc;
+
+       /* Rx Errors */
+       ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc +
+           stats->crcerrs + stats->algnerrc +
+           stats->ruc + stats->roc + stats->mpc + stats->cexterr;
+
+       /* Tx Errors */
+       ifp->if_oerrors = stats->ecol +
+           stats->latecol + adapter->watchdog_events;
+
+       /* Driver specific counters */
+       adapter->device_control = E1000_READ_REG(hw, E1000_CTRL);
+       adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL);
+       adapter->int_mask = E1000_READ_REG(hw, E1000_IMS);
+       adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
+       adapter->packet_buf_alloc_tx =
+           ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
+       adapter->packet_buf_alloc_rx =
+           (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
+}
+
+
+/**********************************************************************
+ *
+ *  Initialize the VF board statistics counters.
+ *
+ **********************************************************************/
+static void
+igb_vf_init_stats(struct adapter *adapter)
+{
+        struct e1000_hw *hw = &adapter->hw;
+       struct e1000_vf_stats   *stats;
+
+       stats = (struct e1000_vf_stats  *)adapter->stats;
+       if (stats == NULL)
+               return;
+        stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
+        stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
+        stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
+        stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
+        stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
+}
+/**********************************************************************
+ *
+ *  Update the VF board statistics counters.
+ *
+ **********************************************************************/
+static void
+igb_update_vf_stats_counters(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_vf_stats   *stats;
+
+       if (adapter->link_speed == 0)
+               return;
+
+       stats = (struct e1000_vf_stats  *)adapter->stats;
+
+       UPDATE_VF_REG(E1000_VFGPRC,
+           stats->last_gprc, stats->gprc);
+       UPDATE_VF_REG(E1000_VFGORC,
+           stats->last_gorc, stats->gorc);
+       UPDATE_VF_REG(E1000_VFGPTC,
+           stats->last_gptc, stats->gptc);
+       UPDATE_VF_REG(E1000_VFGOTC,
+           stats->last_gotc, stats->gotc);
+       UPDATE_VF_REG(E1000_VFMPRC,
+           stats->last_mprc, stats->mprc);
+}
+
+/* Export a single 32-bit register via a read-only sysctl. */
+static int
+igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
+{
+       struct adapter *adapter;
+       u_int val;
+
+       adapter = oidp->oid_arg1;
+       val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
+       return (sysctl_handle_int(oidp, &val, 0, req));
+}
+
+/*
+**  Tuneable interrupt rate handler
+*/
+static int
+igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
+{
+       struct igb_queue        *que = ((struct igb_queue *)oidp->oid_arg1);
+       int                     error;
+       u32                     reg, usec, rate;
+                        
+       reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix));
+       usec = ((reg & 0x7FFC) >> 2);
+       if (usec > 0)
+               rate = 1000000 / usec;
+       else
+               rate = 0;
+       error = sysctl_handle_int(oidp, &rate, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
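+
+/*
+ * Worked example for the conversion above (illustrative only): if the
+ * interval field extracted from EITR reads 125, the handler reports
+ * 1000000 / 125 = 8000 interrupts per second; a field of 0 reports 0.
+ */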
+
+/*
+ * Add sysctl variables, one per statistic, to the system.
+ */
+static void
+igb_add_hw_stats(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+
+       struct tx_ring *txr = adapter->tx_rings;
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+       struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+       struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+       struct e1000_hw_stats *stats = adapter->stats;
+
+       struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node;
+       struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list;
+
+#define QUEUE_NAME_LEN 32
+       char namebuf[QUEUE_NAME_LEN];
+
+       /* Driver Statistics */
+       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq", 
+                       CTLFLAG_RD, &adapter->link_irq, 0,
+                       "Link MSIX IRQ Handled");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
+                       CTLFLAG_RD, &adapter->dropped_pkts,
+                       "Driver dropped packets");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
+                       CTLFLAG_RD, &adapter->no_tx_dma_setup,
+                       "Driver tx dma failure in xmit");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
+                       CTLFLAG_RD, &adapter->rx_overruns,
+                       "RX overruns");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
+                       CTLFLAG_RD, &adapter->watchdog_events,
+                       "Watchdog timeouts");
+
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", 
+                       CTLFLAG_RD, &adapter->device_control,
+                       "Device Control Register");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", 
+                       CTLFLAG_RD, &adapter->rx_control,
+                       "Receiver Control Register");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", 
+                       CTLFLAG_RD, &adapter->int_mask,
+                       "Interrupt Mask");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", 
+                       CTLFLAG_RD, &adapter->eint_mask,
+                       "Extended Interrupt Mask");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", 
+                       CTLFLAG_RD, &adapter->packet_buf_alloc_tx,
+                       "Transmit Buffer Packet Allocation");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", 
+                       CTLFLAG_RD, &adapter->packet_buf_alloc_rx,
+                       "Receive Buffer Packet Allocation");
+       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
+                       CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
+                       "Flow Control High Watermark");
+       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
+                       CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
+                       "Flow Control Low Watermark");
+
+       for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
+               struct lro_ctrl *lro = &rxr->lro;
+
+               snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 
+                               CTLFLAG_RD, &adapter->queues[i],
+                               sizeof(&adapter->queues[i]),
+                               igb_sysctl_interrupt_rate_handler,
+                               "IU", "Interrupt Rate");
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
+                               CTLFLAG_RD, adapter, E1000_TDH(txr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Transmit Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
+                               CTLFLAG_RD, adapter, E1000_TDT(txr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Transmit Descriptor Tail");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 
+                               CTLFLAG_RD, &txr->no_desc_avail,
+                               "Queue No Descriptor Available");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+                               CTLFLAG_RD, &txr->tx_packets,
+                               "Queue Packets Transmitted");
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
+                               CTLFLAG_RD, adapter, E1000_RDH(rxr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Receive Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
+                               CTLFLAG_RD, adapter, E1000_RDT(rxr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Receive Descriptor Tail");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+                               CTLFLAG_RD, &rxr->rx_packets,
+                               "Queue Packets Received");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+                               CTLFLAG_RD, &rxr->rx_bytes,
+                               "Queue Bytes Received");
+               SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
+                               CTLFLAG_RD, &lro->lro_queued, 0,
+                               "LRO Queued");
+               SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
+                               CTLFLAG_RD, &lro->lro_flushed, 0,
+                               "LRO Flushed");
+       }
+
+       /* MAC stats get their own sub node */
+
+       stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
+                                   CTLFLAG_RD, NULL, "MAC Statistics");
+       stat_list = SYSCTL_CHILDREN(stat_node);
+
+       /*
+       ** VF adapter has a very limited set of stats
+       ** since it's not managing the metal, so to speak.
+       */
+       if (adapter->vf_ifp) {
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+                       CTLFLAG_RD, &stats->gprc,
+                       "Good Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
+                       CTLFLAG_RD, &stats->gorc, 
+                       "Good Octets Received"); 
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
+                       CTLFLAG_RD, &stats->gotc, 
+                       "Good Octets Transmitted"); 
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+                       CTLFLAG_RD, &stats->mprc,
+                       "Multicast Packets Received");
+               return;
+       }
+
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", 
+                       CTLFLAG_RD, &stats->ecol,
+                       "Excessive collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", 
+                       CTLFLAG_RD, &stats->scc,
+                       "Single collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 
+                       CTLFLAG_RD, &stats->mcc,
+                       "Multiple collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", 
+                       CTLFLAG_RD, &stats->latecol,
+                       "Late collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", 
+                       CTLFLAG_RD, &stats->colc,
+                       "Collision Count");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
+                       CTLFLAG_RD, &stats->symerrs,
+                       "Symbol Errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
+                       CTLFLAG_RD, &stats->sec,
+                       "Sequence Errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
+                       CTLFLAG_RD, &stats->dc,
+                       "Defer Count");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
+                       CTLFLAG_RD, &stats->mpc,
+                       "Missed Packets");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
+                       CTLFLAG_RD, &stats->rnbc,
+                       "Receive No Buffers");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
+                       CTLFLAG_RD, &stats->ruc,
+                       "Receive Undersize");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+                       CTLFLAG_RD, &stats->rfc,
+                       "Fragmented Packets Received ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
+                       CTLFLAG_RD, &stats->roc,
+                       "Oversized Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
+                       CTLFLAG_RD, &stats->rjc,
+                       "Received Jabber");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
+                       CTLFLAG_RD, &stats->rxerrc,
+                       "Receive Errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+                       CTLFLAG_RD, &stats->crcerrs,
+                       "CRC errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
+                       CTLFLAG_RD, &stats->algnerrc,
+                       "Alignment Errors");
+       /* On 82575 these are collision counts */
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
+                       CTLFLAG_RD, &stats->cexterr,
+                       "Collision/Carrier extension errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
+                       CTLFLAG_RD, &stats->xonrxc,
+                       "XON Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
+                       CTLFLAG_RD, &stats->xontxc,
+                       "XON Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
+                       CTLFLAG_RD, &stats->xoffrxc,
+                       "XOFF Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
+                       CTLFLAG_RD, &stats->xofftxc,
+                       "XOFF Transmitted");
+       /* Packet Reception Stats */
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
+                       CTLFLAG_RD, &stats->tpr,
+                       "Total Packets Received ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+                       CTLFLAG_RD, &stats->gprc,
+                       "Good Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
+                       CTLFLAG_RD, &stats->bprc,
+                       "Broadcast Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+                       CTLFLAG_RD, &stats->mprc,
+                       "Multicast Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+                       CTLFLAG_RD, &stats->prc64,
+                       "64 byte frames received ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+                       CTLFLAG_RD, &stats->prc127,
+                       "65-127 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+                       CTLFLAG_RD, &stats->prc255,
+                       "128-255 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+                       CTLFLAG_RD, &stats->prc511,
+                       "256-511 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+                       CTLFLAG_RD, &stats->prc1023,
+                       "512-1023 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->prc1522,
+                       "1024-1522 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
+                       CTLFLAG_RD, &stats->gorc, 
+                       "Good Octets Received"); 
+
+       /* Packet Transmission Stats */
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
+                       CTLFLAG_RD, &stats->gotc, 
+                       "Good Octets Transmitted"); 
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+                       CTLFLAG_RD, &stats->tpt,
+                       "Total Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+                       CTLFLAG_RD, &stats->bptc,
+                       "Broadcast Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+                       CTLFLAG_RD, &stats->mptc,
+                       "Multicast Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+                       CTLFLAG_RD, &stats->ptc64,
+                       "64 byte frames transmitted ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+                       CTLFLAG_RD, &stats->ptc127,
+                       "65-127 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+                       CTLFLAG_RD, &stats->ptc255,
+                       "128-255 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+                       CTLFLAG_RD, &stats->ptc511,
+                       "256-511 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+                       CTLFLAG_RD, &stats->ptc1023,
+                       "512-1023 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->ptc1522,
+                       "1024-1522 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
+                       CTLFLAG_RD, &stats->tsctc,
+                       "TSO Contexts Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
+                       CTLFLAG_RD, &stats->tsctfc,
+                       "TSO Contexts Failed");
+
+
+       /* Interrupt Stats */
+
+       int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", 
+                                   CTLFLAG_RD, NULL, "Interrupt Statistics");
+       int_list = SYSCTL_CHILDREN(int_node);
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts",
+                       CTLFLAG_RD, &stats->iac,
+                       "Interrupt Assertion Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
+                       CTLFLAG_RD, &stats->icrxptc,
+                       "Interrupt Cause Rx Pkt Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
+                       CTLFLAG_RD, &stats->icrxatc,
+                       "Interrupt Cause Rx Abs Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
+                       CTLFLAG_RD, &stats->ictxptc,
+                       "Interrupt Cause Tx Pkt Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
+                       CTLFLAG_RD, &stats->ictxatc,
+                       "Interrupt Cause Tx Abs Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
+                       CTLFLAG_RD, &stats->ictxqec,
+                       "Interrupt Cause Tx Queue Empty Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
+                       CTLFLAG_RD, &stats->ictxqmtc,
+                       "Interrupt Cause Tx Queue Min Thresh Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
+                       CTLFLAG_RD, &stats->icrxdmtc,
+                       "Interrupt Cause Rx Desc Min Thresh Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun",
+                       CTLFLAG_RD, &stats->icrxoc,
+                       "Interrupt Cause Receiver Overrun Count");
+
+       /* Host to Card Stats */
+
+       host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", 
+                                   CTLFLAG_RD, NULL, 
+                                   "Host to Card Statistics");
+
+       host_list = SYSCTL_CHILDREN(host_node);
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
+                       CTLFLAG_RD, &stats->cbtmpc,
+                       "Circuit Breaker Tx Packet Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
+                       CTLFLAG_RD, &stats->htdpmc,
+                       "Host Transmit Discarded Packets");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt",
+                       CTLFLAG_RD, &stats->rpthc,
+                       "Rx Packets To Host");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
+                       CTLFLAG_RD, &stats->cbrmpc,
+                       "Circuit Breaker Rx Packet Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
+                       CTLFLAG_RD, &stats->cbrdpc,
+                       "Circuit Breaker Rx Dropped Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
+                       CTLFLAG_RD, &stats->hgptc,
+                       "Host Good Packets Tx Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
+                       CTLFLAG_RD, &stats->htcbdpc,
+                       "Host Tx Circuit Breaker Dropped Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
+                       CTLFLAG_RD, &stats->hgorc,
+                       "Host Good Octets Received Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
+                       CTLFLAG_RD, &stats->hgotc,
+                       "Host Good Octets Transmit Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors",
+                       CTLFLAG_RD, &stats->lenerrs,
+                       "Length Errors");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
+                       CTLFLAG_RD, &stats->scvpc,
+                       "SerDes/SGMII Code Violation Pkt Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
+                       CTLFLAG_RD, &stats->hrmpc,
+                       "Header Redirection Missed Packet Count");
+}
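+
+/*
+ * Reading these statistics from userland is a plain sysctl walk; for
+ * example (device name illustrative, it depends on the probed unit):
+ *
+ *     sysctl dev.igb.0.mac_stats
+ *     sysctl dev.igb.0.queue0.rx_packets
+ *
+ * Each leaf added above appears read-only under the device's sysctl tree.
+ */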
+
+
+/**********************************************************************
+ *
+ *  This routine provides a way to dump out the adapter eeprom,
+ *  often a useful debug/service tool. Only the first 32 words are
+ *  dumped; the data that matters lies within that range.
+ *
+ **********************************************************************/
+static int
+igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
+{
+       struct adapter *adapter;
+       int error;
+       int result;
+
+       result = -1;
+       error = sysctl_handle_int(oidp, &result, 0, req);
+
+       if (error || !req->newptr)
+               return (error);
+
+       /*
+        * This value will cause a hex dump of the
+        * first 32 16-bit words of the EEPROM to
+        * the screen.
+        */
+       if (result == 1) {
+               adapter = (struct adapter *)arg1;
+               igb_print_nvm_info(adapter);
+        }
+
+       return (error);
+}
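+
+/*
+ * Usage sketch: writing 1 to this sysctl triggers igb_print_nvm_info(),
+ * which dumps the first 32 16-bit EEPROM words to the console; any other
+ * value is ignored. The OID name below is illustrative only, it depends
+ * on how the handler is registered during attach:
+ *
+ *     sysctl dev.igb.0.nvm=1
+ */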
+
+static void
+igb_print_nvm_info(struct adapter *adapter)
+{
+       u16     eeprom_data;
+       int     i, j, row = 0;
+
+       /* It's a bit crude, but it gets the job done */
+       printf("\nInterface EEPROM Dump:\n");
+       printf("Offset\n0x0000  ");
+       for (i = 0, j = 0; i < 32; i++, j++) {
+               if (j == 8) { /* Make the offset block */
+                       j = 0; ++row;
+                       printf("\n0x00%x0  ", row);
+               }
+               e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
+               printf("%04x ", eeprom_data);
+       }
+       printf("\n");
+}
+
+static void
+igb_set_sysctl_value(struct adapter *adapter, const char *name,
+       const char *description, int *limit, int value)
+{
+       *limit = value;
+       SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+           OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
+/*
+** Set flow control using sysctl:
+** Flow control values:
+**     0 - off
+**     1 - rx pause
+**     2 - tx pause
+**     3 - full
+*/
+static int
+igb_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+       struct adapter *adapter;
+
+       error = sysctl_handle_int(oidp, &igb_fc_setting, 0, req);
+
+       if (error)
+               return (error);
+
+       adapter = (struct adapter *) arg1;
+       switch (igb_fc_setting) {
+               case e1000_fc_rx_pause:
+               case e1000_fc_tx_pause:
+               case e1000_fc_full:
+                       adapter->hw.fc.requested_mode = igb_fc_setting;
+                       break;
+               case e1000_fc_none:
+               default:
+                       adapter->hw.fc.requested_mode = e1000_fc_none;
+       }
+
+       adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
+       e1000_force_mac_fc(&adapter->hw);
+       return error;
+}
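+
+/*
+ * Usage sketch for the flow control sysctl above (OID name illustrative,
+ * it depends on how the handler is registered):
+ *
+ *     sysctl dev.igb.0.flow_control=3
+ *
+ * Per the value table above, 1 selects rx pause, 2 tx pause and 3 full
+ * flow control; 0 or any unrecognized value falls back to e1000_fc_none.
+ */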
diff --git a/lib/librte_pmd_e1000/e1000/if_igb.h b/lib/librte_pmd_e1000/e1000/if_igb.h
new file mode 100644 (file)
index 0000000..9a0bb47
--- /dev/null
@@ -0,0 +1,541 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IGB_H_DEFINED_
+#define _IGB_H_DEFINED_
+
+/* Tunables */
+
+/*
+ * IGB_TXD: Maximum number of Transmit Descriptors
+ *
+ *   This value is the number of transmit descriptors allocated by the driver.
+ *   Increasing this value allows the driver to queue more transmits. Each
+ *   descriptor is 16 bytes.
+ *   Since TDLEN should be a multiple of 128 bytes, the number of transmit
+ *   descriptors should meet the following condition:
+ *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
+ */
+#define IGB_MIN_TXD            256
+#define IGB_DEFAULT_TXD                1024
+#define IGB_MAX_TXD            4096
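+
+/*
+ * Worked check of the alignment rule above: with 16-byte descriptors the
+ * default of 1024 gives 1024 * 16 = 16384 bytes, a multiple of 128; any
+ * descriptor count that is a multiple of 8 satisfies the condition.
+ */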
+
+/*
+ * IGB_RXD: Maximum number of Receive Descriptors
+ *
+ *   This value is the number of receive descriptors allocated by the driver.
+ *   Increasing this value allows the driver to buffer more incoming packets.
+ *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
+ *   descriptor. The maximum MTU size is 16110.
+ *   Since RDLEN should be a multiple of 128 bytes, the number of receive
+ *   descriptors should meet the following condition:
+ *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
+ */
+#define IGB_MIN_RXD            256
+#define IGB_DEFAULT_RXD                1024
+#define IGB_MAX_RXD            4096
+
+/*
+ * IGB_TIDV - Transmit Interrupt Delay Value
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ *   This value delays the generation of transmit interrupts in units of
+ *   1.024 microseconds. Transmit interrupt reduction can improve CPU
+ *   efficiency if properly tuned for specific network traffic. If the
+ *   system is reporting dropped transmits, this value may be set too high
+ *   causing the driver to run out of available transmit descriptors.
+ */
+#define IGB_TIDV                         64
+
+/*
+ * IGB_TADV - Transmit Absolute Interrupt Delay Value
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ *   This value, in units of 1.024 microseconds, limits the delay in which a
+ *   transmit interrupt is generated. Useful only if IGB_TIDV is non-zero,
+ *   this value ensures that an interrupt is generated after the initial
+ *   packet is sent on the wire within the set amount of time.  Proper tuning,
+ *   along with IGB_TIDV, may improve traffic throughput in specific
+ *   network conditions.
+ */
+#define IGB_TADV                         64
+
+/*
+ * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer)
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 0
+ *   This value delays the generation of receive interrupts in units of 1.024
+ *   microseconds.  Receive interrupt reduction can improve CPU efficiency if
+ *   properly tuned for specific network traffic. Increasing this value adds
+ *   extra latency to frame reception and can end up decreasing the throughput
+ *   of TCP traffic. If the system is reporting dropped receives, this value
+ *   may be set too high, causing the driver to run out of available receive
+ *   descriptors.
+ *
+ *   CAUTION: When setting IGB_RDTR to a value other than 0, adapters
+ *            may hang (stop transmitting) under certain network conditions.
+ *            If this occurs a WATCHDOG message is logged in the system
+ *            event log. In addition, the controller is automatically reset,
+ *            restoring the network connection. To eliminate the potential
+ *            for the hang ensure that IGB_RDTR is set to 0.
+ */
+#define IGB_RDTR                         0
+
+/*
+ * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ *   This value, in units of 1.024 microseconds, limits the delay in which a
+ *   receive interrupt is generated. Useful only if IGB_RDTR is non-zero,
+ *   this value ensures that an interrupt is generated after the initial
+ *   packet is received within the set amount of time.  Proper tuning,
+ *   along with IGB_RDTR, may improve traffic throughput in specific network
+ *   conditions.
+ */
+#define IGB_RADV                         64
+
+/*
+ * This parameter controls the duration of transmit watchdog timer.
+ */
+#define IGB_WATCHDOG                   (10 * hz)
+
+/*
+ * This parameter controls when the driver calls the routine to reclaim
+ * transmit descriptors. Cleaning earlier seems a win.
+ */
+#define IGB_TX_CLEANUP_THRESHOLD       (adapter->num_tx_desc / 2)
+
+/*
+ * This parameter controls whether or not autonegotiation is enabled.
+ *              0 - Disable autonegotiation
+ *              1 - Enable  autonegotiation
+ */
+#define DO_AUTO_NEG                     1
+
+/*
+ * This parameter controls whether or not the driver will wait for
+ * autonegotiation to complete.
+ *              1 - Wait for autonegotiation to complete
+ *              0 - Don't wait for autonegotiation to complete
+ */
+#define WAIT_FOR_AUTO_NEG_DEFAULT       0
+
+/* Tunables -- End */
+
+#define AUTONEG_ADV_DEFAULT    (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+                               ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+                               ADVERTISE_1000_FULL)
+
+#define AUTO_ALL_MODES         0
+
+/* PHY master/slave setting */
+#define IGB_MASTER_SLAVE               e1000_ms_hw_default
+
+/*
+ * Miscellaneous constants
+ */
+#define IGB_VENDOR_ID                  0x8086
+
+#define IGB_JUMBO_PBA                  0x00000028
+#define IGB_DEFAULT_PBA                        0x00000030
+#define IGB_SMARTSPEED_DOWNSHIFT       3
+#define IGB_SMARTSPEED_MAX             15
+#define IGB_MAX_LOOP                   10
+
+#define IGB_RX_PTHRESH                 (hw->mac.type <= e1000_82576 ? 16 : 8)
+#define IGB_RX_HTHRESH                 8
+#define IGB_RX_WTHRESH                 1
+
+#define IGB_TX_PTHRESH                 8
+#define IGB_TX_HTHRESH                 1
+#define IGB_TX_WTHRESH                 ((hw->mac.type != e1000_82575 && \
+                                          adapter->msix_mem) ? 1 : 16)
+
+#define MAX_NUM_MULTICAST_ADDRESSES     128
+#define PCI_ANY_ID                      (~0U)
+#define ETHER_ALIGN                     2
+#define IGB_TX_BUFFER_SIZE             ((uint32_t) 1514)
+#define IGB_FC_PAUSE_TIME              0x0680
+#define IGB_EEPROM_APME                        0x400
+#define IGB_QUEUE_IDLE                 0
+#define IGB_QUEUE_WORKING              1
+#define IGB_QUEUE_HUNG                 2
+
+/*
+ * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN must
+ * be a multiple of 128 bytes, so TDBA/RDBA are aligned on a 128-byte boundary
+ * instead. This also helps with cache line effects; the hardware supports
+ * cache line sizes up to 128 bytes.
+ */
+#define IGB_DBA_ALIGN                  128
+
+#define SPEED_MODE_BIT (1<<21)         /* On PCI-E MACs only */
+
+/* PCI Config defines */
+#define IGB_MSIX_BAR           3
+
+/* Defines for printing debug information */
+#define DEBUG_INIT  0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW    0
+
+#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
+#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
+#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
+#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)
+
+#define IGB_MAX_SCATTER                64
+#define IGB_VFTA_SIZE          128
+#define IGB_BR_SIZE            4096    /* ring buf size */
+#define IGB_TSO_SIZE           (65535 + sizeof(struct ether_vlan_header))
+#define IGB_TSO_SEG_SIZE       4096    /* Max dma segment size */
+#define IGB_HDR_BUF            128
+#define IGB_PKTTYPE_MASK       0x0000FFF0
+#define ETH_ZLEN               60
+#define ETH_ADDR_LEN           6
+
+/* Offload bits in mbuf flag */
+#if __FreeBSD_version >= 800000
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#else
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP)
+#endif
+
+/* Define the starting Interrupt rate per Queue */
+#define IGB_INTS_PER_SEC        8000
+#define IGB_DEFAULT_ITR         ((1000000/IGB_INTS_PER_SEC) << 2)
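+
+/*
+ * With IGB_INTS_PER_SEC = 8000, IGB_DEFAULT_ITR evaluates to
+ * (1000000 / 8000) << 2 = 125 << 2 = 500 (illustrative arithmetic only).
+ */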
+
+#define IGB_LINK_ITR            2000
+
+/* Precision Time Sync (IEEE 1588) defines */
+#define ETHERTYPE_IEEE1588     0x88F7
+#define PICOSECS_PER_TICK      20833
+#define TSYNC_PORT             319 /* UDP port for the protocol */
+
+/*
+ * Bus dma allocation structure used by
+ * e1000_dma_malloc and e1000_dma_free.
+ */
+struct igb_dma_alloc {
+        bus_addr_t              dma_paddr;
+        caddr_t                 dma_vaddr;
+        bus_dma_tag_t           dma_tag;
+        bus_dmamap_t            dma_map;
+        bus_dma_segment_t       dma_seg;
+        int                     dma_nseg;
+};
+
+
+/*
+** Driver queue struct: this is the interrupt container
+**  for the associated tx and rx ring.
+*/
+struct igb_queue {
+       struct adapter          *adapter;
+       u32                     msix;           /* This queue's MSIX vector */
+       u32                     eims;           /* This queue's EIMS bit */
+       u32                     eitr_setting;
+       struct resource         *res;
+       void                    *tag;
+       struct tx_ring          *txr;
+       struct rx_ring          *rxr;
+       struct task             que_task;
+       struct taskqueue        *tq;
+       u64                     irqs;
+};
+
+/*
+ * Transmit ring: one per queue
+ */
+struct tx_ring {
+       struct adapter          *adapter;
+       u32                     me;
+       struct mtx              tx_mtx;
+       char                    mtx_name[16];
+       struct igb_dma_alloc    txdma;
+       struct e1000_tx_desc    *tx_base;
+       u32                     next_avail_desc;
+       u32                     next_to_clean;
+       volatile u16            tx_avail;
+       struct igb_tx_buffer    *tx_buffers;
+#if __FreeBSD_version >= 800000
+       struct buf_ring         *br;
+#endif
+       bus_dma_tag_t           txtag;
+
+       u32                     bytes;
+       u32                     packets;
+
+       int                     queue_status;
+       int                     watchdog_time;
+       int                     tdt;
+       int                     tdh;
+       u64                     no_desc_avail;
+       u64                     tx_packets;
+};
+
+/*
+ * Receive ring: one per queue
+ */
+struct rx_ring {
+       struct adapter          *adapter;
+       u32                     me;
+       struct igb_dma_alloc    rxdma;
+       union e1000_adv_rx_desc *rx_base;
+       struct lro_ctrl         lro;
+       bool                    lro_enabled;
+       bool                    hdr_split;
+       bool                    discard;
+       struct mtx              rx_mtx;
+       char                    mtx_name[16];
+       u32                     next_to_refresh;
+       u32                     next_to_check;
+       struct igb_rx_buf       *rx_buffers;
+       bus_dma_tag_t           htag;           /* dma tag for rx head */
+       bus_dma_tag_t           ptag;           /* dma tag for rx packet */
+       /*
+        * First/last mbuf pointers, for
+        * collecting multisegment RX packets.
+        */
+       struct mbuf            *fmp;
+       struct mbuf            *lmp;
+
+       u32                     bytes;
+       u32                     packets;
+       int                     rdt;
+       int                     rdh;
+
+       /* Soft stats */
+       u64                     rx_split_packets;
+       u64                     rx_discarded;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+};
+
+struct adapter {
+       struct ifnet    *ifp;
+       struct e1000_hw hw;
+
+       struct e1000_osdep osdep;
+       struct device   *dev;
+       struct cdev     *led_dev;
+
+       struct resource *pci_mem;
+       struct resource *msix_mem;
+       struct resource *res;
+       void            *tag;
+       u32             que_mask;
+
+       int             linkvec;
+       int             link_mask;
+       struct task     link_task;
+       int             link_irq;
+
+       struct ifmedia  media;
+       struct callout  timer;
+       int             msix;   /* total vectors allocated */
+       int             if_flags;
+       int             max_frame_size;
+       int             min_frame_size;
+       int             pause_frames;
+       struct mtx      core_mtx;
+       int             igb_insert_vlan_header;
+        u16            num_queues;
+       u16             vf_ifp;  /* a VF interface */
+
+       eventhandler_tag vlan_attach;
+       eventhandler_tag vlan_detach;
+       u32             num_vlans;
+
+       /* Management and WOL features */
+       int             wol;
+       int             has_manage;
+
+       /*
+       ** Shadow VFTA table, this is needed because
+       ** the real vlan filter table gets cleared during
+       ** a soft reset and the driver needs to be able
+       ** to repopulate it.
+       */
+       u32             shadow_vfta[IGB_VFTA_SIZE];
+
+       /* Info about the interface */
+       u8              link_active;
+       u16             link_speed;
+       u16             link_duplex;
+       u32             smartspeed;
+       u32             dma_coalesce;
+
+       /* Interface queues */
+       struct igb_queue        *queues;
+
+       /*
+        * Transmit rings
+        */
+       struct tx_ring          *tx_rings;
+        u16                    num_tx_desc;
+
+       /* Multicast array pointer */
+       u8                      *mta;
+
+       /* 
+        * Receive rings
+        */
+       struct rx_ring          *rx_rings;
+       bool                    rx_hdr_split;
+        u16                    num_rx_desc;
+       int                     rx_process_limit;
+       u32                     rx_mbuf_sz;
+       u32                     rx_mask;
+
+       /* Misc stats maintained by the driver */
+       unsigned long   dropped_pkts;
+       unsigned long   mbuf_defrag_failed;
+       unsigned long   mbuf_header_failed;
+       unsigned long   mbuf_packet_failed;
+       unsigned long   no_tx_map_avail;
+        unsigned long  no_tx_dma_setup;
+       unsigned long   watchdog_events;
+       unsigned long   rx_overruns;
+       unsigned long   device_control;
+       unsigned long   rx_control;
+       unsigned long   int_mask;
+       unsigned long   eint_mask;
+       unsigned long   packet_buf_alloc_rx;
+       unsigned long   packet_buf_alloc_tx;
+
+       boolean_t       in_detach;
+
+#ifdef IGB_IEEE1588
+       /* IEEE 1588 precision time support */
+       struct cyclecounter     cycles;
+       struct nettimer         clock;
+       struct nettime_compare  compare;
+       struct hwtstamp_ctrl    hwtstamp;
+#endif
+
+       void                    *stats;
+};
+
+/* ******************************************************************************
+ * vendor_info_array
+ *
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ *
+ * ******************************************************************************/
+typedef struct _igb_vendor_info_t {
+       unsigned int vendor_id;
+       unsigned int device_id;
+       unsigned int subvendor_id;
+       unsigned int subdevice_id;
+       unsigned int index;
+} igb_vendor_info_t;
+
+
+struct igb_tx_buffer {
+       int             next_eop;  /* Index of the desc to watch */
+        struct mbuf    *m_head;
+        bus_dmamap_t    map;         /* bus_dma map for packet */
+};
+
+struct igb_rx_buf {
+        struct mbuf    *m_head;
+        struct mbuf    *m_pack;
+       bus_dmamap_t    hmap;   /* bus_dma map for header */
+       bus_dmamap_t    pmap;   /* bus_dma map for packet */
+};
+
+/*
+** Find the number of unrefreshed RX descriptors
+*/
+static inline u16
+igb_rx_unrefreshed(struct rx_ring *rxr)
+{
+       struct adapter  *adapter = rxr->adapter;
+       if (rxr->next_to_check > rxr->next_to_refresh)
+               return (rxr->next_to_check - rxr->next_to_refresh - 1);
+       else
+               return ((adapter->num_rx_desc + rxr->next_to_check) -
+                   rxr->next_to_refresh - 1);
+}
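+
+/*
+ * Example of the wrap-around branch above (illustrative): with
+ * num_rx_desc = 1024, next_to_check = 10 and next_to_refresh = 1000,
+ * (1024 + 10) - 1000 - 1 = 33 descriptors still await refresh.
+ */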
+
+#define        IGB_CORE_LOCK_INIT(_sc, _name) \
+       mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
+#define        IGB_CORE_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->core_mtx)
+#define        IGB_CORE_LOCK(_sc)              mtx_lock(&(_sc)->core_mtx)
+#define        IGB_CORE_UNLOCK(_sc)            mtx_unlock(&(_sc)->core_mtx)
+#define        IGB_CORE_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->core_mtx, MA_OWNED)
+
+#define        IGB_TX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->tx_mtx)
+#define        IGB_TX_LOCK(_sc)                mtx_lock(&(_sc)->tx_mtx)
+#define        IGB_TX_UNLOCK(_sc)              mtx_unlock(&(_sc)->tx_mtx)
+#define        IGB_TX_TRYLOCK(_sc)             mtx_trylock(&(_sc)->tx_mtx)
+#define        IGB_TX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
+
+#define        IGB_RX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->rx_mtx)
+#define        IGB_RX_LOCK(_sc)                mtx_lock(&(_sc)->rx_mtx)
+#define        IGB_RX_UNLOCK(_sc)              mtx_unlock(&(_sc)->rx_mtx)
+#define        IGB_RX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->rx_mtx, MA_OWNED)
+
+#define UPDATE_VF_REG(reg, last, cur)          \
+{                                              \
+       u32 new = E1000_READ_REG(hw, reg);      \
+       if (new < last)                         \
+               cur += 0x100000000LL;           \
+       last = new;                             \
+       cur &= 0xFFFFFFFF00000000LL;            \
+       cur |= new;                             \
+}
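+
+/*
+ * Rollover example for UPDATE_VF_REG (illustrative): if a 32-bit VF stats
+ * register last read 0xFFFFFFF0 and now reads 0x00000010, the running
+ * 64-bit total is bumped by 2^32 before the new low 32 bits are merged in,
+ * so the accumulated counter stays monotonic across hardware wrap-arounds.
+ */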
+
+#if __FreeBSD_version < 800504
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+       if (ALTQ_IS_ENABLED(&ifp->if_snd))
+               return (1);
+#endif
+       return (!buf_ring_empty(br));
+}
+#endif
+
+#endif /* _IGB_H_DEFINED_ */
+
+
diff --git a/lib/librte_pmd_e1000/e1000_ethdev.h b/lib/librte_pmd_e1000/e1000_ethdev.h
new file mode 100644 (file)
index 0000000..6b72647
--- /dev/null
@@ -0,0 +1,116 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+#ifndef _E1000_ETHDEV_H_
+#define _E1000_ETHDEV_H_
+
+/* need update link, bit flag */
+#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+
+/*
+ * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
+ * driver.
+ */
+#define E1000_ADVTXD_POPTS_TXSM     0x00000200 /* L4 Checksum offload request */
+#define E1000_ADVTXD_POPTS_IXSM     0x00000100 /* IP Checksum offload request */
+#define E1000_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE of Reserved */
+#define E1000_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
+#define E1000_RXD_ERR_CKSUM_BIT     29 
+#define E1000_RXD_ERR_CKSUM_MSK     3
+#define E1000_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
+
+#define E1000_VFTA_SIZE 128
+
+/* structure for interrupt relative data */
+struct e1000_interrupt {
+       uint32_t flags;
+};
+
+/* local vfta copy */
+struct e1000_vfta {
+       uint32_t vfta[E1000_VFTA_SIZE];
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct e1000_adapter {
+       struct e1000_hw         hw;
+       struct e1000_hw_stats   stats;
+       struct e1000_interrupt  intr;
+       struct e1000_vfta       shadow_vfta;
+};
+
+#define E1000_DEV_PRIVATE_TO_HW(adapter) \
+       (&((struct e1000_adapter *)adapter)->hw)
+
+#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
+       (&((struct e1000_adapter *)adapter)->stats)
+
+#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
+       (&((struct e1000_adapter *)adapter)->intr)
+
+#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
+       (&((struct e1000_adapter *)adapter)->shadow_vfta)
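+
+/*
+ * Typical use of the accessor macros above (illustrative):
+ *   struct e1000_hw *hw =
+ *           E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ */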
+
+/*
+ * RX/TX function prototypes
+ */
+int igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
+int igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
+void igb_dev_clear_queues(struct rte_eth_dev *dev);
+
+int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+               uint16_t nb_rx_desc, unsigned int socket_id,
+               const struct rte_eth_rxconf *rx_conf,
+               struct rte_mempool *mb_pool);
+
+int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+               uint16_t nb_tx_desc, unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf);
+
+int eth_igb_rx_init(struct rte_eth_dev *dev);
+
+void eth_igb_tx_init(struct rte_eth_dev *dev);
+
+uint16_t eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+#endif /* _E1000_ETHDEV_H_ */
diff --git a/lib/librte_pmd_e1000/e1000_logs.h b/lib/librte_pmd_e1000/e1000_logs.h
new file mode 100644 (file)
index 0000000..8a1e321
--- /dev/null
@@ -0,0 +1,73 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+#ifndef _E1000_LOGS_H_
+#define _E1000_LOGS_H_
+
+#ifdef RTE_LIBRTE_IGB_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IGB_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IGB_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IGB_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IGB_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#endif
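+
+/*
+ * Example (illustrative): PMD_INIT_LOG(DEBUG, "found %d queues", n) expands
+ * to an RTE_LOG() call prefixed with the calling function name when
+ * RTE_LIBRTE_IGB_DEBUG_INIT is defined, and to a no-op otherwise.
+ */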
+
+#endif /* _E1000_LOGS_H_ */
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
new file mode 100644 (file)
index 0000000..5e711c9
--- /dev/null
@@ -0,0 +1,1318 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+
+#include "e1000_logs.h"
+#include "igb/e1000_api.h"
+#include "igb/e1000_hw.h"
+#include "e1000_ethdev.h"
+
+static int  eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+               uint16_t nb_tx_q);
+static int  eth_igb_start(struct rte_eth_dev *dev);
+static void eth_igb_stop(struct rte_eth_dev *dev);
+static void eth_igb_close(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
+static int  eth_igb_link_update(struct rte_eth_dev *dev,
+                               int wait_to_complete);
+static void eth_igb_stats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_stats *rte_stats);
+static void eth_igb_stats_reset(struct rte_eth_dev *dev);
+static void eth_igb_infos_get(struct rte_eth_dev *dev,
+                               struct rte_eth_dev_info *dev_info);
+static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
+                               struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
+static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
+                                                       void *param);
+static int  igb_hardware_init(struct e1000_hw *hw);
+static void igb_hw_control_acquire(struct e1000_hw *hw);
+static void igb_hw_control_release(struct e1000_hw *hw);
+static void igb_init_manageability(struct e1000_hw *hw);
+static void igb_release_manageability(struct e1000_hw *hw);
+static void igb_vlan_hw_support_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_support_disable(struct rte_eth_dev *dev);
+static void eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+                                     uint16_t vlan_id,
+                                     int on);
+static int eth_igb_led_on(struct rte_eth_dev *dev);
+static int eth_igb_led_off(struct rte_eth_dev *dev);
+
+static void igb_intr_disable(struct e1000_hw *hw);
+static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
+static void eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+               uint32_t index, uint32_t pool);
+static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+
+#define IGB_FC_PAUSE_TIME 0x0680
+#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
+#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_igb_map[] = {
+
+#undef RTE_LIBRTE_IXGBE_PMD
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{.device_id = 0},
+};
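+
+/*
+ * The table above is filled in by expanding RTE_PCI_DEV_ID_DECL() for each
+ * entry in rte_pci_dev_ids.h; RTE_LIBRTE_IXGBE_PMD is undefined first,
+ * presumably so that only the igb device IDs are pulled into this table.
+ */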
+
+static struct eth_dev_ops eth_igb_ops = {
+       .dev_configure        = eth_igb_configure,
+       .dev_start            = eth_igb_start,
+       .dev_stop             = eth_igb_stop,
+       .dev_close            = eth_igb_close,
+       .promiscuous_enable   = eth_igb_promiscuous_enable,
+       .promiscuous_disable  = eth_igb_promiscuous_disable,
+       .allmulticast_enable  = eth_igb_allmulticast_enable,
+       .allmulticast_disable = eth_igb_allmulticast_disable,
+       .link_update          = eth_igb_link_update,
+       .stats_get            = eth_igb_stats_get,
+       .stats_reset          = eth_igb_stats_reset,
+       .dev_infos_get        = eth_igb_infos_get,
+       .vlan_filter_set      = eth_igb_vlan_filter_set,
+       .rx_queue_setup       = eth_igb_rx_queue_setup,
+       .tx_queue_setup       = eth_igb_tx_queue_setup,
+       .dev_led_on           = eth_igb_led_on,
+       .dev_led_off          = eth_igb_led_off,
+       .flow_ctrl_set        = eth_igb_flow_ctrl_set,
+       .mac_addr_add         = eth_igb_rar_set,
+       .mac_addr_remove      = eth_igb_rar_clear,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ *   - Pointer to the link structure to be filled with the current status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = link;
+       struct rte_eth_link *src = &(dev->data->dev_link);
+
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ *   - Pointer to the link structure holding the status to be stored.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *src = link;
+
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
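+
+/*
+ * Both helpers above rely on rte_atomic64_cmpset() to copy the whole
+ * rte_eth_link structure in a single atomic 64-bit operation, which assumes
+ * that the structure fits in 64 bits.
+ */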
+
+static void
+igb_identify_hardware(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       hw->vendor_id = dev->pci_dev->id.vendor_id;
+       hw->device_id = dev->pci_dev->id.device_id;
+       hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+       hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+       e1000_set_mac_type(hw);
+
+       /* need to check if it is a vf device below */
+}
+
+static int
+eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+                  struct rte_eth_dev *eth_dev)
+{
+       int error = 0;
+       struct rte_pci_device *pci_dev;
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct e1000_vfta * shadow_vfta =
+               E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+
+       pci_dev = eth_dev->pci_dev;
+       eth_dev->dev_ops = &eth_igb_ops;
+       eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+       eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+       /* for secondary processes, we don't initialise any further as primary
+        * has already done this work. Only check we don't need a different
+        * RX function */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+               if (eth_dev->data->scattered_rx)
+                       eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+               return 0;
+       }
+
+       hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+
+       igb_identify_hardware(eth_dev);
+
+       if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+               error = -EIO;
+               goto err_late;
+       }
+
+       e1000_get_bus_info(hw);
+
+       hw->mac.autoneg = 1;
+       hw->phy.autoneg_wait_to_complete = 0;
+       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+       /* Copper options */
+       if (hw->phy.media_type == e1000_media_type_copper) {
+               hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+               hw->phy.disable_polarity_correction = 0;
+               hw->phy.ms_type = e1000_ms_hw_default;
+       }
+
+       /*
+        * Start from a known state, this is important in reading the nvm
+        * and mac from that.
+        */
+       e1000_reset_hw(hw);
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (e1000_validate_nvm_checksum(hw) < 0) {
+               /*
+                * Some PCI-E parts fail the first check due to
+                * the link being in sleep state, call it again,
+                * if it fails a second time its a real issue.
+                */
+               if (e1000_validate_nvm_checksum(hw) < 0) {
+                       PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+                       error = -EIO;
+                       goto err_late;
+               }
+       }
+
+       /* Read the permanent MAC address out of the EEPROM */
+       if (e1000_read_mac_addr(hw) != 0) {
+               PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+               error = -EIO;
+               goto err_late;
+       }
+
+       /* Allocate memory for storing MAC addresses */
+       eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+               ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+       if (eth_dev->data->mac_addrs == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+                                               "store MAC addresses",
+                               ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+               error = -ENOMEM;
+               goto err_late;
+       }
+
+       /* Copy the permanent MAC address */
+       ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+
+       /* initialize the vfta */
+       memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+       /* Now initialize the hardware */
+       if (igb_hardware_init(hw) != 0) {
+               PMD_INIT_LOG(ERR, "Hardware initialization failed");
+               rte_free(eth_dev->data->mac_addrs);
+               eth_dev->data->mac_addrs = NULL;
+               error = -ENODEV;
+               goto err_late;
+       }
+       hw->mac.get_link_status = 1;
+
+       /* Indicate SOL/IDER usage */
+       if (e1000_check_reset_block(hw) < 0) {
+               PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
+                                       "SOL/IDER session");
+       }
+
+       PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id);
+
+       rte_intr_callback_register(&(pci_dev->intr_handle),
+               eth_igb_interrupt_handler, (void *)eth_dev);
+
+       return 0;
+
+err_late:
+       igb_hw_control_release(hw);
+
+       return (error);
+}
+
+static struct eth_driver rte_igb_pmd = {
+       {
+               .name = "rte_igb_pmd",
+               .id_table = pci_id_igb_map,
+               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+       },
+       .eth_dev_init = eth_igb_dev_init,
+       .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+int
+rte_igb_pmd_init(void)
+{
+       rte_eth_driver_register(&rte_igb_pmd);
+       return 0;
+}
+
+static int
+eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+{
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       int diag;
+
+       PMD_INIT_LOG(DEBUG, ">>");
+
+       intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+       /* Allocate the array of pointers to RX structures */
+       diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
+                                       " pointers to RX queues failed",
+                                       dev->data->port_id, nb_rx_q);
+               return diag;
+       }
+
+       /* Allocate the array of pointers to TX structures */
+       diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
+                                       " pointers to TX queues failed",
+                                       dev->data->port_id, nb_tx_q);
+
+               return diag;
+       }
+
+       PMD_INIT_LOG(DEBUG, "<<");
+
+       return (0);
+}
+
+static int
+eth_igb_start(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret, i;
+
+       PMD_INIT_LOG(DEBUG, ">>");
+
+       igb_intr_disable(hw);
+
+       /* Power up the phy. Needed to make the link go Up */
+       e1000_power_up_phy(hw);
+
+       /*
+        * Packet Buffer Allocation (PBA)
+        * Writing PBA sets the receive portion of the buffer
+        * the remainder is used for the transmit buffer.
+        */
+       if (hw->mac.type == e1000_82575) {
+               uint32_t pba;
+
+               pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+               E1000_WRITE_REG(hw, E1000_PBA, pba);
+       }
+
+       /* Put the address into the Receive Address Array */
+       e1000_rar_set(hw, hw->mac.addr, 0);
+
+       /* Initialize the hardware */
+       if (igb_hardware_init(hw)) {
+               PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+               return (-1);
+       }
+
+       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+
+       /* Configure for OS presence */
+       igb_init_manageability(hw);
+
+       eth_igb_tx_init(dev);
+
+       /* This can fail when allocating mbufs for descriptor rings */
+       ret = eth_igb_rx_init(dev);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+               return ret;
+       }
+
+       e1000_clear_hw_cntrs_base_generic(hw);
+
+       /*
+        * If VLAN filtering is enabled, set up VLAN tag offload and filtering
+        * and restore the VFTA.
+        */
+       if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               igb_vlan_hw_support_enable(dev);
+       else
+               igb_vlan_hw_support_disable(dev);
+
+       /*
+        * Configure the Interrupt Moderation register (EITR) with the maximum
+        * possible value (0xFFFF) to minimize "System Partial Write" issued by
+        * spurious [DMA] memory updates of RX and TX ring descriptors.
+        *
+        * With an EITR granularity of 2 microseconds in the 82576, at most
+        * 7 to 8 spurious memory updates per second should be expected
+        * (65535 * 2 us = 131070 us ~= 0.131 second between updates).
+        *
+        * Because interrupts are not used at all, the MSI-X is not activated
+        * and interrupt moderation is controlled by EITR[0].
+        *
+        * Note that having [almost] disabled memory updates of RX and TX ring
+        * descriptors through the Interrupt Moderation mechanism, memory
+        * updates of ring descriptors are now moderated by the configurable
+        * value of Write-Back Threshold registers.
+        */
+       if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
+                       (hw->mac.type == e1000_i350)) {
+               uint32_t ivar;
+
+               /* Enable all RX & TX queues in the IVAR registers */
+               ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
+               for (i = 0; i < 8; i++)
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
+
+               /* Configure EITR with the maximum possible value (0xFFFF) */
+               E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
+       }
+
+       /* Don't reset the phy next time init gets called */
+       hw->phy.reset_disable = 1;
+
+       /* Setup link speed and duplex */
+       switch (dev->data->dev_conf.link_speed) {
+       case ETH_LINK_SPEED_AUTONEG:
+               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_10:
+               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_100:
+               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_1000:
+               if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
+                               (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
+                       hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_10000:
+       default:
+               goto error_invalid_config;
+       }
+       e1000_setup_link(hw);
+
+       PMD_INIT_LOG(DEBUG, "<<");
+
+       /* check if lsc interrupt feature is enabled */
+       if (dev->data->dev_conf.intr_conf.lsc != 0)
+               return eth_igb_interrupt_setup(dev);
+
+       return (0);
+
+error_invalid_config:
+       PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
+                       dev->data->dev_conf.link_speed,
+                       dev->data->dev_conf.link_duplex, dev->data->port_id);
+       return -1;
+}
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_igb_stop(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link;
+
+       igb_intr_disable(hw);
+       e1000_reset_hw(hw);
+       E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+       /* Power down the phy. Needed to make the link go Down */
+       e1000_power_down_phy(hw);
+
+       igb_dev_clear_queues(dev);
+
+       /* clear the recorded link status */
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static void
+eth_igb_close(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link;
+
+       eth_igb_stop(dev);
+       e1000_phy_hw_reset(hw);
+       igb_release_manageability(hw);
+       igb_hw_control_release(hw);
+
+       igb_dev_clear_queues(dev);
+
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static int
+igb_get_rx_buffer_size(struct e1000_hw *hw)
+{
+       uint32_t rx_buf_size;
+       if (hw->mac.type == e1000_82576) {
+               rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
+       } else if (hw->mac.type == e1000_82580) {
+               /* PBS needs to be translated according to a lookup table */
+               rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
+               rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
+               rx_buf_size = (rx_buf_size << 10);
+       } else {
+               rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
+       }
+
+       return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ *  Initialize the hardware
+ *
+ **********************************************************************/
+static int
+igb_hardware_init(struct e1000_hw *hw)
+{
+       uint32_t rx_buf_size;
+       int diag;
+
+       /* Let the firmware know the OS is in control */
+       igb_hw_control_acquire(hw);
+
+       /*
+        * These parameters control the automatic generation (Tx) and
+        * response (Rx) to Ethernet PAUSE frames.
+        * - High water mark should allow for at least two standard size (1518)
+        *   frames to be received after sending an XOFF.
+        * - Low water mark works best when it is very near the high water mark.
+        *   This allows the receiver to restart by sending XON when it has
+        *   drained a bit. Here we use an arbitrary value of 1500 which will
+        *   restart after one full frame is pulled from the buffer. There
+        *   could be several smaller frames in the buffer and if so they will
+        *   not trigger the XON until their total number reduces the buffer
+        *   by 1500.
+        * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+        */
+       rx_buf_size = igb_get_rx_buffer_size(hw);
+
+       hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+       hw->fc.low_water = hw->fc.high_water - 1500;
+       hw->fc.pause_time = IGB_FC_PAUSE_TIME;
+       hw->fc.send_xon = 1;
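+
+       /*
+        * Worked example (illustrative): with a 64 KB Rx packet buffer,
+        * high_water = 65536 - (1518 * 2) = 62500 bytes and
+        * low_water  = 62500 - 1500 = 61000 bytes.
+        */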
+
+       /* Set Flow control, use the tunable location if sane */
+       if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
+               hw->fc.requested_mode = igb_fc_setting;
+       else
+               hw->fc.requested_mode = e1000_fc_none;
+
+       /* Issue a global reset */
+       e1000_reset_hw(hw);
+       E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+       diag = e1000_init_hw(hw);
+       if (diag < 0)
+               return (diag);
+
+       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+       e1000_get_phy_info(hw);
+       e1000_check_for_link(hw);
+
+       return (0);
+}
+
+/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
+static void
+eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw_stats *stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       int pause_frames;
+
+       if(hw->phy.media_type == e1000_media_type_copper ||
+           (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+               stats->symerrs +=
+                   E1000_READ_REG(hw,E1000_SYMERRS);
+               stats->sec += E1000_READ_REG(hw, E1000_SEC);
+       }
+
+       stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+       stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+       stats->scc += E1000_READ_REG(hw, E1000_SCC);
+       stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+       stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+       stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+       stats->colc += E1000_READ_REG(hw, E1000_COLC);
+       stats->dc += E1000_READ_REG(hw, E1000_DC);
+       stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+       stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+       stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+       /*
+       ** For watchdog management we need to know if we have been
+       ** paused during the last interval, so capture that here.
+       */
+       pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+       stats->xoffrxc += pause_frames;
+       stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+       stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+       stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+       stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+       stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+       stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+       stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+       stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+       stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+       stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+       stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+       stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+       /* For the 64-bit byte counters the low dword must be read first. */
+       /* Both registers clear on the read of the high dword */
+
+       stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+       stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+       stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+       stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+       stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+       stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+       stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+       stats->roc += E1000_READ_REG(hw, E1000_ROC);
+       stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+       stats->tor += E1000_READ_REG(hw, E1000_TORH);
+       stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+       stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+       stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+       stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+       stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+       stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+       stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+       stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+       stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+       stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+       stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+       /* Interrupt Counts */
+
+       stats->iac += E1000_READ_REG(hw, E1000_IAC);
+       stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+       stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+       stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+       stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+       stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+       stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+       stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+       stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+       /* Host to Card Statistics */
+
+       stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+       stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+       stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+       stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+       stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+       stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+       stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+       stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
+       stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
+       stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
+       stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
+       stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+       stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+       stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+       stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+       stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+       stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+       stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+       stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+       stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+       if (rte_stats == NULL)
+               return;
+
+       /* Rx Errors */
+       rte_stats->ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
+           stats->ruc + stats->roc + stats->mpc + stats->cexterr;
+
+       /* Tx Errors */
+       rte_stats->oerrors = stats->ecol + stats->latecol;
+
+       rte_stats->ipackets = stats->gprc;
+       rte_stats->opackets = stats->gptc;
+       rte_stats->ibytes   = stats->gorc;
+       rte_stats->obytes   = stats->gotc;
+}
+
+static void
+eth_igb_stats_reset(struct rte_eth_dev *dev)
+{
+       struct e1000_hw_stats *hw_stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       /* HW registers are cleared on read */
+       eth_igb_stats_get(dev, NULL);
+
+       /* Reset software totals */
+       memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+static void
+eth_igb_infos_get(struct rte_eth_dev *dev,
+                   struct rte_eth_dev_info *dev_info)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+       dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
+       dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+
+       switch (hw->mac.type) {
+       case e1000_82575:
+               dev_info->max_rx_queues = 4;
+               dev_info->max_tx_queues = 4;
+               break;
+
+       case e1000_82576:
+               dev_info->max_rx_queues = 16;
+               dev_info->max_tx_queues = 16;
+               break;
+
+       case e1000_82580:
+               dev_info->max_rx_queues = 8;
+               dev_info->max_tx_queues = 8;
+               break;
+
+       case e1000_i350:
+               dev_info->max_rx_queues = 8;
+               dev_info->max_tx_queues = 8;
+               break;
+
+       default:
+               /* Should not happen */
+               dev_info->max_rx_queues = 0;
+               dev_info->max_tx_queues = 0;
+       }
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link, old;
+       int link_check, count;
+
+       link_check = 0;
+       hw->mac.get_link_status = 1;
+
+       /* Wait up to 9 seconds for the link if wait_to_complete is set */
+       for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
+               /* Read the real link status */
+               switch (hw->phy.media_type) {
+               case e1000_media_type_copper:
+                       /* Do the work to read phy */
+                       e1000_check_for_link(hw);
+                       link_check = !hw->mac.get_link_status;
+                       break;
+
+               case e1000_media_type_fiber:
+                       e1000_check_for_link(hw);
+                       link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+                                     E1000_STATUS_LU);
+                       break;
+
+               case e1000_media_type_internal_serdes:
+                       e1000_check_for_link(hw);
+                       link_check = hw->mac.serdes_has_link;
+                       break;
+
+               default:
+               case e1000_media_type_unknown:
+                       break;
+               }
+               if (link_check || wait_to_complete == 0)
+                       break;
+               rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
+       }
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_read_link_status(dev, &link);
+       old = link;
+
+       /* Now we check if a transition has happened */
+       if (link_check) {
+               hw->mac.ops.get_link_up_info(hw, &link.link_speed,
+                                         &link.link_duplex);
+               link.link_status = 1;
+       } else if (!link_check) {
+               link.link_speed = 0;
+               link.link_duplex = 0;
+               link.link_status = 0;
+       }
+       rte_igb_dev_atomic_write_link_status(dev, &link);
+
+       /* not changed */
+       if (old.link_status == link.link_status)
+               return -1;
+
+       /* changed */
+       return 0;
+}
+
+/*
+ * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded.
+ */
+static void
+igb_hw_control_acquire(struct e1000_hw *hw)
+{
+       uint32_t ctrl_ext;
+
+       /* Let firmware know the driver has taken over */
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void
+igb_hw_control_release(struct e1000_hw *hw)
+{
+       uint32_t ctrl_ext;
+
+       /* Let firmware take over control of h/w */
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+                       ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * Bit of a misnomer: what this really means is
+ * to enable OS management of the system, i.e.
+ * to disable special hardware management features.
+ */
+static void
+igb_init_manageability(struct e1000_hw *hw)
+{
+       if (e1000_enable_mng_pass_thru(hw)) {
+               uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+               uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+               /* disable hardware interception of ARP */
+               manc &= ~(E1000_MANC_ARP_EN);
+
+               /* enable receiving management packets to the host */
+               manc |= E1000_MANC_EN_MNG2HOST;
+               manc2h |= 1 << 5;  /* Mng Port 623 */
+               manc2h |= 1 << 6;  /* Mng Port 664 */
+               E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+               E1000_WRITE_REG(hw, E1000_MANC, manc);
+       }
+}
+
+static void
+igb_release_manageability(struct e1000_hw *hw)
+{
+       if (e1000_enable_mng_pass_thru(hw)) {
+               uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+               manc |= E1000_MANC_ARP_EN;
+               manc &= ~E1000_MANC_EN_MNG2HOST;
+
+               E1000_WRITE_REG(hw, E1000_MANC, manc);
+       }
+}
+
+static void
+eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rctl;
+
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rctl;
+
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       rctl &= (~E1000_RCTL_UPE);
+       if (dev->data->all_multicast == 1)
+               rctl |= E1000_RCTL_MPE;
+       else
+               rctl &= (~E1000_RCTL_MPE);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rctl;
+
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       rctl |= E1000_RCTL_MPE;
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rctl;
+
+       if (dev->data->promiscuous == 1)
+               return; /* must remain in all_multicast mode */
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       rctl &= (~E1000_RCTL_MPE);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_vfta * shadow_vfta =
+               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+       uint32_t vfta;
+       uint32_t vid_idx;
+       uint32_t vid_bit;
+
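+       /*
+        * The 4096-entry VLAN filter table is laid out as 128 32-bit
+        * registers: bits 11:5 of the VLAN id select the register (vid_idx)
+        * and bits 4:0 select the bit inside it (vid_bit).
+        */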
+       vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+                             E1000_VFTA_ENTRY_MASK);
+       vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+       vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+       if (on)
+               vfta |= vid_bit;
+       else
+               vfta &= ~vid_bit;
+       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+
+       /* update local VFTA copy */
+       shadow_vfta->vfta[vid_idx] = vfta;
+}
+
+static void
+igb_vlan_hw_support_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_vfta * shadow_vfta =
+               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+       uint32_t reg;
+       int i;
+
+       /* VLAN Mode Enable */
+       reg = E1000_READ_REG(hw, E1000_CTRL);
+       reg |= E1000_CTRL_VME;
+       E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+       /* Filter Table Enable */
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg &= ~E1000_RCTL_CFIEN;
+       reg |= E1000_RCTL_VFE;
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+       /* Update maximum frame size */
+       reg = E1000_READ_REG(hw, E1000_RLPML);
+       reg += VLAN_TAG_SIZE;
+       E1000_WRITE_REG(hw, E1000_RLPML, reg);
+
+       /* restore VFTA table */
+       for (i = 0; i < E1000_VFTA_SIZE; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igb_vlan_hw_support_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reg;
+
+       /* VLAN Mode disable */
+       reg = E1000_READ_REG(hw, E1000_CTRL);
+       reg &= ~E1000_CTRL_VME;
+       E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+       E1000_WRITE_REG(hw, E1000_IMC, ~0);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * It enables the interrupt mask and then enables the interrupt.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
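+       /* Enable only the Link Status Change (LSC) interrupt. */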
+       E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
+       E1000_WRITE_FLUSH(hw);
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       return 0;
+}
+
+/*
+ * It reads ICR and gets the interrupt causes, checks them and sets a bit
+ * flag to request a link status update.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       uint32_t icr;
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* read-on-clear nic registers here */
+       icr = E1000_READ_REG(hw, E1000_ICR);
+       if (icr & E1000_ICR_LSC) {
+               intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+       }
+
+       return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is present.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       uint32_t tctl, rctl;
+       struct rte_eth_link link;
+       int ret;
+
+       if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
+               return -1;
+
+       intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       /* set get_link_status to check register later */
+       hw->mac.get_link_status = 1;
+       ret = eth_igb_link_update(dev, 0);
+
+       /* check if link has changed */
+       if (ret < 0)
+               return 0;
+
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_read_link_status(dev, &link);
+       if (link.link_status) {
+               PMD_INIT_LOG(INFO,
+                       " Port %d: Link Up - speed %u Mbps - %s\n",
+                       dev->data->port_id, (unsigned)link.link_speed,
+                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+                               "full-duplex" : "half-duplex");
+       } else {
+               PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
+                                       dev->data->port_id);
+       }
+       PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+                               dev->pci_dev->addr.domain,
+                               dev->pci_dev->addr.bus,
+                               dev->pci_dev->addr.devid,
+                               dev->pci_dev->addr.function);
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       if (link.link_status) {
+               /* enable Tx/Rx */
+               tctl |= E1000_TCTL_EN;
+               rctl |= E1000_RCTL_EN;
+       } else {
+               /* disable Tx/Rx */
+               tctl &= ~E1000_TCTL_EN;
+               rctl &= ~E1000_RCTL_EN;
+       }
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+       E1000_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered first.
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+       eth_igb_interrupt_get_status(dev);
+       eth_igb_interrupt_action(dev);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
+static int
+eth_igb_led_on(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_led_off(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct e1000_hw *hw;
+       int err;
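+       /* Translation table indexed by fc_conf->mode: none, rx pause, tx pause, full. */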
+       enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+               e1000_fc_none,
+               e1000_fc_rx_pause,
+               e1000_fc_tx_pause,
+               e1000_fc_full
+       };
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       rx_buf_size = igb_get_rx_buffer_size(hw);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+
+       /* At least reserve one Ethernet frame for watermark */
+       max_high_water = rx_buf_size - ETHER_MAX_LEN;
+       if ((fc_conf->high_water > max_high_water) ||
+               (fc_conf->high_water < fc_conf->low_water)) {
+               PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value\n");
+               PMD_INIT_LOG(ERR, "high water must be <= 0x%x\n", max_high_water);
+               return (-EINVAL);
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+       hw->fc.pause_time     = fc_conf->pause_time;
+       hw->fc.high_water     = fc_conf->high_water;
+       hw->fc.low_water      = fc_conf->low_water;
+       hw->fc.send_xon       = fc_conf->send_xon;
+
+       err = e1000_setup_link_generic(hw);
+       if (err == E1000_SUCCESS) {
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+       return (-EIO);
+}
+
+static void
+eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+               uint32_t index, __rte_unused uint32_t pool)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       e1000_rar_set(hw, mac_addr->addr_bytes, index);
+}
+
+static void
+eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+       uint8_t addr[ETHER_ADDR_LEN];
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       memset(addr, 0, sizeof(addr));
+
+       e1000_rar_set(hw, addr, index);
+}
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
new file mode 100644 (file)
index 0000000..1ea3d63
--- /dev/null
@@ -0,0 +1,1858 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+#include <sys/queue.h>
+
+#include <endian.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+
+#include "e1000_logs.h"
+#include "igb/e1000_api.h"
+#include "e1000_ethdev.h"
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+
+       m = __rte_mbuf_raw_alloc(mp);
+       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+       return (m);
+}
+
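+/*
+ * Physical (DMA) address of the packet data in an mbuf: the physical base
+ * address of the buffer plus the offset of the data pointer inside it.
+ * The _DEFAULT variant assumes the data starts right after the headroom.
+ */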
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+       (uint64_t) ((mb)->buf_physaddr +                   \
+                       (uint64_t) ((char *)((mb)->pkt.data) -     \
+                               (char *)(mb)->buf_addr))
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+       (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct igb_rx_entry {
+       struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igb_tx_entry {
+       struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+       uint16_t next_id; /**< Index of next descriptor in ring. */
+       uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igb_rx_queue {
+       struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
+       volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+       uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+       volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+       struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
+       struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+       struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+       uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+       uint16_t            rx_tail;    /**< current value of RDT register. */
+       uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+       uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+       uint16_t            queue_id;   /**< RX queue index. */
+       uint8_t             port_id;    /**< Device port identifier. */
+       uint8_t             pthresh;    /**< Prefetch threshold register. */
+       uint8_t             hthresh;    /**< Host threshold register. */
+       uint8_t             wthresh;    /**< Write-back threshold register. */
+       uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
+};
+
+/**
+ * Hardware context number
+ */
+enum igb_advctx_num {
+       IGB_CTX_0    = 0, /**< CTX0    */
+       IGB_CTX_1    = 1, /**< CTX1    */
+       IGB_CTX_NUM  = 2, /**< CTX NUM */
+};
+
+/**
+ * Structure to check if a new context needs to be built
+ */
+struct igb_advctx_info {
+       uint16_t flags;           /**< ol_flags related to context build. */
+       uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
+       uint32_t vlan_macip_lens; /**< VLAN tag and MAC/IP header lengths. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igb_tx_queue {
+       volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
+       uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
+       struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
+       volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
+       uint32_t               txd_type;      /**< Device-specific TXD type */
+       uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
+       uint16_t               tx_tail;  /**< Current value of TDT register. */
+       uint16_t               tx_head;  /**< Index of first used TX descriptor. */
+       uint16_t               queue_id; /**< TX queue index. */
+       uint8_t                port_id;  /**< Device port identifier. */
+       uint8_t                pthresh;  /**< Prefetch threshold register. */
+       uint8_t                hthresh;  /**< Host threshold register. */
+       uint8_t                wthresh;  /**< Write-back threshold register. */
+       uint32_t               ctx_curr; /**< Current used hardware descriptor. */
+       uint32_t               ctx_start;/**< Start context position for transmit queue. */
+       struct igb_advctx_info ctx_cache[IGB_CTX_NUM];  /**< Hardware context history.*/
+};
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_igb_prefetch(p)    rte_prefetch0(p)
+#else
+#define rte_igb_prefetch(p)    do {} while(0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+/*********************************************************************
+ *
+ *  TX function
+ *
+ **********************************************************************/
+
+/*
+ * Advanced context descriptors are almost the same between igb and ixgbe.
+ * This is kept as a separate function to leave room for optimization here;
+ * rework is required to go with pre-defined values.
+ */
+
+static inline void
+igbe_set_xmit_ctx(struct igb_tx_queue* txq,
+               volatile struct e1000_adv_tx_context_desc *ctx_txd,
+               uint16_t ol_flags, uint32_t vlan_macip_lens)
+{
+       uint32_t type_tucmd_mlhl;
+       uint32_t mss_l4len_idx;
+       uint32_t ctx_idx, ctx_curr;
+       uint32_t cmp_mask;
+
+       ctx_curr = txq->ctx_curr;
+       ctx_idx = ctx_curr + txq->ctx_start;
+
+       cmp_mask = 0;
+       type_tucmd_mlhl = 0;
+
+       if (ol_flags & PKT_TX_VLAN_PKT) {
+               cmp_mask |= TX_VLAN_CMP_MASK;
+       }
+
+       if (ol_flags & PKT_TX_IP_CKSUM) {
+               type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
+               cmp_mask |= TX_MAC_LEN_CMP_MASK;
+       }
+
+       /* Specify which HW CTX to upload. */
+       mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
+       switch (ol_flags & PKT_TX_L4_MASK) {
+       case PKT_TX_UDP_CKSUM:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       case PKT_TX_TCP_CKSUM:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       case PKT_TX_SCTP_CKSUM:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       default:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               break;
+       }
+
+       txq->ctx_cache[ctx_curr].flags           = ol_flags;
+       txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
+       txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+
+       ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+       ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+       ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
+       ctx_txd->seqnum_seed     = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
+               uint32_t vlan_macip_lens)
+{
+       /* If match with the current context */
+       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+                       return txq->ctx_curr;
+       }
+
+       /* If match with the second context */
+       txq->ctx_curr ^= 1;
+       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+                       return txq->ctx_curr;
+       }
+
+       /* Mismatch: a new context descriptor is required */
+       return (IGB_CTX_NUM);
+}
+
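+/* Translate mbuf TX checksum offload flags into olinfo_status option bits. */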
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
+{
+       static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
+       static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
+       uint32_t tmp;
+
+       tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
+       tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+       return tmp;
+}
+
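+/* Translate the mbuf VLAN TX flag into the VLE (VLAN enable) command bit. */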
+static inline uint32_t
+tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
+{
+       static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
+       return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+}
+
+uint16_t
+eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+              uint16_t nb_pkts)
+{
+       struct igb_tx_entry *sw_ring;
+       struct igb_tx_entry *txe, *txn;
+       volatile union e1000_adv_tx_desc *txr;
+       volatile union e1000_adv_tx_desc *txd;
+       struct rte_mbuf     *tx_pkt;
+       struct rte_mbuf     *m_seg;
+       uint64_t buf_dma_addr;
+       uint32_t olinfo_status;
+       uint32_t cmd_type_len;
+       uint32_t pkt_len;
+       uint16_t slen;
+       uint16_t ol_flags;
+       uint16_t tx_end;
+       uint16_t tx_id;
+       uint16_t tx_last;
+       uint16_t nb_tx;
+       uint16_t tx_ol_req;
+       uint32_t new_ctx;
+       uint32_t ctx;
+       uint32_t vlan_macip_lens;
+
+       sw_ring = txq->sw_ring;
+       txr     = txq->tx_ring;
+       tx_id   = txq->tx_tail;
+       txe = &sw_ring[tx_id];
+
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               tx_pkt = *tx_pkts++;
+               pkt_len = tx_pkt->pkt.pkt_len;
+
+               RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+               /*
+                * The number of descriptors that must be allocated for a
+                * packet is the number of segments of that packet, plus 1
+                * Context Descriptor for the VLAN Tag Identifier, if any.
+                * Determine the last TX descriptor to allocate in the TX ring
+                * for the packet, starting from the current position (tx_id)
+                * in the ring.
+                */
+               tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+
+               ol_flags = tx_pkt->ol_flags;
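+               /*
+                * Pack VLAN TCI (upper 16 bits), L2 length and L3 length
+                * into the vlan_macip_lens word consumed by the context
+                * descriptor logic below.
+                */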
+               vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) |
+                       (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) |
+                       tx_pkt->pkt.l3_len;
+               tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
+
+               /* If a Context Descriptor needs to be built. */
+               if (tx_ol_req) {
+                       ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
+                       /* Only allocate a context descriptor if required */
+                       new_ctx = (ctx == IGB_CTX_NUM);
+                       ctx = txq->ctx_curr;
+                       tx_last = (uint16_t) (tx_last + new_ctx);
+               }
+               if (tx_last >= txq->nb_tx_desc)
+                       tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+               PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+                          " tx_first=%u tx_last=%u\n",
+                          (unsigned) txq->port_id,
+                          (unsigned) txq->queue_id,
+                          (unsigned) pkt_len,
+                          (unsigned) tx_id,
+                          (unsigned) tx_last);
+
+               /*
+                * Check if there are enough free descriptors in the TX ring
+                * to transmit the next packet.
+                * This operation is based on the two following rules:
+                *
+                *   1- Only check that the last needed TX descriptor can be
+                *      allocated (by construction, if that descriptor is free,
+                *      all intermediate ones are also free).
+                *
+                *      For this purpose, the index of the last TX descriptor
+                *      used for a packet (the "last descriptor" of a packet)
+                *      is recorded in the TX entries (the last one included)
+                *      that are associated with all TX descriptors allocated
+                *      for that packet.
+                *
+                *   2- Avoid allocating the last free TX descriptor of the
+                *      ring, in order to never set the TDT register with the
+                *      same value stored in parallel by the NIC in the TDH
+                *      register, which makes the TX engine of the NIC enter
+                *      a deadlock situation.
+                *
+                *      By extension, avoid allocating a free descriptor that
+                *      belongs to the last set of free descriptors allocated
+                *      to the same packet previously transmitted.
+                */
+
+               /*
+                * The "last descriptor" of the previously sent packet, if any,
+                * that used the last descriptor we now want to allocate.
+                */
+               tx_end = sw_ring[tx_last].last_id;
+
+               /*
+                * The next descriptor following that "last descriptor" in the
+                * ring.
+                */
+               tx_end = sw_ring[tx_end].next_id;
+
+               /*
+                * The "last descriptor" associated with that next descriptor.
+                */
+               tx_end = sw_ring[tx_end].last_id;
+
+               /*
+                * Check that this descriptor is free.
+                */
+               if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
+                       if (nb_tx == 0)
+                               return (0);
+                       goto end_of_tx;
+               }
+
+               /*
+                * Set common flags of all TX Data Descriptors.
+                *
+                * The following bits must be set in all Data Descriptors:
+                *   - E1000_ADVTXD_DTYP_DATA
+                *   - E1000_ADVTXD_DCMD_DEXT
+                *
+                * The following bits must be set in the first Data Descriptor
+                * and are ignored in the other ones:
+                *   - E1000_ADVTXD_DCMD_IFCS
+                *   - E1000_ADVTXD_MAC_1588
+                *   - E1000_ADVTXD_DCMD_VLE
+                *
+                * The following bits must only be set in the last Data
+                * Descriptor:
+                *   - E1000_TXD_CMD_EOP
+                *
+                * The following bits can be set in any Data Descriptor, but
+                * are only set in the last Data Descriptor:
+                *   - E1000_TXD_CMD_RS
+                */
+               cmd_type_len = txq->txd_type |
+                       E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+               olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
+#if defined(RTE_LIBRTE_IEEE1588)
+               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                       cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+#endif
+               if (tx_ol_req) {
+                       /* Setup TX Advanced context descriptor if required */
+                       if (new_ctx) {
+                               volatile struct e1000_adv_tx_context_desc *
+                                   ctx_txd;
+
+                               ctx_txd = (volatile struct
+                                   e1000_adv_tx_context_desc *)
+                                   &txr[tx_id];
+
+                               txn = &sw_ring[txe->next_id];
+                               RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+                               if (txe->mbuf != NULL) {
+                                       rte_pktmbuf_free_seg(txe->mbuf);
+                                       txe->mbuf = NULL;
+                               }
+
+                               igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+                                   vlan_macip_lens);
+
+                               txe->last_id = tx_last;
+                               tx_id = txe->next_id;
+                               txe = txn;
+                       }
+
+                       /* Setup the TX Advanced Data Descriptor */
+                       cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
+                       olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+                       olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
+               }
+
+               m_seg = tx_pkt;
+               do {
+                       txn = &sw_ring[txe->next_id];
+                       txd = &txr[tx_id];
+
+                       if (txe->mbuf != NULL)
+                               rte_pktmbuf_free_seg(txe->mbuf);
+                       txe->mbuf = m_seg;
+
+                       /*
+                        * Set up transmit descriptor.
+                        */
+                       slen = (uint16_t) m_seg->pkt.data_len;
+                       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+                       txd->read.buffer_addr =
+                               rte_cpu_to_le_64(buf_dma_addr);
+                       txd->read.cmd_type_len =
+                               rte_cpu_to_le_32(cmd_type_len | slen);
+                       txd->read.olinfo_status =
+                               rte_cpu_to_le_32(olinfo_status);
+                       txe->last_id = tx_last;
+                       tx_id = txe->next_id;
+                       txe = txn;
+                       m_seg = m_seg->pkt.next;
+               } while (m_seg != NULL);
+
+               /*
+                * The last packet data descriptor needs End Of Packet (EOP)
+                * and Report Status (RS).
+                */
+               txd->read.cmd_type_len |=
+                       rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
+       }
+ end_of_tx:
+       rte_wmb();
+
+       /*
+        * Set the Transmit Descriptor Tail (TDT).
+        */
+       E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+       PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+                  (unsigned) txq->port_id, (unsigned) txq->queue_id,
+                  (unsigned) tx_id, (unsigned) nb_tx);
+       txq->tx_tail = tx_id;
+
+       return (nb_tx);
+}
+
+/*********************************************************************
+ *
+ *  RX functions
+ *
+ **********************************************************************/
+static inline uint16_t
+rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+{
+       uint16_t pkt_flags;
+
+       static uint16_t ip_pkt_types_map[16] = {
+               0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
+               PKT_RX_IPV6_HDR, 0, 0, 0,
+               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+       };
+
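+       /*
+        * Bits 3:0 of hl_tp_rs carry the RSS type (non-zero when a hash is
+        * present); bits 7:4 carry the packet type bits used to index the
+        * table above.
+        */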
+#if defined(RTE_LIBRTE_IEEE1588)
+       static uint32_t ip_pkt_etqf_map[8] = {
+               0, 0, 0, PKT_RX_IEEE1588_PTP,
+               0, 0, 0, 0,
+       };
+
+       pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
+                               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#else
+       pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
+                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#endif
+       return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 :
+                                       PKT_RX_RSS_HASH);
+}
+
+static inline uint16_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+       uint16_t pkt_flags;
+
+       /* Check if VLAN present */
+       pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+
+#if defined(RTE_LIBRTE_IEEE1588)
+       if (rx_status & E1000_RXD_STAT_TMST)
+               pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+       return pkt_flags;
+}
+
+static inline uint16_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+       /*
+        * Bit 30: IPE, IPv4 checksum error
+        * Bit 29: L4I, L4 integrity error
+        */
+
+       static uint16_t error_to_pkt_flags_map[4] = {
+               0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+               PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+       };
+       return error_to_pkt_flags_map[(rx_status >>
+               E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
+}
+
+uint16_t
+eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+              uint16_t nb_pkts)
+{
+       volatile union e1000_adv_rx_desc *rx_ring;
+       volatile union e1000_adv_rx_desc *rxdp;
+       struct igb_rx_entry *sw_ring;
+       struct igb_rx_entry *rxe;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       union e1000_adv_rx_desc rxd;
+       uint64_t dma_addr;
+       uint32_t staterr;
+       uint32_t hlen_type_rss;
+       uint16_t pkt_len;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint16_t nb_hold;
+       uint16_t pkt_flags;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
+       while (nb_rx < nb_pkts) {
+               /*
+                * The order of operations here is important as the DD status
+                * bit must not be read after any other descriptor fields.
+                * rx_ring and rxdp are pointing to volatile data so the order
+                * of accesses cannot be reordered by the compiler. If they were
+                * not volatile, they could be reordered which could lead to
+                * using invalid descriptor fields when read from rxd.
+                */
+               rxdp = &rx_ring[rx_id];
+               staterr = rxdp->wb.upper.status_error;
+               if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+                       break;
+               rxd = *rxdp;
+
+               /*
+                * End of packet.
+                *
+                * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
+                * likely to be invalid and to be dropped by the various
+                * validation checks performed by the network stack.
+                *
+                * Allocate a new mbuf to replenish the RX ring descriptor.
+                * If the allocation fails:
+                *    - arrange for that RX descriptor to be the first one
+                *      being parsed the next time the receive function is
+                *      invoked [on the same queue].
+                *
+                *    - Stop parsing the RX ring and return immediately.
+                *
+                * This policy does not drop the packet received in the RX
+                * descriptor for which the allocation of a new mbuf failed.
+                * Thus, it allows that packet to be later retrieved if
+                * mbufs have been freed in the meantime.
+                * As a side effect, holding RX descriptors instead of
+                * systematically giving them back to the NIC may lead to
+                * RX ring exhaustion situations.
+                * However, the NIC can gracefully prevent such situations
+                * from happening by sending specific "back-pressure" flow control
+                * frames to its peer(s).
+                */
+               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x pkt_len=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) staterr,
+                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (nmb == NULL) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  (unsigned) rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               nb_hold++;
+               rxe = &sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf while processing current one. */
+               rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+               /*
+                * When next RX descriptor is on a cache-line boundary,
+                * prefetch the next 4 RX descriptors and the next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_igb_prefetch(&rx_ring[rx_id]);
+                       rte_igb_prefetch(&sw_ring[rx_id]);
+               }
+
+               rxm = rxe->mbuf;
+               rxe->mbuf = nmb;
+               dma_addr =
+                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rxdp->read.hdr_addr = dma_addr;
+               rxdp->read.pkt_addr = dma_addr;
+
+               /*
+                * Initialize the returned mbuf.
+                * 1) setup generic mbuf fields:
+                *    - number of segments,
+                *    - next segment,
+                *    - packet length,
+                *    - RX port identifier.
+                * 2) integrate hardware offload data, if any:
+                *    - RSS flag & hash,
+                *    - IP checksum flag,
+                *    - VLAN TCI, if any,
+                *    - error flags.
+                */
+               pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+                                     rxq->crc_len);
+               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch(rxm->pkt.data);
+               rxm->pkt.nb_segs = 1;
+               rxm->pkt.next = NULL;
+               rxm->pkt.pkt_len = pkt_len;
+               rxm->pkt.data_len = pkt_len;
+               rxm->pkt.in_port = rxq->port_id;
+
+               rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+               /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+               rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+               pkt_flags = (pkt_flags |
+                                       rx_desc_status_to_pkt_flags(staterr));
+               pkt_flags = (pkt_flags |
+                                       rx_desc_error_to_pkt_flags(staterr));
+               rxm->ol_flags = pkt_flags;
+
+               /*
+                * Store the mbuf address into the next entry of the array
+                * of returned packets.
+                */
+               rx_pkts[nb_rx++] = rxm;
+       }
+       rxq->rx_tail = rx_id;
+
+       /*
+        * If the number of free RX descriptors is greater than the RX free
+        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+        * register.
+        * Update the RDT with the value of the last processed RX descriptor
+        * minus 1, to guarantee that the RDT register is never equal to the
+        * RDH register, which creates a "full" ring situation from the
+        * hardware point of view...
+        */
+       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+                          "nb_hold=%u nb_rx=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) nb_hold,
+                          (unsigned) nb_rx);
+               rx_id = (uint16_t) ((rx_id == 0) ?
+                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+       return (nb_rx);
+}
+
+uint16_t
+eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+                        uint16_t nb_pkts)
+{
+       volatile union e1000_adv_rx_desc *rx_ring;
+       volatile union e1000_adv_rx_desc *rxdp;
+       struct igb_rx_entry *sw_ring;
+       struct igb_rx_entry *rxe;
+       struct rte_mbuf *first_seg;
+       struct rte_mbuf *last_seg;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       union e1000_adv_rx_desc rxd;
+       uint64_t dma; /* Physical address of mbuf data buffer */
+       uint32_t staterr;
+       uint32_t hlen_type_rss;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint16_t nb_hold;
+       uint16_t data_len;
+       uint16_t pkt_flags;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
+
+       /*
+        * Retrieve RX context of current packet, if any.
+        */
+       first_seg = rxq->pkt_first_seg;
+       last_seg = rxq->pkt_last_seg;
+
+       while (nb_rx < nb_pkts) {
+       next_desc:
+               /*
+                * The order of operations here is important as the DD status
+                * bit must not be read after any other descriptor fields.
+                * rx_ring and rxdp are pointing to volatile data so the order
+                * of accesses cannot be reordered by the compiler. If they were
+                * not volatile, they could be reordered which could lead to
+                * using invalid descriptor fields when read from rxd.
+                */
+               rxdp = &rx_ring[rx_id];
+               staterr = rxdp->wb.upper.status_error;
+               if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+                       break;
+               rxd = *rxdp;
+
+               /*
+                * Descriptor done.
+                *
+                * Allocate a new mbuf to replenish the RX ring descriptor.
+                * If the allocation fails:
+                *    - arrange for that RX descriptor to be the first one
+                *      being parsed the next time the receive function is
+                *      invoked [on the same queue].
+                *
+                *    - Stop parsing the RX ring and return immediately.
+                *
+                * This policy does not drop the packet received in the RX
+                * descriptor for which the allocation of a new mbuf failed.
+                * Thus, it allows that packet to be later retrieved if
+                * mbufs have been freed in the meantime.
+                * As a side effect, holding RX descriptors instead of
+                * systematically giving them back to the NIC may lead to
+                * RX ring exhaustion situations.
+                * However, the NIC can gracefully prevent such situations
+                * from happening by sending specific "back-pressure" flow control
+                * frames to its peer(s).
+                */
+               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x data_len=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) staterr,
+                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (nmb == NULL) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  (unsigned) rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               nb_hold++;
+               rxe = &sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf while processing current one. */
+               rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+               /*
+                * When next RX descriptor is on a cache-line boundary,
+                * prefetch the next 4 RX descriptors and the next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_igb_prefetch(&rx_ring[rx_id]);
+                       rte_igb_prefetch(&sw_ring[rx_id]);
+               }
+
+               /*
+                * Update RX descriptor with the physical address of the new
+                * data buffer of the new allocated mbuf.
+                */
+               rxm = rxe->mbuf;
+               rxe->mbuf = nmb;
+               dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rxdp->read.pkt_addr = dma;
+               rxdp->read.hdr_addr = dma;
+
+               /*
+                * Set data length & data buffer address of mbuf.
+                */
+               data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+               rxm->pkt.data_len = data_len;
+               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+               /*
+                * If this is the first buffer of the received packet,
+                * set the pointer to the first mbuf of the packet and
+                * initialize its context.
+                * Otherwise, update the total length and the number of segments
+                * of the current scattered packet, and update the pointer to
+                * the last mbuf of the current packet.
+                */
+               if (first_seg == NULL) {
+                       first_seg = rxm;
+                       first_seg->pkt.pkt_len = data_len;
+                       first_seg->pkt.nb_segs = 1;
+               } else {
+                       first_seg->pkt.pkt_len += data_len;
+                       first_seg->pkt.nb_segs++;
+                       last_seg->pkt.next = rxm;
+               }
+
+               /*
+                * If this is not the last buffer of the received packet,
+                * update the pointer to the last mbuf of the current scattered
+                * packet and continue to parse the RX ring.
+                */
+               if (! (staterr & E1000_RXD_STAT_EOP)) {
+                       last_seg = rxm;
+                       goto next_desc;
+               }
+
+               /*
+                * This is the last buffer of the received packet.
+                * If the CRC is not stripped by the hardware:
+                *   - Subtract the CRC length from the total packet length.
+                *   - If the last buffer only contains the whole CRC or a part
+                *     of it, free the mbuf associated to the last buffer.
+                *     If part of the CRC is also contained in the previous
+                *     mbuf, subtract the length of that CRC part from the
+                *     data length of the previous mbuf.
+                */
+               rxm->pkt.next = NULL;
+               if (unlikely(rxq->crc_len > 0)) {
+                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       if (data_len <= ETHER_CRC_LEN) {
+                               rte_pktmbuf_free_seg(rxm);
+                               first_seg->pkt.nb_segs--;
+                               last_seg->pkt.data_len = (uint16_t)
+                                       (last_seg->pkt.data_len -
+                                        (ETHER_CRC_LEN - data_len));
+                               last_seg->pkt.next = NULL;
+                       } else
+                               rxm->pkt.data_len =
+                                       (uint16_t) (data_len - ETHER_CRC_LEN);
+               }
+
+               /*
+                * Initialize the first mbuf of the returned packet:
+                *    - RX port identifier,
+                *    - hardware offload data, if any:
+                *      - RSS flag & hash,
+                *      - IP checksum flag,
+                *      - VLAN TCI, if any,
+                *      - error flags.
+                */
+               first_seg->pkt.in_port = rxq->port_id;
+               first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+
+               /*
+                * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+                * set in the pkt_flags field.
+                */
+               first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+               pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
+               pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+               first_seg->ol_flags = pkt_flags;
+
+               /* Prefetch data of first segment, if configured to do so. */
+               rte_packet_prefetch(first_seg->pkt.data);
+
+               /*
+                * Store the mbuf address into the next entry of the array
+                * of returned packets.
+                */
+               rx_pkts[nb_rx++] = first_seg;
+
+               /*
+                * Setup receipt context for a new packet.
+                */
+               first_seg = NULL;
+       }
+
+       /*
+        * Record index of the next RX descriptor to probe.
+        */
+       rxq->rx_tail = rx_id;
+
+       /*
+        * Save receive context.
+        */
+       rxq->pkt_first_seg = first_seg;
+       rxq->pkt_last_seg = last_seg;
+
+       /*
+        * If the number of free RX descriptors is greater than the RX free
+        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+        * register.
+        * Update the RDT with the value of the last processed RX descriptor
+        * minus 1, to guarantee that the RDT register is never equal to the
+                * RDH register, which creates a "full" ring situation from the
+        * hardware point of view...
+        */
+       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+                          "nb_hold=%u nb_rx=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) nb_hold,
+                          (unsigned) nb_rx);
+               rx_id = (uint16_t) ((rx_id == 0) ?
+                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+       return (nb_rx);
+}
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA must be aligned on a 16-byte boundary, but TDLEN/RDLEN must be a
+ * multiple of 128 bytes. We therefore align TDBA/RDBA on a 128-byte boundary,
+ * which also makes good use of the cache line size (the hardware supports
+ * cache line sizes up to 128 bytes).
+ */
+#define IGB_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
+ * descriptors must meet the following condition:
+ *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define IGB_MIN_RING_DESC 32
+#define IGB_MAX_RING_DESC 4096
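+
+/*
+ * For example, assuming the 16-byte advanced descriptor format of the
+ * 82575/82576 (sizeof(union e1000_adv_rx_desc) == 16), the condition
+ * above reduces to num_ring_desc being a multiple of 8, so counts such
+ * as 512 or 1024 within [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC] are valid.
+ */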
+
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+                     uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       const struct rte_memzone *mz;
+
+       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+                       dev->driver->pci_drv.name, ring_name,
+                               dev->data->port_id, queue_id);
+       mz = rte_memzone_lookup(z_name);
+       if (mz)
+               return mz;
+
+       return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
+                       socket_id, 0, IGB_ALIGN);
+}
+
+static void
+igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+       unsigned i;
+
+       if (txq->sw_ring != NULL) {
+               for (i = 0; i < txq->nb_tx_desc; i++) {
+                       if (txq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+static void
+igb_tx_queue_release(struct igb_tx_queue *txq)
+{
+       igb_tx_queue_release_mbufs(txq);
+       rte_free(txq->sw_ring);
+       rte_free(txq);
+}
+
+int
+igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
+       struct igb_tx_queue **txq;
+
+       if (dev->data->tx_queues == NULL) {
+               dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+                               sizeof(struct igb_tx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+               if (dev->data->tx_queues == NULL) {
+                       dev->data->nb_tx_queues = 0;
+                       return -ENOMEM;
+               }
+       } else {
+               if (nb_queues < old_nb_queues)
+                       for (i = nb_queues; i < old_nb_queues; i++)
+                               igb_tx_queue_release(dev->data->tx_queues[i]);
+
+               if (nb_queues != old_nb_queues) {
+                       txq = rte_realloc(dev->data->tx_queues,
+                               sizeof(struct igb_tx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+                       if (txq == NULL)
+                               return -ENOMEM;
+                       else
+                               dev->data->tx_queues = txq;
+                       if (nb_queues > old_nb_queues)
+                               memset(&(txq[old_nb_queues]), 0,
+                                       sizeof(struct igb_tx_queue *) *
+                                       (nb_queues - old_nb_queues));
+               }
+       }
+       dev->data->nb_tx_queues = nb_queues;
+
+       return 0;
+}
+
+static void
+igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
+{
+       txq->tx_head = 0;
+       txq->tx_tail = 0;
+       txq->ctx_curr = 0;
+       memset((void*)&txq->ctx_cache, 0,
+               IGB_CTX_NUM * sizeof(struct igb_advctx_info));
+}
+
+static void
+igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
+{
+       struct igb_tx_entry *txe = txq->sw_ring;
+       uint32_t size;
+       uint16_t i, prev;
+       struct e1000_hw *hw;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
+       /* Zero out HW ring memory */
+       for (i = 0; i < size; i++) {
+               ((volatile char *)txq->tx_ring)[i] = 0;
+       }
+
+       /* Initialize ring entries */
+       prev = txq->nb_tx_desc - 1;
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
+
+               txd->wb.status = E1000_TXD_STAT_DD;
+               txe[i].mbuf = NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->txd_type = E1000_ADVTXD_DTYP_DATA;
+       /* 82575-specific: each TX queue uses 2 HW contexts */
+       if (hw->mac.type == e1000_82575)
+               txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
+
+       igb_reset_tx_queue_stat(txq);
+}
+
+int
+eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
+                        uint16_t queue_idx,
+                        uint16_t nb_desc,
+                        unsigned int socket_id,
+                        const struct rte_eth_txconf *tx_conf)
+{
+       const struct rte_memzone *tz;
+       struct igb_tx_queue *txq;
+       struct e1000_hw     *hw;
+       uint32_t size;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Validate number of transmit descriptors.
+        * The count must lie within the hardware limits, and the resulting
+        * ring size in bytes must be a multiple of IGB_ALIGN.
+        */
+       if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
+           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+               return -EINVAL;
+       }
+
+       /*
+        * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
+        * driver.
+        */
+       if (tx_conf->tx_free_thresh != 0)
+               RTE_LOG(WARNING, PMD,
+                       "The tx_free_thresh parameter is not "
+                       "used for the 1G driver.");
+       if (tx_conf->tx_rs_thresh != 0)
+               RTE_LOG(WARNING, PMD,
+                       "The tx_rs_thresh parameter is not "
+                       "used for the 1G driver.");
+       if (tx_conf->tx_thresh.wthresh == 0)
+               RTE_LOG(WARNING, PMD,
+                       "To improve 1G driver performance, consider setting "
+                       "the TX WTHRESH value to 4, 8, or 16.");
+
+       /* Free memory prior to re-allocation if needed */
+       if (dev->data->tx_queues[queue_idx] != NULL)
+               igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
+
+       /* First allocate the tx queue data structure */
+       txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
+                                                       CACHE_LINE_SIZE);
+       if (txq == NULL)
+               return (-ENOMEM);
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+       tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                                       size, socket_id);
+       if (tz == NULL) {
+               igb_tx_queue_release(txq);
+               return (-ENOMEM);
+       }
+
+       txq->nb_tx_desc = nb_desc;
+       txq->pthresh = tx_conf->tx_thresh.pthresh;
+       txq->hthresh = tx_conf->tx_thresh.hthresh;
+       txq->wthresh = tx_conf->tx_thresh.wthresh;
+       txq->queue_id = queue_idx;
+       txq->port_id = dev->data->port_id;
+
+       txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
+       txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+       txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+
+       size = sizeof(union e1000_adv_tx_desc) * nb_desc;
+
+       /* Allocate software ring */
+       txq->sw_ring = rte_zmalloc("txq->sw_ring",
+                                  sizeof(struct igb_tx_entry) * nb_desc,
+                                  CACHE_LINE_SIZE);
+       if (txq->sw_ring == NULL) {
+               igb_tx_queue_release(txq);
+               return (-ENOMEM);
+       }
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+                    txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+       igb_reset_tx_queue(txq, dev);
+       dev->tx_pkt_burst = eth_igb_xmit_pkts;
+       dev->data->tx_queues[queue_idx] = txq;
+
+       return (0);
+}
+
+static void
+igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+{
+       unsigned i;
+
+       if (rxq->sw_ring != NULL) {
+               for (i = 0; i < rxq->nb_rx_desc; i++) {
+                       if (rxq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+                               rxq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+static void
+igb_rx_queue_release(struct igb_rx_queue *rxq)
+{
+       igb_rx_queue_release_mbufs(rxq);
+       rte_free(rxq->sw_ring);
+       rte_free(rxq);
+}
+
+int
+igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
+       struct igb_rx_queue **rxq;
+
+       if (dev->data->rx_queues == NULL) {
+               dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+                               sizeof(struct igb_rx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+               if (dev->data->rx_queues == NULL) {
+                       dev->data->nb_rx_queues = 0;
+                       return -ENOMEM;
+               }
+       } else {
+               for (i = nb_queues; i < old_nb_queues; i++) {
+                       igb_rx_queue_release(dev->data->rx_queues[i]);
+                       dev->data->rx_queues[i] = NULL;
+               }
+               if (nb_queues != old_nb_queues) {
+                       rxq = rte_realloc(dev->data->rx_queues,
+                               sizeof(struct igb_rx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+                       if (rxq == NULL)
+                               return -ENOMEM;
+                       else
+                               dev->data->rx_queues = rxq;
+                       if (nb_queues > old_nb_queues)
+                               memset(&(rxq[old_nb_queues]), 0,
+                                       sizeof(struct igb_rx_queue *) *
+                                       (nb_queues - old_nb_queues));
+               }
+       }
+       dev->data->nb_rx_queues = nb_queues;
+
+       return 0;
+}
+
+static void
+igb_reset_rx_queue(struct igb_rx_queue *rxq)
+{
+       unsigned size;
+       unsigned i;
+
+       /* Zero out HW ring memory */
+       size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
+       for (i = 0; i < size; i++) {
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+       }
+
+       rxq->rx_tail = 0;
+       rxq->pkt_first_seg = NULL;
+       rxq->pkt_last_seg = NULL;
+}
+
+int
+eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
+                        uint16_t queue_idx,
+                        uint16_t nb_desc,
+                        unsigned int socket_id,
+                        const struct rte_eth_rxconf *rx_conf,
+                        struct rte_mempool *mp)
+{
+       const struct rte_memzone *rz;
+       struct igb_rx_queue *rxq;
+       struct e1000_hw     *hw;
+       unsigned int size;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Validate number of receive descriptors.
+        * The count must lie within the hardware limits, and the resulting
+        * ring size in bytes must be a multiple of IGB_ALIGN.
+        */
+       if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
+           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+               return (-EINVAL);
+       }
+
+       /* Free memory prior to re-allocation if needed */
+       if (dev->data->rx_queues[queue_idx] != NULL) {
+               igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
+       /* First allocate the RX queue data structure. */
+       rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
+                         CACHE_LINE_SIZE);
+       if (rxq == NULL)
+               return (-ENOMEM);
+       rxq->mb_pool = mp;
+       rxq->nb_rx_desc = nb_desc;
+       rxq->pthresh = rx_conf->rx_thresh.pthresh;
+       rxq->hthresh = rx_conf->rx_thresh.hthresh;
+       rxq->wthresh = rx_conf->rx_thresh.wthresh;
+       rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+       rxq->queue_id = queue_idx;
+       rxq->port_id = dev->data->port_id;
+       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+                                 ETHER_CRC_LEN);
+
+       /*
+        *  Allocate RX ring hardware descriptors. A memzone large enough to
+        *  handle the maximum ring size is allocated in order to allow for
+        *  resizing in later calls to the queue setup function.
+        */
+       size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+       rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+       if (rz == NULL) {
+               igb_rx_queue_release(rxq);
+               return (-ENOMEM);
+       }
+       rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
+       rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+       rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
+
+       /* Allocate software ring. */
+       rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+                                  sizeof(struct igb_rx_entry) * nb_desc,
+                                  CACHE_LINE_SIZE);
+       if (rxq->sw_ring == NULL) {
+               igb_rx_queue_release(rxq);
+               return (-ENOMEM);
+       }
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+                    rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+       dev->data->rx_queues[queue_idx] = rxq;
+       igb_reset_rx_queue(rxq);
+
+       return 0;
+}
+
+void
+igb_dev_clear_queues(struct rte_eth_dev *dev)
+{
+       uint16_t i;
+       struct igb_tx_queue *txq;
+       struct igb_rx_queue *rxq;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               igb_tx_queue_release_mbufs(txq);
+               igb_reset_tx_queue(txq, dev);
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               igb_rx_queue_release_mbufs(rxq);
+               igb_reset_rx_queue(rxq);
+       }
+}
+
+/**
+ * Receive Side Scaling (RSS).
+ * See section 7.1.1.7 in the following document:
+ *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source and
+ * destination ports of TCP/UDP headers, if any, of received packets are hashed
+ * against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the index of the RX queue in which to
+ * store the received packet.
+ * The following output is supplied in the RX write-back descriptor:
+ *     - 32-bit result of the Microsoft RSS hash function,
+ *     - 4-bit RSS type field.
+ */
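+
+/*
+ * As a rough sketch of the lookup described above (a hypothetical helper,
+ * not part of this driver): the 7 LSBs of the hash select one of the 128
+ * RETA entries, whose value is the destination RX queue index:
+ *
+ *     static inline uint8_t
+ *     rss_queue_of(uint32_t rss_hash, const uint8_t reta[128])
+ *     {
+ *             return reta[rss_hash & 0x7F];
+ *     }
+ */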
+
+/*
+ * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+       0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+       0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+       0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+       0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+       0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+igb_rss_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw;
+       uint32_t mrqc;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mrqc = E1000_READ_REG(hw, E1000_MRQC);
+       mrqc &= ~E1000_MRQC_ENABLE_MASK;
+       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+static void
+igb_rss_configure(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw;
+       uint8_t *hash_key;
+       uint32_t rss_key;
+       uint32_t mrqc;
+       uint32_t shift;
+       uint16_t rss_hf;
+       uint16_t i;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+       if (rss_hf == 0) /* Disable RSS. */ {
+               igb_rss_disable(dev);
+               return;
+       }
+       hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+       if (hash_key == NULL)
+               hash_key = rss_intel_key; /* Default hash key. */
+
+       /* Fill in RSS hash key. */
+       for (i = 0; i < 10; i++) {
+               rss_key  = hash_key[(i * 4)];
+               rss_key |= hash_key[(i * 4) + 1] << 8;
+               rss_key |= hash_key[(i * 4) + 2] << 16;
+               rss_key |= hash_key[(i * 4) + 3] << 24;
+               E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
+       }
+
+       /* Fill in redirection table. */
+       shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+       for (i = 0; i < 128; i++) {
+               union e1000_reta {
+                       uint32_t dword;
+                       uint8_t  bytes[4];
+               } reta;
+               uint8_t q_idx;
+
+               q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
+                                  i % dev->data->nb_rx_queues : 0);
+               reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
+               if ((i & 3) == 3)
+                       E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+       }
+
+       /* Set configured hashing functions in MRQC register. */
+       mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
+       if (rss_hf & ETH_RSS_IPV4)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
+       if (rss_hf & ETH_RSS_IPV4_TCP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
+       if (rss_hf & ETH_RSS_IPV6)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
+       if (rss_hf & ETH_RSS_IPV6_EX)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
+       if (rss_hf & ETH_RSS_IPV6_TCP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
+       if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+       if (rss_hf & ETH_RSS_IPV4_UDP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+       if (rss_hf & ETH_RSS_IPV6_UDP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+       if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
+       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+/*********************************************************************
+ *
+ *  Enable receive unit.
+ *
+ **********************************************************************/
+
+static int
+igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+       struct igb_rx_entry *rxe = rxq->sw_ring;
+       uint64_t dma_addr;
+       unsigned i;
+
+       /* Initialize software ring entries. */
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               volatile union e1000_adv_rx_desc *rxd;
+               struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+
+               if (mbuf == NULL) {
+                       PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
+                               "queue_id=%hu\n", rxq->queue_id);
+                       igb_rx_queue_release(rxq);
+                       return (-ENOMEM);
+               }
+               dma_addr =
+                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+               rxd = &rxq->rx_ring[i];
+               rxd->read.hdr_addr = dma_addr;
+               rxd->read.pkt_addr = dma_addr;
+               rxe[i].mbuf = mbuf;
+       }
+
+       return 0;
+}
+
+int
+eth_igb_rx_init(struct rte_eth_dev *dev)
+{
+       struct e1000_hw     *hw;
+       struct igb_rx_queue *rxq;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       uint32_t rctl;
+       uint32_t rxcsum;
+       uint32_t srrctl;
+       uint16_t buf_size;
+       uint16_t rctl_bsize;
+       uint16_t i;
+       int ret;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       srrctl = 0;
+
+       /*
+        * Make sure receives are disabled while setting
+        * up the descriptor ring.
+        */
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+       /*
+        * Configure support of jumbo frames, if any.
+        */
+       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+               rctl |= E1000_RCTL_LPE;
+
+               /* Set maximum packet length. */
+               E1000_WRITE_REG(hw, E1000_RLPML,
+                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       } else
+               rctl &= ~E1000_RCTL_LPE;
+
+       /* Configure and enable each RX queue. */
+       rctl_bsize = 0;
+       dev->rx_pkt_burst = eth_igb_recv_pkts;
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               uint64_t bus_addr;
+               uint32_t rxdctl;
+
+               rxq = dev->data->rx_queues[i];
+
+               /* Allocate buffers for descriptor rings and set up queue */
+               ret = igb_alloc_rx_queue_mbufs(rxq);
+               if (ret) {
+                       igb_dev_clear_queues(dev);
+                       return ret;
+               }
+
+               /*
+                * Reset crc_len in case it was changed after queue setup by a
+                * call to the configure function.
+                */
+               rxq->crc_len =
+                       (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
+                                                       0 : ETHER_CRC_LEN);
+
+               bus_addr = rxq->rx_ring_phys_addr;
+               E1000_WRITE_REG(hw, E1000_RDLEN(i),
+                               rxq->nb_rx_desc *
+                               sizeof(union e1000_adv_rx_desc));
+               E1000_WRITE_REG(hw, E1000_RDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
+
+               srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+               /*
+                * Configure RX buffer size.
+                */
+               mbp_priv = (struct rte_pktmbuf_pool_private *)
+                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+                                      RTE_PKTMBUF_HEADROOM);
+               if (buf_size >= 1024) {
+                       /*
+                        * Configure the BSIZEPACKET field of the SRRCTL
+                        * register of the queue.
+                        * Value is in 1 KB resolution, from 1 KB to 127 KB.
+                        * If this field is equal to 0b, then RCTL.BSIZE
+                        * determines the RX packet buffer size.
+                        */
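+                       /*
+                        * For example, a 2048-byte buffer gives
+                        * (2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT) = 2 here
+                        * (assuming the usual shift of 10 for the 1 KB
+                        * granularity), which the hardware reads back as a
+                        * 2 KB packet buffer.
+                        */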
+                       srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
+                                  E1000_SRRCTL_BSIZEPKT_MASK);
+                       buf_size = (uint16_t) ((srrctl &
+                                               E1000_SRRCTL_BSIZEPKT_MASK) <<
+                                              E1000_SRRCTL_BSIZEPKT_SHIFT);
+
+                       if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+                               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+                               dev->data->scattered_rx = 1;
+                       }
+               } else {
+                       /*
+                        * Use BSIZE field of the device RCTL register.
+                        */
+                       if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
+                               rctl_bsize = buf_size;
+                       dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+                       dev->data->scattered_rx = 1;
+               }
+
+               E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
+
+               /* Enable this RX queue. */
+               rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+               rxdctl &= 0xFFF00000;
+               rxdctl |= (rxq->pthresh & 0x1F);
+               rxdctl |= ((rxq->hthresh & 0x1F) << 8);
+               rxdctl |= ((rxq->wthresh & 0x1F) << 16);
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+       }
+
+       /*
+        * Set up the BSIZE field of the RCTL register, if needed.
+        * Buffer sizes >= 1024 are not set up in the RCTL register, since
+        * the code above configures the SRRCTL register of the RX queue in
+        * that case.
+        * All configurable sizes are:
+        * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
+        *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
+        *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
+        *  2048: rctl |= E1000_RCTL_SZ_2048;
+        *  1024: rctl |= E1000_RCTL_SZ_1024;
+        *   512: rctl |= E1000_RCTL_SZ_512;
+        *   256: rctl |= E1000_RCTL_SZ_256;
+        */
+       if (rctl_bsize > 0) {
+               if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
+                       rctl |= E1000_RCTL_SZ_512;
+               else /* 256 <= buf_size < 512 - use 256 */
+                       rctl |= E1000_RCTL_SZ_256;
+       }
+
+       /*
+        * Configure RSS if device configured with multiple RX queues.
+        */
+       if (dev->data->nb_rx_queues > 1)
+               igb_rss_configure(dev);
+       else
+               igb_rss_disable(dev);
+
+       /*
+        * Setup the Checksum Register.
+        * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
+        */
+       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+       rxcsum |= E1000_RXCSUM_PCSD;
+
+       /* Enable both L3/L4 rx checksum offload */
+       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+               rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
+       else
+               rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+       /* Setup the Receive Control Register. */
+       if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+               rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+
+               /* set STRCRC bit in all queues for Powerville */
+               if (hw->mac.type == e1000_i350) {
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+                               dvmolr |= E1000_DVMOLR_STRCRC;
+                               E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
+                       }
+               }
+
+       } else {
+               rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+
+               /* clear STRCRC bit in all queues for Powerville */
+               if (hw->mac.type == e1000_i350) {
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+                               dvmolr &= ~E1000_DVMOLR_STRCRC;
+                               E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
+                       }
+               }
+       }
+
+       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+               E1000_RCTL_RDMTS_HALF |
+               (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+       /* Make sure VLAN Filters are off. */
+       rctl &= ~E1000_RCTL_VFE;
+       /* Don't store bad packets. */
+       rctl &= ~E1000_RCTL_SBP;
+
+       /* Enable Receives. */
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+       /*
+        * Setup the HW Rx Head and Tail Descriptor Pointers.
+        * This needs to be done after the receive unit is enabled.
+        */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+               E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
+       }
+
+       return 0;
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ **********************************************************************/
+void
+eth_igb_tx_init(struct rte_eth_dev *dev)
+{
+       struct e1000_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint32_t tctl;
+       uint32_t txdctl;
+       uint16_t i;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Setup the Base and Length of the Tx Descriptor Rings. */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               uint64_t bus_addr;
+               txq = dev->data->tx_queues[i];
+               bus_addr = txq->tx_ring_phys_addr;
+
+               E1000_WRITE_REG(hw, E1000_TDLEN(i),
+                               txq->nb_tx_desc *
+                               sizeof(union e1000_adv_tx_desc));
+               E1000_WRITE_REG(hw, E1000_TDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
+
+               /* Setup the HW Tx Head and Tail descriptor pointers. */
+               E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+               E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+               /* Setup Transmit threshold registers. */
+               txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+               txdctl |= txq->pthresh & 0x1F;
+               txdctl |= ((txq->hthresh & 0x1F) << 8);
+               txdctl |= ((txq->wthresh & 0x1F) << 16);
+               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+               E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+       }
+
+       /* Program the Transmit Control Register. */
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+       tctl &= ~E1000_TCTL_CT;
+       tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+       e1000_config_collision_dist(hw);
+
+       /* This write will effectively turn on the transmit unit. */
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
diff --git a/lib/librte_pmd_igb/Makefile b/lib/librte_pmd_igb/Makefile
deleted file mode 100644 (file)
index b0dec40..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-#   BSD LICENSE
-# 
-#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
-#   All rights reserved.
-# 
-#   Redistribution and use in source and binary forms, with or without 
-#   modification, are permitted provided that the following conditions 
-#   are met:
-# 
-#     * Redistributions of source code must retain the above copyright 
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright 
-#       notice, this list of conditions and the following disclaimer in 
-#       the documentation and/or other materials provided with the 
-#       distribution.
-#     * Neither the name of Intel Corporation nor the names of its 
-#       contributors may be used to endorse or promote products derived 
-#       from this software without specific prior written permission.
-# 
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# 
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_igb.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_nvm.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_manage.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_mac.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_phy.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_82575.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_api.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_osdep.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_vf.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_mbx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += e1000_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += e1000_ethdev.c
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_net lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pmd_igb/e1000_ethdev.c b/lib/librte_pmd_igb/e1000_ethdev.c
deleted file mode 100644 (file)
index 5e711c9..0000000
+++ /dev/null
@@ -1,1318 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
- */
-
-#include <sys/queue.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_interrupts.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_pci.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_atomic.h>
-#include <rte_malloc.h>
-
-#include "e1000_logs.h"
-#include "igb/e1000_api.h"
-#include "igb/e1000_hw.h"
-#include "e1000_ethdev.h"
-
-static int  eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
-               uint16_t nb_tx_q);
-static int  eth_igb_start(struct rte_eth_dev *dev);
-static void eth_igb_stop(struct rte_eth_dev *dev);
-static void eth_igb_close(struct rte_eth_dev *dev);
-static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
-static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
-static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
-static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
-static int  eth_igb_link_update(struct rte_eth_dev *dev,
-                               int wait_to_complete);
-static void eth_igb_stats_get(struct rte_eth_dev *dev,
-                               struct rte_eth_stats *rte_stats);
-static void eth_igb_stats_reset(struct rte_eth_dev *dev);
-static void eth_igb_infos_get(struct rte_eth_dev *dev,
-                               struct rte_eth_dev_info *dev_info);
-static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
-                               struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
-static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
-                                                       void *param);
-static int  igb_hardware_init(struct e1000_hw *hw);
-static void igb_hw_control_acquire(struct e1000_hw *hw);
-static void igb_hw_control_release(struct e1000_hw *hw);
-static void igb_init_manageability(struct e1000_hw *hw);
-static void igb_release_manageability(struct e1000_hw *hw);
-static void igb_vlan_hw_support_enable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_support_disable(struct rte_eth_dev *dev);
-static void eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
-                                     uint16_t vlan_id,
-                                     int on);
-static int eth_igb_led_on(struct rte_eth_dev *dev);
-static int eth_igb_led_off(struct rte_eth_dev *dev);
-
-static void igb_intr_disable(struct e1000_hw *hw);
-static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-               uint32_t index, uint32_t pool);
-static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-
-#define IGB_FC_PAUSE_TIME 0x0680
-#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
-#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
-
-static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
-
-/*
- * The set of PCI devices this driver supports
- */
-static struct rte_pci_id pci_id_igb_map[] = {
-
-#undef RTE_LIBRTE_IXGBE_PMD
-#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{.device_id = 0},
-};
-
-static struct eth_dev_ops eth_igb_ops = {
-       .dev_configure        = eth_igb_configure,
-       .dev_start            = eth_igb_start,
-       .dev_stop             = eth_igb_stop,
-       .dev_close            = eth_igb_close,
-       .promiscuous_enable   = eth_igb_promiscuous_enable,
-       .promiscuous_disable  = eth_igb_promiscuous_disable,
-       .allmulticast_enable  = eth_igb_allmulticast_enable,
-       .allmulticast_disable = eth_igb_allmulticast_disable,
-       .link_update          = eth_igb_link_update,
-       .stats_get            = eth_igb_stats_get,
-       .stats_reset          = eth_igb_stats_reset,
-       .dev_infos_get        = eth_igb_infos_get,
-       .vlan_filter_set      = eth_igb_vlan_filter_set,
-       .rx_queue_setup       = eth_igb_rx_queue_setup,
-       .tx_queue_setup       = eth_igb_tx_queue_setup,
-       .dev_led_on           = eth_igb_led_on,
-       .dev_led_off          = eth_igb_led_off,
-       .flow_ctrl_set        = eth_igb_flow_ctrl_set,
-       .mac_addr_add         = eth_igb_rar_set,
-       .mac_addr_remove      = eth_igb_rar_clear,
-};
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-                               struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                                       *(uint64_t *)src) == 0)
-               return -1;
-
-       return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-                               struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = &(dev->data->dev_link);
-       struct rte_eth_link *src = link;
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                                       *(uint64_t *)src) == 0)
-               return -1;
-
-       return 0;
-}
-
-static void
-igb_identify_hardware(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       hw->vendor_id = dev->pci_dev->id.vendor_id;
-       hw->device_id = dev->pci_dev->id.device_id;
-       hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
-       hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
-
-       e1000_set_mac_type(hw);
-
-       /* need to check if it is a vf device below */
-}
-
-static int
-eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
-                  struct rte_eth_dev *eth_dev)
-{
-       int error = 0;
-       struct rte_pci_device *pci_dev;
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-       struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-
-       pci_dev = eth_dev->pci_dev;
-       eth_dev->dev_ops = &eth_igb_ops;
-       eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
-       eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
-
-       /* for secondary processes, we don't initialise any further as primary
-        * has already done this work. Only check we don't need a different
-        * RX function */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
-               return 0;
-       }
-
-       hw->hw_addr= (void *)pci_dev->mem_resource.addr;
-
-       igb_identify_hardware(eth_dev);
-
-       if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
-               error = -EIO;
-               goto err_late;
-       }
-
-       e1000_get_bus_info(hw);
-
-       hw->mac.autoneg = 1;
-       hw->phy.autoneg_wait_to_complete = 0;
-       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-
-       /* Copper options */
-       if (hw->phy.media_type == e1000_media_type_copper) {
-               hw->phy.mdix = 0; /* AUTO_ALL_MODES */
-               hw->phy.disable_polarity_correction = 0;
-               hw->phy.ms_type = e1000_ms_hw_default;
-       }
-
-       /*
-        * Start from a known state, this is important in reading the nvm
-        * and mac from that.
-        */
-       e1000_reset_hw(hw);
-
-       /* Make sure we have a good EEPROM before we read from it */
-       if (e1000_validate_nvm_checksum(hw) < 0) {
-               /*
-                * Some PCI-E parts fail the first check due to
-                * the link being in sleep state, call it again,
-                * if it fails a second time its a real issue.
-                */
-               if (e1000_validate_nvm_checksum(hw) < 0) {
-                       PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
-                       error = -EIO;
-                       goto err_late;
-               }
-       }
-
-       /* Read the permanent MAC address out of the EEPROM */
-       if (e1000_read_mac_addr(hw) != 0) {
-               PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
-               error = -EIO;
-               goto err_late;
-       }
-
-       /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("e1000",
-               ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
-                                               "store MAC addresses",
-                               ETHER_ADDR_LEN * hw->mac.rar_entry_count);
-               error = -ENOMEM;
-               goto err_late;
-       }
-
-       /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
-
-       /* initialize the vfta */
-       memset(shadow_vfta, 0, sizeof(*shadow_vfta));
-
-       /* Now initialize the hardware */
-       if (igb_hardware_init(hw) != 0) {
-               PMD_INIT_LOG(ERR, "Hardware initialization failed");
-               rte_free(eth_dev->data->mac_addrs);
-               eth_dev->data->mac_addrs = NULL;
-               error = -ENODEV;
-               goto err_late;
-       }
-       hw->mac.get_link_status = 1;
-
-       /* Indicate SOL/IDER usage */
-       if (e1000_check_reset_block(hw) < 0) {
-               PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
-                                       "SOL/IDER session");
-       }
-
-       PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
-                    eth_dev->data->port_id, pci_dev->id.vendor_id,
-                    pci_dev->id.device_id);
-
-       rte_intr_callback_register(&(pci_dev->intr_handle),
-               eth_igb_interrupt_handler, (void *)eth_dev);
-
-       return 0;
-
-err_late:
-       igb_hw_control_release(hw);
-
-       return (error);
-}
-
-static struct eth_driver rte_igb_pmd = {
-       {
-               .name = "rte_igb_pmd",
-               .id_table = pci_id_igb_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-       },
-       .eth_dev_init = eth_igb_dev_init,
-       .dev_private_size = sizeof(struct e1000_adapter),
-};
-
-int
-rte_igb_pmd_init(void)
-{
-       rte_eth_driver_register(&rte_igb_pmd);
-       return 0;
-}
-
-static int
-eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
-{
-       struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-       int diag;
-
-       PMD_INIT_LOG(DEBUG, ">>");
-
-       intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
-
-       /* Allocate the array of pointers to RX structures */
-       diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
-       if (diag != 0) {
-               PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
-                                       " pointers to RX queues failed",
-                                       dev->data->port_id, nb_rx_q);
-               return diag;
-       }
-
-       /* Allocate the array of pointers to TX structures */
-       diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
-       if (diag != 0) {
-               PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
-                                       " pointers to TX queues failed",
-                                       dev->data->port_id, nb_tx_q);
-
-               return diag;
-       }
-
-       PMD_INIT_LOG(DEBUG, "<<");
-
-       return (0);
-}
-
-static int
-eth_igb_start(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       int ret, i;
-
-       PMD_INIT_LOG(DEBUG, ">>");
-
-       igb_intr_disable(hw);
-
-       /* Power up the phy. Needed to make the link go Up */
-       e1000_power_up_phy(hw);
-
-       /*
-        * Packet Buffer Allocation (PBA)
-        * Writing PBA sets the receive portion of the buffer
-        * the remainder is used for the transmit buffer.
-        */
-       if (hw->mac.type == e1000_82575) {
-               uint32_t pba;
-
-               pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
-               E1000_WRITE_REG(hw, E1000_PBA, pba);
-       }
-
-       /* Put the address into the Receive Address Array */
-       e1000_rar_set(hw, hw->mac.addr, 0);
-
-       /* Initialize the hardware */
-       if (igb_hardware_init(hw)) {
-               PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
-               return (-1);
-       }
-
-       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
-
-       /* Configure for OS presence */
-       igb_init_manageability(hw);
-
-       eth_igb_tx_init(dev);
-
-       /* This can fail when allocating mbufs for descriptor rings */
-       ret = eth_igb_rx_init(dev);
-       if (ret) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
-               return ret;
-       }
-
-       e1000_clear_hw_cntrs_base_generic(hw);
-
-       /*
-        * If VLAN filtering is enabled, set up VLAN tag offload and filtering
-        * and restore the VFTA.
-        */
-       if (dev->data->dev_conf.rxmode.hw_vlan_filter)
-               igb_vlan_hw_support_enable(dev);
-       else
-               igb_vlan_hw_support_disable(dev);
-
-       /*
-        * Configure the Interrupt Moderation register (EITR) with the maximum
-        * possible value (0xFFFF) to minimize "System Partial Write" issued by
-        * spurious [DMA] memory updates of RX and TX ring descriptors.
-        *
-        * With a EITR granularity of 2 microseconds in the 82576, only 7/8
-        * spurious memory updates per second should be expected.
-        * ((65535 * 2) / 1000.1000 ~= 0.131 second).
-        *
-        * Because interrupts are not used at all, the MSI-X is not activated
-        * and interrupt moderation is controlled by EITR[0].
-        *
-        * Note that having [almost] disabled memory updates of RX and TX ring
-        * descriptors through the Interrupt Moderation mechanism, memory
-        * updates of ring descriptors are now moderated by the configurable
-        * value of Write-Back Threshold registers.
-        */
-       if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
-                       (hw->mac.type == e1000_i350)) {
-               uint32_t ivar;
-
-               /* Enable all RX & TX queues in the IVAR registers */
-               ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
-               for (i = 0; i < 8; i++)
-                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
-
-               /* Configure EITR with the maximum possible value (0xFFFF) */
-               E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
-       }
-
-       /* Don't reset the phy next time init gets called */
-       hw->phy.reset_disable = 1;
-
-       /* Setup link speed and duplex */
-       switch (dev->data->dev_conf.link_speed) {
-       case ETH_LINK_SPEED_AUTONEG:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
-               else
-                       goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_10:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
-               else
-                       goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_100:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
-               else
-                       goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_1000:
-               if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
-                               (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
-                       hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
-               else
-                       goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_10000:
-       default:
-               goto error_invalid_config;
-       }
-       e1000_setup_link(hw);
-
-       PMD_INIT_LOG(DEBUG, "<<");
-
-       /* check if lsc interrupt feature is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0)
-               return eth_igb_interrupt_setup(dev);
-
-       return (0);
-
-error_invalid_config:
-       PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
-                       dev->data->dev_conf.link_speed,
-                       dev->data->dev_conf.link_duplex, dev->data->port_id);
-       return -1;
-}
-
-/*********************************************************************
- *
- *  This routine disables all traffic on the adapter by issuing a
- *  global reset on the MAC.
- *
- **********************************************************************/
-static void
-eth_igb_stop(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_link link;
-
-       igb_intr_disable(hw);
-       e1000_reset_hw(hw);
-       E1000_WRITE_REG(hw, E1000_WUC, 0);
-
-       /* Power down the phy. Needed to make the link go Down */
-       e1000_power_down_phy(hw);
-
-       igb_dev_clear_queues(dev);
-
-       /* clear the recorded link status */
-       memset(&link, 0, sizeof(link));
-       rte_igb_dev_atomic_write_link_status(dev, &link);
-}
-
-static void
-eth_igb_close(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_link link;
-
-       eth_igb_stop(dev);
-       e1000_phy_hw_reset(hw);
-       igb_release_manageability(hw);
-       igb_hw_control_release(hw);
-
-       igb_dev_clear_queues(dev);
-
-       memset(&link, 0, sizeof(link));
-       rte_igb_dev_atomic_write_link_status(dev, &link);
-}
-
-static int
-igb_get_rx_buffer_size(struct e1000_hw *hw)
-{
-       uint32_t rx_buf_size;
-       if (hw->mac.type == e1000_82576) {
-               rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
-       } else if (hw->mac.type == e1000_82580) {
-               /* PBS needs to be translated according to a lookup table */
-               rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
-               rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
-               rx_buf_size = (rx_buf_size << 10);
-       } else {
-               rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
-       }
-
-       return rx_buf_size;
-}
-
-/*********************************************************************
- *
- *  Initialize the hardware
- *
- **********************************************************************/
-static int
-igb_hardware_init(struct e1000_hw *hw)
-{
-       uint32_t rx_buf_size;
-       int diag;
-
-       /* Let the firmware know the OS is in control */
-       igb_hw_control_acquire(hw);
-
-       /*
-        * These parameters control the automatic generation (Tx) and
-        * response (Rx) to Ethernet PAUSE frames.
-        * - High water mark should allow for at least two standard size (1518)
-        *   frames to be received after sending an XOFF.
-        * - Low water mark works best when it is very near the high water mark.
-        *   This allows the receiver to restart by sending XON when it has
-        *   drained a bit. Here we use an arbitrary value of 1500 which will
-        *   restart after one full frame is pulled from the buffer. There
-        *   could be several smaller frames in the buffer and if so they will
-        *   not trigger the XON until their total number reduces the buffer
-        *   by 1500.
-        * - The pause time is fairly large at 1000 x 512ns = 512 usec.
-        */
-       rx_buf_size = igb_get_rx_buffer_size(hw);
-
-       hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
-       hw->fc.low_water = hw->fc.high_water - 1500;
-       hw->fc.pause_time = IGB_FC_PAUSE_TIME;
-       hw->fc.send_xon = 1;
-
-       /* Set Flow control, use the tunable location if sane */
-       if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
-               hw->fc.requested_mode = igb_fc_setting;
-       else
-               hw->fc.requested_mode = e1000_fc_none;
-
-       /* Issue a global reset */
-       e1000_reset_hw(hw);
-       E1000_WRITE_REG(hw, E1000_WUC, 0);
-
-       diag = e1000_init_hw(hw);
-       if (diag < 0)
-               return (diag);
-
-       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
-       e1000_get_phy_info(hw);
-       e1000_check_for_link(hw);
-
-       return (0);
-}
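
The watermark arithmetic in igb_hardware_init() above leaves room for two
maximum-size frames below the top of the Rx packet buffer (high water) and
puts the low water mark 1500 bytes under that, so XON is sent again once
roughly one full frame has drained. A minimal stand-alone sketch of the same
arithmetic, assuming a 64 KB packet buffer and a 1518-byte maximum frame
(both values are illustrative, not read from hardware):

#include <stdint.h>
#include <stdio.h>

#define MAX_FRAME_LEN 1518                       /* assumed ETHER_MAX_LEN */

int main(void)
{
        uint32_t rx_buf_size = 64 * 1024;        /* example Rx packet buffer, in bytes */
        uint32_t high_water  = rx_buf_size - 2 * MAX_FRAME_LEN;
        uint32_t low_water   = high_water - 1500;

        /* A 65536-byte buffer gives high water 62500 and low water 61000. */
        printf("high=%u low=%u\n", high_water, low_water);
        return 0;
}
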
-
-/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
-static void
-eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
-{
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_hw_stats *stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-       int pause_frames;
-
-       if (hw->phy.media_type == e1000_media_type_copper ||
-           (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
-               stats->symerrs +=
-                   E1000_READ_REG(hw,E1000_SYMERRS);
-               stats->sec += E1000_READ_REG(hw, E1000_SEC);
-       }
-
-       stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
-       stats->mpc += E1000_READ_REG(hw, E1000_MPC);
-       stats->scc += E1000_READ_REG(hw, E1000_SCC);
-       stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
-
-       stats->mcc += E1000_READ_REG(hw, E1000_MCC);
-       stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
-       stats->colc += E1000_READ_REG(hw, E1000_COLC);
-       stats->dc += E1000_READ_REG(hw, E1000_DC);
-       stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
-       stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
-       stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
-       /*
-       ** For watchdog management we need to know if we have been
-       ** paused during the last interval, so capture that here.
-       */
-       pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
-       stats->xoffrxc += pause_frames;
-       stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
-       stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
-       stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
-       stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
-       stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
-       stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
-       stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
-       stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
-       stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
-       stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
-       stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
-       stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
-
-       /* For the 64-bit byte counters the low dword must be read first. */
-       /* Both registers clear on the read of the high dword */
-
-       stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
-       stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
-       stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
-       stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
-
-       stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
-       stats->ruc += E1000_READ_REG(hw, E1000_RUC);
-       stats->rfc += E1000_READ_REG(hw, E1000_RFC);
-       stats->roc += E1000_READ_REG(hw, E1000_ROC);
-       stats->rjc += E1000_READ_REG(hw, E1000_RJC);
-
-       stats->tor += E1000_READ_REG(hw, E1000_TORH);
-       stats->tot += E1000_READ_REG(hw, E1000_TOTH);
-
-       stats->tpr += E1000_READ_REG(hw, E1000_TPR);
-       stats->tpt += E1000_READ_REG(hw, E1000_TPT);
-       stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
-       stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
-       stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
-       stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
-       stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
-       stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
-       stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
-       stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
-
-       /* Interrupt Counts */
-
-       stats->iac += E1000_READ_REG(hw, E1000_IAC);
-       stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
-       stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
-       stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
-       stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
-       stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
-       stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
-       stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
-       stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
-
-       /* Host to Card Statistics */
-
-       stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
-       stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
-       stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
-       stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
-       stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
-       stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
-       stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
-       stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
-       stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
-       stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
-       stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
-       stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
-       stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
-       stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
-
-       stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
-       stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
-       stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
-       stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
-       stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
-       stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
-
-       if (rte_stats == NULL)
-               return;
-
-       /* Rx Errors */
-       rte_stats->ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
-           stats->ruc + stats->roc + stats->mpc + stats->cexterr;
-
-       /* Tx Errors */
-       rte_stats->oerrors = stats->ecol + stats->latecol;
-
-       rte_stats->ipackets = stats->gprc;
-       rte_stats->opackets = stats->gptc;
-       rte_stats->ibytes   = stats->gorc;
-       rte_stats->obytes   = stats->gotc;
-}
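
Regarding the 64-bit GORC/GOTC handling above: each byte counter is exposed
as a pair of 32-bit registers, the low dword is read first and the read of
the high dword clears both, so the driver folds the two halves into one
64-bit software total. A small sketch of that composition with two made-up
register values (purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Pretend these were just read from the low and high counter registers. */
        uint32_t gorcl = 0x89ABCDEF;
        uint32_t gorch = 0x00000012;

        uint64_t gorc = 0;
        gorc += gorcl;                           /* low dword first               */
        gorc += (uint64_t)gorch << 32;           /* high dword, which also clears */

        printf("gorc=0x%016llx\n", (unsigned long long)gorc);
        return 0;
}
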
-
-static void
-eth_igb_stats_reset(struct rte_eth_dev *dev)
-{
-       struct e1000_hw_stats *hw_stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
-       /* HW registers are cleared on read */
-       eth_igb_stats_get(dev, NULL);
-
-       /* Reset software totals */
-       memset(hw_stats, 0, sizeof(*hw_stats));
-}
-
-static void
-eth_igb_infos_get(struct rte_eth_dev *dev,
-                   struct rte_eth_dev_info *dev_info)
-{
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
-       dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
-       dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-
-       switch (hw->mac.type) {
-       case e1000_82575:
-               dev_info->max_rx_queues = 4;
-               dev_info->max_tx_queues = 4;
-               break;
-
-       case e1000_82576:
-               dev_info->max_rx_queues = 16;
-               dev_info->max_tx_queues = 16;
-               break;
-
-       case e1000_82580:
-               dev_info->max_rx_queues = 8;
-               dev_info->max_tx_queues = 8;
-               break;
-
-       case e1000_i350:
-               dev_info->max_rx_queues = 8;
-               dev_info->max_tx_queues = 8;
-               break;
-
-       default:
-               /* Should not happen */
-               dev_info->max_rx_queues = 0;
-               dev_info->max_tx_queues = 0;
-       }
-}
-
-/* return 0 means link status changed, -1 means not changed */
-static int
-eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_link link, old;
-       int link_check, count;
-
-       link_check = 0;
-       hw->mac.get_link_status = 1;
-
-       /* possible wait-to-complete in up to 9 seconds */
-       for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
-               /* Read the real link status */
-               switch (hw->phy.media_type) {
-               case e1000_media_type_copper:
-                       /* Do the work to read phy */
-                       e1000_check_for_link(hw);
-                       link_check = !hw->mac.get_link_status;
-                       break;
-
-               case e1000_media_type_fiber:
-                       e1000_check_for_link(hw);
-                       link_check = (E1000_READ_REG(hw, E1000_STATUS) &
-                                     E1000_STATUS_LU);
-                       break;
-
-               case e1000_media_type_internal_serdes:
-                       e1000_check_for_link(hw);
-                       link_check = hw->mac.serdes_has_link;
-                       break;
-
-               default:
-               case e1000_media_type_unknown:
-                       break;
-               }
-               if (link_check || wait_to_complete == 0)
-                       break;
-               rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
-       }
-       memset(&link, 0, sizeof(link));
-       rte_igb_dev_atomic_read_link_status(dev, &link);
-       old = link;
-
-       /* Now we check if a transition has happened */
-       if (link_check) {
-               hw->mac.ops.get_link_up_info(hw, &link.link_speed,
-                                         &link.link_duplex);
-               link.link_status = 1;
-       } else if (!link_check) {
-               link.link_speed = 0;
-               link.link_duplex = 0;
-               link.link_status = 0;
-       }
-       rte_igb_dev_atomic_write_link_status(dev, &link);
-
-       /* not changed */
-       if (old.link_status == link.link_status)
-               return -1;
-
-       /* changed */
-       return 0;
-}
-
-/*
- * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means
- * that the driver is loaded.
- */
-static void
-igb_hw_control_acquire(struct e1000_hw *hw)
-{
-       uint32_t ctrl_ext;
-
-       /* Let firmware know the driver has taken over */
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/*
- * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded.
- */
-static void
-igb_hw_control_release(struct e1000_hw *hw)
-{
-       uint32_t ctrl_ext;
-
-       /* Let firmware take over control of h/w */
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
-                       ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/*
- * Something of a misnomer: what this really means is
- * enabling OS management of the system, i.e.
- * disabling the special hardware management features.
- */
-static void
-igb_init_manageability(struct e1000_hw *hw)
-{
-       if (e1000_enable_mng_pass_thru(hw)) {
-               uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
-               uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
-
-               /* disable hardware interception of ARP */
-               manc &= ~(E1000_MANC_ARP_EN);
-
-               /* enable receiving management packets to the host */
-               manc |= E1000_MANC_EN_MNG2HOST;
-               manc2h |= 1 << 5;  /* Mng Port 623 */
-               manc2h |= 1 << 6;  /* Mng Port 664 */
-               E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
-               E1000_WRITE_REG(hw, E1000_MANC, manc);
-       }
-}
-
-static void
-igb_release_manageability(struct e1000_hw *hw)
-{
-       if (e1000_enable_mng_pass_thru(hw)) {
-               uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
-
-               manc |= E1000_MANC_ARP_EN;
-               manc &= ~E1000_MANC_EN_MNG2HOST;
-
-               E1000_WRITE_REG(hw, E1000_MANC, manc);
-       }
-}
-
-static void
-eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t rctl;
-
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t rctl;
-
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       rctl &= (~E1000_RCTL_UPE);
-       if (dev->data->all_multicast == 1)
-               rctl |= E1000_RCTL_MPE;
-       else
-               rctl &= (~E1000_RCTL_MPE);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t rctl;
-
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       rctl |= E1000_RCTL_MPE;
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t rctl;
-
-       if (dev->data->promiscuous == 1)
-               return; /* must remain in all_multicast mode */
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       rctl &= (~E1000_RCTL_MPE);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
-       uint32_t vfta;
-       uint32_t vid_idx;
-       uint32_t vid_bit;
-
-       vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
-                             E1000_VFTA_ENTRY_MASK);
-       vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
-       vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
-       if (on)
-               vfta |= vid_bit;
-       else
-               vfta &= ~vid_bit;
-       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
-
-       /* update local VFTA copy */
-       shadow_vfta->vfta[vid_idx] = vfta;
-}
-
-static void
-igb_vlan_hw_support_enable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
-       uint32_t reg;
-       int i;
-
-       /* VLAN Mode Enable */
-       reg = E1000_READ_REG(hw, E1000_CTRL);
-       reg |= E1000_CTRL_VME;
-       E1000_WRITE_REG(hw, E1000_CTRL, reg);
-
-       /* Filter Table Enable */
-       reg = E1000_READ_REG(hw, E1000_RCTL);
-       reg &= ~E1000_RCTL_CFIEN;
-       reg |= E1000_RCTL_VFE;
-       E1000_WRITE_REG(hw, E1000_RCTL, reg);
-
-       /* Update maximum frame size */
-       reg = E1000_READ_REG(hw, E1000_RLPML);
-       reg += VLAN_TAG_SIZE;
-       E1000_WRITE_REG(hw, E1000_RLPML, reg);
-
-       /* restore VFTA table */
-       for (i = 0; i < E1000_VFTA_SIZE; i++)
-               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
-}
-
-static void
-igb_vlan_hw_support_disable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t reg;
-
-       /* VLAN Mode disable */
-       reg = E1000_READ_REG(hw, E1000_CTRL);
-       reg &= ~E1000_CTRL_VME;
-       E1000_WRITE_REG(hw, E1000_CTRL, reg);
-}
-
-static void
-igb_intr_disable(struct e1000_hw *hw)
-{
-       E1000_WRITE_REG(hw, E1000_IMC, ~0);
-       E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * It enables the interrupt mask and then enables the interrupt.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  - On success, zero.
- *  - On failure, a negative value.
- */
-static int
-eth_igb_interrupt_setup(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
-       E1000_WRITE_FLUSH(hw);
-       rte_intr_enable(&(dev->pci_dev->intr_handle));
-
-       return 0;
-}
-
-/*
- * It reads ICR to get the interrupt causes, checks them and sets a bit flag
- * requesting a link status update.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  - On success, zero.
- *  - On failure, a negative value.
- */
-static int
-eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
-{
-       uint32_t icr;
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-       /* read-on-clear nic registers here */
-       icr = E1000_READ_REG(hw, E1000_ICR);
-       if (icr & E1000_ICR_LSC) {
-               intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
-       }
-
-       return 0;
-}
-
-/*
- * It executes link_update after knowing an interrupt is present.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  - On success, zero.
- *  - On failure, a negative value.
- */
-static int
-eth_igb_interrupt_action(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-       uint32_t tctl, rctl;
-       struct rte_eth_link link;
-       int ret;
-
-       if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
-               return -1;
-
-       intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
-       rte_intr_enable(&(dev->pci_dev->intr_handle));
-
-       /* set get_link_status to check register later */
-       hw->mac.get_link_status = 1;
-       ret = eth_igb_link_update(dev, 0);
-
-       /* check if link has changed */
-       if (ret < 0)
-               return 0;
-
-       memset(&link, 0, sizeof(link));
-       rte_igb_dev_atomic_read_link_status(dev, &link);
-       if (link.link_status) {
-               PMD_INIT_LOG(INFO,
-                       " Port %d: Link Up - speed %u Mbps - %s\n",
-                       dev->data->port_id, (unsigned)link.link_speed,
-                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-                               "full-duplex" : "half-duplex");
-       } else {
-               PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-                                       dev->data->port_id);
-       }
-       PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-                               dev->pci_dev->addr.domain,
-                               dev->pci_dev->addr.bus,
-                               dev->pci_dev->addr.devid,
-                               dev->pci_dev->addr.function);
-       tctl = E1000_READ_REG(hw, E1000_TCTL);
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       if (link.link_status) {
-               /* enable Tx/Rx */
-               tctl |= E1000_TCTL_EN;
-               rctl |= E1000_RCTL_EN;
-       } else {
-               /* disable Tx/Rx */
-               tctl &= ~E1000_TCTL_EN;
-               rctl &= ~E1000_RCTL_EN;
-       }
-       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-       E1000_WRITE_FLUSH(hw);
-
-       return 0;
-}
-
-/**
- * Interrupt handler which shall be registered at first.
- *
- * @param handle
- *  Pointer to interrupt handle.
- * @param param
- *  The address of parameter (struct rte_eth_dev *) registered before.
- *
- * @return
- *  void
- */
-static void
-eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param)
-{
-       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-
-       eth_igb_interrupt_get_status(dev);
-       eth_igb_interrupt_action(dev);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
-}
-
-static int
-eth_igb_led_on(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
-}
-
-static int
-eth_igb_led_off(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
-}
-
-static int
-eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
-       struct e1000_hw *hw;
-       int err;
-       enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
-               e1000_fc_none,
-               e1000_fc_rx_pause,
-               e1000_fc_tx_pause,
-               e1000_fc_full
-       };
-       uint32_t rx_buf_size;
-       uint32_t max_high_water;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       rx_buf_size = igb_get_rx_buffer_size(hw);
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
-
-       /* At least reserve one Ethernet frame for watermark */
-       max_high_water = rx_buf_size - ETHER_MAX_LEN;
-       if ((fc_conf->high_water > max_high_water) ||
-               (fc_conf->high_water < fc_conf->low_water)) {
-               PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value\n");
-               PMD_INIT_LOG(ERR, "high water must be <= 0x%x\n", max_high_water);
-               return (-EINVAL);
-       }
-
-       hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
-       hw->fc.pause_time     = fc_conf->pause_time;
-       hw->fc.high_water     = fc_conf->high_water;
-       hw->fc.low_water      = fc_conf->low_water;
-       hw->fc.send_xon       = fc_conf->send_xon;
-
-       err = e1000_setup_link_generic(hw);
-       if (err == E1000_SUCCESS) {
-               return 0;
-       }
-
-       PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
-       return (-EIO);
-}
-
-static void
-eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-               uint32_t index, __rte_unused uint32_t pool)
-{
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       e1000_rar_set(hw, mac_addr->addr_bytes, index);
-}
-
-static void
-eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
-{
-       uint8_t addr[ETHER_ADDR_LEN];
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       memset(addr, 0, sizeof(addr));
-
-       e1000_rar_set(hw, addr, index);
-}
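
One detail worth spelling out from eth_igb_vlan_filter_set() above: the
12-bit VLAN ID is split into a VFTA register index and a bit position inside
that 32-bit register. A minimal sketch of the same arithmetic, assuming the
usual geometry of 128 registers of 32 bits each; the shift/mask values below
are illustrative stand-ins, not quoted from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define VFTA_ENTRY_SHIFT          5      /* 32 VLAN IDs per register (assumed) */
#define VFTA_ENTRY_MASK           0x7F   /* 128 registers (assumed)            */
#define VFTA_ENTRY_BIT_SHIFT_MASK 0x1F   /* bit index within a register        */

int main(void)
{
        uint16_t vlan_id = 1234;         /* example VLAN */
        uint32_t idx = (vlan_id >> VFTA_ENTRY_SHIFT) & VFTA_ENTRY_MASK;
        uint32_t bit = 1u << (vlan_id & VFTA_ENTRY_BIT_SHIFT_MASK);

        /* VLAN 1234 lands in register 38, bit 18 (mask 0x00040000). */
        printf("VFTA[%u] |= 0x%08x\n", idx, bit);
        return 0;
}
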
diff --git a/lib/librte_pmd_igb/e1000_ethdev.h b/lib/librte_pmd_igb/e1000_ethdev.h
deleted file mode 100644 (file)
index 6b72647..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
- */
-
-#ifndef _E1000_ETHDEV_H_
-#define _E1000_ETHDEV_H_
-
-/* need update link, bit flag */
-#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
-
-/*
- * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
- * driver.
- */
-#define E1000_ADVTXD_POPTS_TXSM     0x00000200 /* L4 Checksum offload request */
-#define E1000_ADVTXD_POPTS_IXSM     0x00000100 /* IP Checksum offload request */
-#define E1000_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE of Reserved */
-#define E1000_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
-#define E1000_RXD_ERR_CKSUM_BIT     29 
-#define E1000_RXD_ERR_CKSUM_MSK     3
-#define E1000_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
-
-#define E1000_VFTA_SIZE 128
-
-/* structure for interrupt relative data */
-struct e1000_interrupt {
-       uint32_t flags;
-};
-
-/* local vfta copy */
-struct e1000_vfta {
-       uint32_t vfta[E1000_VFTA_SIZE];
-};
-
-/*
- * Structure to store private data for each driver instance (for each port).
- */
-struct e1000_adapter {
-       struct e1000_hw         hw;
-       struct e1000_hw_stats   stats;
-       struct e1000_interrupt  intr;
-       struct e1000_vfta       shadow_vfta;
-};
-
-#define E1000_DEV_PRIVATE_TO_HW(adapter) \
-       (&((struct e1000_adapter *)adapter)->hw)
-
-#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
-       (&((struct e1000_adapter *)adapter)->stats)
-
-#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
-       (&((struct e1000_adapter *)adapter)->intr)
-
-#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
-       (&((struct e1000_adapter *)adapter)->shadow_vfta)
-
-/*
- * RX/TX function prototypes
- */
-int igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
-int igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
-void igb_dev_clear_queues(struct rte_eth_dev *dev);
-
-int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
-               uint16_t nb_rx_desc, unsigned int socket_id,
-               const struct rte_eth_rxconf *rx_conf,
-               struct rte_mempool *mb_pool);
-
-int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
-               uint16_t nb_tx_desc, unsigned int socket_id,
-               const struct rte_eth_txconf *tx_conf);
-
-int eth_igb_rx_init(struct rte_eth_dev *dev);
-
-void eth_igb_tx_init(struct rte_eth_dev *dev);
-
-uint16_t eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
-
-uint16_t eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts);
-
-uint16_t eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq,
-               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-
-#endif /* _E1000_ETHDEV_H_ */
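
The E1000_DEV_PRIVATE_TO_* macros above all follow the same pattern: cast
the opaque dev_private pointer to struct e1000_adapter and take the address
of one embedded member. A minimal stand-alone sketch of that pattern, using
made-up structure and macro names (none of these identifiers come from the
driver):

#include <stdio.h>

/* Stand-in for per-port private data: one container, several views of it. */
struct fake_hw      { int mac_type; };
struct fake_stats   { unsigned long crcerrs; };
struct fake_adapter {
        struct fake_hw    hw;
        struct fake_stats stats;
};

#define PRIV_TO_HW(p)    (&((struct fake_adapter *)(p))->hw)
#define PRIV_TO_STATS(p) (&((struct fake_adapter *)(p))->stats)

int main(void)
{
        struct fake_adapter adapter = { { 82576 }, { 0 } };
        void *dev_private = &adapter;    /* what the ethdev layer stores */

        PRIV_TO_STATS(dev_private)->crcerrs++;
        printf("mac=%d crcerrs=%lu\n",
               PRIV_TO_HW(dev_private)->mac_type,
               PRIV_TO_STATS(dev_private)->crcerrs);
        return 0;
}
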
diff --git a/lib/librte_pmd_igb/e1000_logs.h b/lib/librte_pmd_igb/e1000_logs.h
deleted file mode 100644 (file)
index 8a1e321..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
- */
-
-#ifndef _E1000_LOGS_H_
-#define _E1000_LOGS_H_
-
-#ifdef RTE_LIBRTE_IGB_DEBUG_INIT
-#define PMD_INIT_LOG(level, fmt, args...) \
-       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IGB_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
-       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IGB_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
-       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IGB_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
-       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IGB_DEBUG_DRIVER
-#define PMD_DRV_LOG(level, fmt, args...) \
-       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#endif /* _E1000_LOGS_H_ */
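
The logging macros above expand to real RTE_LOG calls only when the matching
RTE_LIBRTE_IGB_DEBUG_* option is enabled, and to empty do/while statements
otherwise, so disabled log levels cost nothing at run time. A minimal
stand-alone sketch of that pattern, using fprintf in place of RTE_LOG (the
macro and option names below are illustrative only):

#include <stdio.h>

/* Build with -DDEBUG_INIT to enable the messages. */
#ifdef DEBUG_INIT
#define INIT_LOG(fmt, ...) \
        fprintf(stderr, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
#else
#define INIT_LOG(fmt, ...) do { } while (0)
#endif

int main(void)
{
        INIT_LOG("probing port %d", 0);  /* no-op unless DEBUG_INIT is defined */
        return 0;
}
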
diff --git a/lib/librte_pmd_igb/e1000_rxtx.c b/lib/librte_pmd_igb/e1000_rxtx.c
deleted file mode 100644 (file)
index 1ea3d63..0000000
+++ /dev/null
@@ -1,1858 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
- */
-
-#include <sys/queue.h>
-
-#include <endian.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <stdint.h>
-#include <stdarg.h>
-#include <inttypes.h>
-
-#include <rte_interrupts.h>
-#include <rte_byteorder.h>
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_pci.h>
-#include <rte_memory.h>
-#include <rte_memcpy.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-#include <rte_mbuf.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_prefetch.h>
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_string_fns.h>
-
-#include "e1000_logs.h"
-#include "igb/e1000_api.h"
-#include "e1000_ethdev.h"
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-       struct rte_mbuf *m;
-
-       m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
-       return (m);
-}
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr +                   \
-                       (uint64_t) ((char *)((mb)->pkt.data) -     \
-                               (char *)(mb)->buf_addr))
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-       (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igb_rx_entry {
-       struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igb_tx_entry {
-       struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-       uint16_t next_id; /**< Index of next descriptor in ring. */
-       uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igb_rx_queue {
-       struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
-       volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
-       uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
-       volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
-       struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
-       struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-       struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-       uint16_t            nb_rx_desc; /**< number of RX descriptors. */
-       uint16_t            rx_tail;    /**< current value of RDT register. */
-       uint16_t            nb_rx_hold; /**< number of held free RX desc. */
-       uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
-       uint16_t            queue_id;   /**< RX queue index. */
-       uint8_t             port_id;    /**< Device port identifier. */
-       uint8_t             pthresh;    /**< Prefetch threshold register. */
-       uint8_t             hthresh;    /**< Host threshold register. */
-       uint8_t             wthresh;    /**< Write-back threshold register. */
-       uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
-};
-
-/**
- * Hardware context number
- */
-enum igb_advctx_num {
-       IGB_CTX_0    = 0, /**< CTX0    */
-       IGB_CTX_1    = 1, /**< CTX1    */
-       IGB_CTX_NUM  = 2, /**< CTX NUM */
-};
-
-/**
- * Structure to check if a new context needs to be built
- */
-struct igb_advctx_info {
-       uint16_t flags;           /**< ol_flags related to context build. */
-       uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
-       uint32_t vlan_macip_lens; /**< vlan, mac.ip length. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igb_tx_queue {
-       volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
-       uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
-       struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
-       volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
-       uint32_t               txd_type;      /**< Device-specific TXD type */
-       uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
-       uint16_t               tx_tail;  /**< Current value of TDT register. */
-       uint16_t               tx_head;  /**< Index of first used TX descriptor. */
-       uint16_t               queue_id; /**< TX queue index. */
-       uint8_t                port_id;  /**< Device port identifier. */
-       uint8_t                pthresh;  /**< Prefetch threshold register. */
-       uint8_t                hthresh;  /**< Host threshold register. */
-       uint8_t                wthresh;  /**< Write-back threshold register. */
-       uint32_t               ctx_curr; /**< Current used hardware descriptor. */
-       uint32_t               ctx_start;/**< Start context position for transmit queue. */
-       struct igb_advctx_info ctx_cache[IGB_CTX_NUM];  /**< Hardware context history.*/
-};
-
-#if 1
-#define RTE_PMD_USE_PREFETCH
-#endif
-
-#ifdef RTE_PMD_USE_PREFETCH
-#define rte_igb_prefetch(p)    rte_prefetch0(p)
-#else
-#define rte_igb_prefetch(p)    do {} while(0)
-#endif
-
-#ifdef RTE_PMD_PACKET_PREFETCH
-#define rte_packet_prefetch(p) rte_prefetch1(p)
-#else
-#define rte_packet_prefetch(p) do {} while(0)
-#endif
-
-/*********************************************************************
- *
- *  TX function
- *
- **********************************************************************/
-
-/*
- * Advanced context descriptors are almost the same between igb/ixgbe.
- * This is kept as a separate function; there may be an optimization opportunity here.
- * Rework required to go with the pre-defined values.
- */
-
-static inline void
-igbe_set_xmit_ctx(struct igb_tx_queue* txq,
-               volatile struct e1000_adv_tx_context_desc *ctx_txd,
-               uint16_t ol_flags, uint32_t vlan_macip_lens)
-{
-       uint32_t type_tucmd_mlhl;
-       uint32_t mss_l4len_idx;
-       uint32_t ctx_idx, ctx_curr;
-       uint32_t cmp_mask;
-
-       ctx_curr = txq->ctx_curr;
-       ctx_idx = ctx_curr + txq->ctx_start;
-
-       cmp_mask = 0;
-       type_tucmd_mlhl = 0;
-
-       if (ol_flags & PKT_TX_VLAN_PKT) {
-               cmp_mask |= TX_VLAN_CMP_MASK;
-       }
-
-       if (ol_flags & PKT_TX_IP_CKSUM) {
-               type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
-               cmp_mask |= TX_MAC_LEN_CMP_MASK;
-       }
-
-       /* Specify which HW CTX to upload. */
-       mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
-       switch (ol_flags & PKT_TX_L4_MASK) {
-       case PKT_TX_UDP_CKSUM:
-               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
-                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-               mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
-               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
-               break;
-       case PKT_TX_TCP_CKSUM:
-               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
-                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-               mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
-               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
-               break;
-       case PKT_TX_SCTP_CKSUM:
-               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
-                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-               mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
-               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
-               break;
-       default:
-               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
-                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
-               break;
-       }
-
-       txq->ctx_cache[ctx_curr].flags           = ol_flags;
-       txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
-       txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask;
-
-       ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
-       ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
-       ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
-       ctx_txd->seqnum_seed     = 0;
-}
-
-/*
- * Check which hardware context can be used. Use the existing match
- * or create a new context descriptor.
- */
-static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
-               uint32_t vlan_macip_lens)
-{
-       /* If match with the current context */
-       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
-               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
-                       return txq->ctx_curr;
-       }
-
-       /* If match with the second context */
-       txq->ctx_curr ^= 1;
-       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
-               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
-                       return txq->ctx_curr;
-       }
-
-       /* No match in either slot: a new context descriptor must be built */
-       return (IGB_CTX_NUM);
-}
-
-static inline uint32_t
-tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
-{
-       static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
-       static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
-       uint32_t tmp;
-
-       tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
-       tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
-       return tmp;
-}
-
-static inline uint32_t
-tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
-{
-       static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
-       return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
-}
-
-uint16_t
-eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
-              uint16_t nb_pkts)
-{
-       struct igb_tx_entry *sw_ring;
-       struct igb_tx_entry *txe, *txn;
-       volatile union e1000_adv_tx_desc *txr;
-       volatile union e1000_adv_tx_desc *txd;
-       struct rte_mbuf     *tx_pkt;
-       struct rte_mbuf     *m_seg;
-       uint64_t buf_dma_addr;
-       uint32_t olinfo_status;
-       uint32_t cmd_type_len;
-       uint32_t pkt_len;
-       uint16_t slen;
-       uint16_t ol_flags;
-       uint16_t tx_end;
-       uint16_t tx_id;
-       uint16_t tx_last;
-       uint16_t nb_tx;
-       uint16_t tx_ol_req;
-       uint32_t new_ctx;
-       uint32_t ctx;
-       uint32_t vlan_macip_lens;
-
-       sw_ring = txq->sw_ring;
-       txr     = txq->tx_ring;
-       tx_id   = txq->tx_tail;
-       txe = &sw_ring[tx_id];
-
-       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-               tx_pkt = *tx_pkts++;
-               pkt_len = tx_pkt->pkt.pkt_len;
-
-               RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
-               /*
-                * The number of descriptors that must be allocated for a
-                * packet is the number of segments of that packet, plus 1
-                * Context Descriptor for the VLAN Tag Identifier, if any.
-                * Determine the last TX descriptor to allocate in the TX ring
-                * for the packet, starting from the current position (tx_id)
-                * in the ring.
-                */
-               tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
-
-               ol_flags = tx_pkt->ol_flags;
-               vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) |
-                       (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) |
-                       tx_pkt->pkt.l3_len;
-               tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
-
-               /* If a Context Descriptor needs to be built. */
-               if (tx_ol_req) {
-                       ctx = what_advctx_update(txq, tx_ol_req,vlan_macip_lens);
-                       /* Only allocate a context descriptor if required */
-                       new_ctx = (ctx == IGB_CTX_NUM);
-                       ctx = txq->ctx_curr;
-                       tx_last = (uint16_t) (tx_last + new_ctx);
-               }
-               if (tx_last >= txq->nb_tx_desc)
-                       tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
-
-               PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-                          " tx_first=%u tx_last=%u\n",
-                          (unsigned) txq->port_id,
-                          (unsigned) txq->queue_id,
-                          (unsigned) pkt_len,
-                          (unsigned) tx_id,
-                          (unsigned) tx_last);
-
-               /*
-                * Check if there are enough free descriptors in the TX ring
-                * to transmit the next packet.
-                * This operation is based on the two following rules:
-                *
-                *   1- Only check that the last needed TX descriptor can be
-                *      allocated (by construction, if that descriptor is free,
-                *      all intermediate ones are also free).
-                *
-                *      For this purpose, the index of the last TX descriptor
-                *      used for a packet (the "last descriptor" of a packet)
-                *      is recorded in the TX entries (the last one included)
-                *      that are associated with all TX descriptors allocated
-                *      for that packet.
-                *
-                *   2- Avoid allocating the last free TX descriptor of the
-                *      ring, in order to never set the TDT register with the
-                *      same value stored in parallel by the NIC in the TDH
-                *      register, which makes the TX engine of the NIC enter
-                *      in a deadlock situation.
-                *
-                *      By extension, avoid allocating a free descriptor that
-                *      belongs to the last set of free descriptors allocated
-                *      to the same packet previously transmitted.
-                */
-
-               /*
-                * The "last descriptor" of the previously sent packet, if any,
-                * which used the last descriptor to allocate.
-                */
-               tx_end = sw_ring[tx_last].last_id;
-
-               /*
-                * The next descriptor following that "last descriptor" in the
-                * ring.
-                */
-               tx_end = sw_ring[tx_end].next_id;
-
-               /*
-                * The "last descriptor" associated with that next descriptor.
-                */
-               tx_end = sw_ring[tx_end].last_id;
-
-               /*
-                * Check that this descriptor is free.
-                */
-               if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
-                       if (nb_tx == 0)
-                               return (0);
-                       goto end_of_tx;
-               }
-
-               /*
-                * Set common flags of all TX Data Descriptors.
-                *
-                * The following bits must be set in all Data Descriptors:
-                *   - E1000_ADVTXD_DTYP_DATA
-                *   - E1000_ADVTXD_DCMD_DEXT
-                *
-                * The following bits must be set in the first Data Descriptor
-                * and are ignored in the other ones:
-                *   - E1000_ADVTXD_DCMD_IFCS
-                *   - E1000_ADVTXD_MAC_1588
-                *   - E1000_ADVTXD_DCMD_VLE
-                *
-                * The following bits must only be set in the last Data
-                * Descriptor:
-                *   - E1000_TXD_CMD_EOP
-                *
-                * The following bits can be set in any Data Descriptor, but
-                * are only set in the last Data Descriptor:
-                *   - E1000_TXD_CMD_RS
-                */
-               cmd_type_len = txq->txd_type |
-                       E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
-               olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
-#if defined(RTE_LIBRTE_IEEE1588)
-               if (ol_flags & PKT_TX_IEEE1588_TMST)
-                       cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
-#endif
-               if (tx_ol_req) {
-                       /* Setup TX Advanced context descriptor if required */
-                       if (new_ctx) {
-                               volatile struct e1000_adv_tx_context_desc *
-                                   ctx_txd;
-
-                               ctx_txd = (volatile struct
-                                   e1000_adv_tx_context_desc *)
-                                   &txr[tx_id];
-
-                               txn = &sw_ring[txe->next_id];
-                               RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-                               if (txe->mbuf != NULL) {
-                                       rte_pktmbuf_free_seg(txe->mbuf);
-                                       txe->mbuf = NULL;
-                               }
-
-                               igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-                                   vlan_macip_lens);
-
-                               txe->last_id = tx_last;
-                               tx_id = txe->next_id;
-                               txe = txn;
-                       }
-
-                       /* Setup the TX Advanced Data Descriptor */
-                       cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
-                       olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
-                       olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
-               }
-
-               m_seg = tx_pkt;
-               do {
-                       txn = &sw_ring[txe->next_id];
-                       txd = &txr[tx_id];
-
-                       if (txe->mbuf != NULL)
-                               rte_pktmbuf_free_seg(txe->mbuf);
-                       txe->mbuf = m_seg;
-
-                       /*
-                        * Set up transmit descriptor.
-                        */
-                       slen = (uint16_t) m_seg->pkt.data_len;
-                       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
-                       txd->read.buffer_addr =
-                               rte_cpu_to_le_64(buf_dma_addr);
-                       txd->read.cmd_type_len =
-                               rte_cpu_to_le_32(cmd_type_len | slen);
-                       txd->read.olinfo_status =
-                               rte_cpu_to_le_32(olinfo_status);
-                       txe->last_id = tx_last;
-                       tx_id = txe->next_id;
-                       txe = txn;
-                       m_seg = m_seg->pkt.next;
-               } while (m_seg != NULL);
-
-               /*
-                * The last packet data descriptor needs End Of Packet (EOP)
-                * and Report Status (RS).
-                */
-               txd->read.cmd_type_len |=
-                       rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
-       }
- end_of_tx:
-       rte_wmb();
-
-       /*
-        * Set the Transmit Descriptor Tail (TDT).
-        */
-       E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
-       PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-                  (unsigned) txq->port_id, (unsigned) txq->queue_id,
-                  (unsigned) tx_id, (unsigned) nb_tx);
-       txq->tx_tail = tx_id;
-
-       return (nb_tx);
-}
-
-/*********************************************************************
- *
- *  RX functions
- *
- **********************************************************************/
-static inline uint16_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
-{
-       uint16_t pkt_flags;
-
-       static uint16_t ip_pkt_types_map[16] = {
-               0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
-               PKT_RX_IPV6_HDR, 0, 0, 0,
-               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-       };
-
-#if defined(RTE_LIBRTE_IEEE1588)
-       static uint32_t ip_pkt_etqf_map[8] = {
-               0, 0, 0, PKT_RX_IEEE1588_PTP,
-               0, 0, 0, 0,
-       };
-
-       pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
-                               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-#else
-       pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
-                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-#endif
-       return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 :
-                                       PKT_RX_RSS_HASH);
-}
-
-static inline uint16_t
-rx_desc_status_to_pkt_flags(uint32_t rx_status)
-{
-       uint16_t pkt_flags;
-
-       /* Check if VLAN present */
-       pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
-
-#if defined(RTE_LIBRTE_IEEE1588)
-       if (rx_status & E1000_RXD_STAT_TMST)
-               pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
-#endif
-       return pkt_flags;
-}
-
-static inline uint16_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
-{
-       /*
-        * Bit 30: IPE, IPv4 checksum error
-        * Bit 29: L4I, L4 integrity error
-        */
-
-       static uint16_t error_to_pkt_flags_map[4] = {
-               0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
-               PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
-       };
-       return error_to_pkt_flags_map[(rx_status >>
-               E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
-}
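A quick worked example of the mapping above (assuming E1000_RXD_ERR_CKSUM_BIT and E1000_RXD_ERR_CKSUM_MSK select exactly the two bits listed in the comment): a write-back status word with only the IPE bit set indexes entry 0b10 of error_to_pkt_flags_map and yields PKT_RX_IP_CKSUM_BAD, while a word with both error bits set indexes entry 0b11 and yields both checksum-error flags.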
-
-uint16_t
-eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-              uint16_t nb_pkts)
-{
-       volatile union e1000_adv_rx_desc *rx_ring;
-       volatile union e1000_adv_rx_desc *rxdp;
-       struct igb_rx_entry *sw_ring;
-       struct igb_rx_entry *rxe;
-       struct rte_mbuf *rxm;
-       struct rte_mbuf *nmb;
-       union e1000_adv_rx_desc rxd;
-       uint64_t dma_addr;
-       uint32_t staterr;
-       uint32_t hlen_type_rss;
-       uint16_t pkt_len;
-       uint16_t rx_id;
-       uint16_t nb_rx;
-       uint16_t nb_hold;
-       uint16_t pkt_flags;
-
-       nb_rx = 0;
-       nb_hold = 0;
-       rx_id = rxq->rx_tail;
-       rx_ring = rxq->rx_ring;
-       sw_ring = rxq->sw_ring;
-       while (nb_rx < nb_pkts) {
-               /*
-                * The order of operations here is important as the DD status
-                * bit must not be read after any other descriptor fields.
-                * rx_ring and rxdp point to volatile data, so the compiler
-                * cannot reorder the accesses. If they were not volatile, the
-                * accesses could be reordered, which could lead to using
-                * invalid descriptor fields when rxd is read.
-                */
-               rxdp = &rx_ring[rx_id];
-               staterr = rxdp->wb.upper.status_error;
-               if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
-                       break;
-               rxd = *rxdp;
-
-               /*
-                * End of packet.
-                *
-                * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
-                * likely to be invalid and to be dropped by the various
-                * validation checks performed by the network stack.
-                *
-                * Allocate a new mbuf to replenish the RX ring descriptor.
-                * If the allocation fails:
-                *    - arrange for that RX descriptor to be the first one
-                *      being parsed the next time the receive function is
-                *      invoked [on the same queue].
-                *
-                *    - Stop parsing the RX ring and return immediately.
-                *
-                * This policy does not drop the packet received in the RX
-                * descriptor for which the allocation of a new mbuf failed.
-                * Thus, it allows that packet to be retrieved later, once
-                * mbufs have been freed in the meantime.
-                * As a side effect, holding RX descriptors instead of
-                * systematically giving them back to the NIC may lead to
-                * RX ring exhaustion situations.
-                * However, the NIC can gracefully prevent such situations
-                * from happening by sending specific "back-pressure" flow
-                * control frames to its peer(s).
-                */
-               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-                          "staterr=0x%x pkt_len=%u\n",
-                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-                          (unsigned) rx_id, (unsigned) staterr,
-                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
-
-               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
-               if (nmb == NULL) {
-                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-                                  "queue_id=%u\n", (unsigned) rxq->port_id,
-                                  (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
-                       break;
-               }
-
-               nb_hold++;
-               rxe = &sw_ring[rx_id];
-               rx_id++;
-               if (rx_id == rxq->nb_rx_desc)
-                       rx_id = 0;
-
-               /* Prefetch next mbuf while processing current one. */
-               rte_igb_prefetch(sw_ring[rx_id].mbuf);
-
-               /*
-                * When the next RX descriptor is on a cache-line boundary,
-                * prefetch the next 4 RX descriptors and the next 8 pointers
-                * to mbufs.
-                */
-               if ((rx_id & 0x3) == 0) {
-                       rte_igb_prefetch(&rx_ring[rx_id]);
-                       rte_igb_prefetch(&sw_ring[rx_id]);
-               }
-
-               rxm = rxe->mbuf;
-               rxe->mbuf = nmb;
-               dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-               rxdp->read.hdr_addr = dma_addr;
-               rxdp->read.pkt_addr = dma_addr;
-
-               /*
-                * Initialize the returned mbuf.
-                * 1) setup generic mbuf fields:
-                *    - number of segments,
-                *    - next segment,
-                *    - packet length,
-                *    - RX port identifier.
-                * 2) integrate hardware offload data, if any:
-                *    - RSS flag & hash,
-                *    - IP checksum flag,
-                *    - VLAN TCI, if any,
-                *    - error flags.
-                */
-               pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
-                                     rxq->crc_len);
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rte_packet_prefetch(rxm->pkt.data);
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len = pkt_len;
-               rxm->pkt.data_len = pkt_len;
-               rxm->pkt.in_port = rxq->port_id;
-
-               rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
-               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
-               /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
-
-               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-               pkt_flags = (pkt_flags |
-                                       rx_desc_status_to_pkt_flags(staterr));
-               pkt_flags = (pkt_flags |
-                                       rx_desc_error_to_pkt_flags(staterr));
-               rxm->ol_flags = pkt_flags;
-
-               /*
-                * Store the mbuf address into the next entry of the array
-                * of returned packets.
-                */
-               rx_pkts[nb_rx++] = rxm;
-       }
-       rxq->rx_tail = rx_id;
-
-       /*
-        * If the number of free RX descriptors is greater than the RX free
-        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
-        * register.
-        * Update the RDT with the value of the last processed RX descriptor
-        * minus 1, to guarantee that the RDT register is never equal to the
-        * RDH register, which creates a "full" ring situation from the
-        * hardware point of view...
-        */
-       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
-       if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-                          "nb_hold=%u nb_rx=%u\n",
-                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-                          (unsigned) rx_id, (unsigned) nb_hold,
-                          (unsigned) nb_rx);
-               rx_id = (uint16_t) ((rx_id == 0) ?
-                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
-               E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
-               nb_hold = 0;
-       }
-       rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
-}
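As a usage illustration (not part of this driver), applications normally reach this receive path through rte_eth_rx_burst(); the fragment below calls the PMD entry point directly only to show the burst semantics. The helper name and the burst size of 32 are arbitrary.

    /* Illustrative sketch: drain up to 32 packets from one RX queue and free
     * them; a real application would process each mbuf instead. */
    #define EXAMPLE_BURST 32

    static void
    example_poll_rx(struct igb_rx_queue *rxq)
    {
            struct rte_mbuf *pkts[EXAMPLE_BURST];
            uint16_t nb, i;

            nb = eth_igb_recv_pkts(rxq, pkts, EXAMPLE_BURST);
            for (i = 0; i < nb; i++)
                    rte_pktmbuf_free(pkts[i]);
    }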
-
-uint16_t
-eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-                        uint16_t nb_pkts)
-{
-       volatile union e1000_adv_rx_desc *rx_ring;
-       volatile union e1000_adv_rx_desc *rxdp;
-       struct igb_rx_entry *sw_ring;
-       struct igb_rx_entry *rxe;
-       struct rte_mbuf *first_seg;
-       struct rte_mbuf *last_seg;
-       struct rte_mbuf *rxm;
-       struct rte_mbuf *nmb;
-       union e1000_adv_rx_desc rxd;
-       uint64_t dma; /* Physical address of mbuf data buffer */
-       uint32_t staterr;
-       uint32_t hlen_type_rss;
-       uint16_t rx_id;
-       uint16_t nb_rx;
-       uint16_t nb_hold;
-       uint16_t data_len;
-       uint16_t pkt_flags;
-
-       nb_rx = 0;
-       nb_hold = 0;
-       rx_id = rxq->rx_tail;
-       rx_ring = rxq->rx_ring;
-       sw_ring = rxq->sw_ring;
-
-       /*
-        * Retrieve RX context of current packet, if any.
-        */
-       first_seg = rxq->pkt_first_seg;
-       last_seg = rxq->pkt_last_seg;
-
-       while (nb_rx < nb_pkts) {
-       next_desc:
-               /*
-                * The order of operations here is important as the DD status
-                * bit must not be read after any other descriptor fields.
-                * rx_ring and rxdp point to volatile data, so the compiler
-                * cannot reorder the accesses. If they were not volatile, the
-                * accesses could be reordered, which could lead to using
-                * invalid descriptor fields when rxd is read.
-                */
-               rxdp = &rx_ring[rx_id];
-               staterr = rxdp->wb.upper.status_error;
-               if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
-                       break;
-               rxd = *rxdp;
-
-               /*
-                * Descriptor done.
-                *
-                * Allocate a new mbuf to replenish the RX ring descriptor.
-                * If the allocation fails:
-                *    - arrange for that RX descriptor to be the first one
-                *      being parsed the next time the receive function is
-                *      invoked [on the same queue].
-                *
-                *    - Stop parsing the RX ring and return immediately.
-                *
-                * This policy does not drop the packet received in the RX
-                * descriptor for which the allocation of a new mbuf failed.
-                * Thus, it allows that packet to be retrieved later, once
-                * mbufs have been freed in the meantime.
-                * As a side effect, holding RX descriptors instead of
-                * systematically giving them back to the NIC may lead to
-                * RX ring exhaustion situations.
-                * However, the NIC can gracefully prevent such situations
-                * from happening by sending specific "back-pressure" flow
-                * control frames to its peer(s).
-                */
-               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-                          "staterr=0x%x data_len=%u\n",
-                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-                          (unsigned) rx_id, (unsigned) staterr,
-                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
-
-               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
-               if (nmb == NULL) {
-                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-                                  "queue_id=%u\n", (unsigned) rxq->port_id,
-                                  (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
-                       break;
-               }
-
-               nb_hold++;
-               rxe = &sw_ring[rx_id];
-               rx_id++;
-               if (rx_id == rxq->nb_rx_desc)
-                       rx_id = 0;
-
-               /* Prefetch next mbuf while processing current one. */
-               rte_igb_prefetch(sw_ring[rx_id].mbuf);
-
-               /*
-                * When the next RX descriptor is on a cache-line boundary,
-                * prefetch the next 4 RX descriptors and the next 8 pointers
-                * to mbufs.
-                */
-               if ((rx_id & 0x3) == 0) {
-                       rte_igb_prefetch(&rx_ring[rx_id]);
-                       rte_igb_prefetch(&sw_ring[rx_id]);
-               }
-
-               /*
-                * Update RX descriptor with the physical address of the new
-                * data buffer of the new allocated mbuf.
-                */
-               rxm = rxe->mbuf;
-               rxe->mbuf = nmb;
-               dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-               rxdp->read.pkt_addr = dma;
-               rxdp->read.hdr_addr = dma;
-
-               /*
-                * Set data length & data buffer address of mbuf.
-                */
-               data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-               rxm->pkt.data_len = data_len;
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-
-               /*
-                * If this is the first buffer of the received packet,
-                * set the pointer to the first mbuf of the packet and
-                * initialize its context.
-                * Otherwise, update the total length and the number of segments
-                * of the current scattered packet, and update the pointer to
-                * the last mbuf of the current packet.
-                */
-               if (first_seg == NULL) {
-                       first_seg = rxm;
-                       first_seg->pkt.pkt_len = data_len;
-                       first_seg->pkt.nb_segs = 1;
-               } else {
-                       first_seg->pkt.pkt_len += data_len;
-                       first_seg->pkt.nb_segs++;
-                       last_seg->pkt.next = rxm;
-               }
-
-               /*
-                * If this is not the last buffer of the received packet,
-                * update the pointer to the last mbuf of the current scattered
-                * packet and continue to parse the RX ring.
-                */
-               if (! (staterr & E1000_RXD_STAT_EOP)) {
-                       last_seg = rxm;
-                       goto next_desc;
-               }
-
-               /*
-                * This is the last buffer of the received packet.
-                * If the CRC is not stripped by the hardware:
-                *   - Subtract the CRC length from the total packet length.
-                *   - If the last buffer only contains the whole CRC or a part
-                *     of it, free the mbuf associated to the last buffer.
-                *     If part of the CRC is also contained in the previous
-                *     mbuf, subtract the length of that CRC part from the
-                *     data length of the previous mbuf.
-                */
-               rxm->pkt.next = NULL;
-               if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
-                       if (data_len <= ETHER_CRC_LEN) {
-                               rte_pktmbuf_free_seg(rxm);
-                               first_seg->pkt.nb_segs--;
-                               last_seg->pkt.data_len = (uint16_t)
-                                       (last_seg->pkt.data_len -
-                                        (ETHER_CRC_LEN - data_len));
-                               last_seg->pkt.next = NULL;
-                       } else
-                               rxm->pkt.data_len =
-                                       (uint16_t) (data_len - ETHER_CRC_LEN);
-               }
-
-               /*
-                * Initialize the first mbuf of the returned packet:
-                *    - RX port identifier,
-                *    - hardware offload data, if any:
-                *      - RSS flag & hash,
-                *      - IP checksum flag,
-                *      - VLAN TCI, if any,
-                *      - error flags.
-                */
-               first_seg->pkt.in_port = rxq->port_id;
-               first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
-
-               /*
-                * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
-                * set in the pkt_flags field.
-                */
-               first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
-               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
-               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-               pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
-               pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
-               first_seg->ol_flags = pkt_flags;
-
-               /* Prefetch data of first segment, if configured to do so. */
-               rte_packet_prefetch(first_seg->pkt.data);
-
-               /*
-                * Store the mbuf address into the next entry of the array
-                * of returned packets.
-                */
-               rx_pkts[nb_rx++] = first_seg;
-
-               /*
-                * Setup receipt context for a new packet.
-                */
-               first_seg = NULL;
-       }
-
-       /*
-        * Record index of the next RX descriptor to probe.
-        */
-       rxq->rx_tail = rx_id;
-
-       /*
-        * Save receive context.
-        */
-       rxq->pkt_first_seg = first_seg;
-       rxq->pkt_last_seg = last_seg;
-
-       /*
-        * If the number of free RX descriptors is greater than the RX free
-        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
-        * register.
-        * Update the RDT with the value of the last processed RX descriptor
-        * minus 1, to guarantee that the RDT register is never equal to the
-        * RDH register, which creates a "full" ring situation from the
-        * hardware point of view...
-        */
-       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
-       if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-                          "nb_hold=%u nb_rx=%u\n",
-                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-                          (unsigned) rx_id, (unsigned) nb_hold,
-                          (unsigned) nb_rx);
-               rx_id = (uint16_t) ((rx_id == 0) ?
-                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
-               E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
-               nb_hold = 0;
-       }
-       rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
-}
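A quick worked example of the CRC handling above: with hardware CRC stripping disabled (rxq->crc_len == ETHER_CRC_LEN == 4) and a final segment whose data_len is 2, the last mbuf holds nothing but part of the CRC. It is therefore freed, nb_segs is decremented, and the previous segment's data_len is reduced by the remaining 4 - 2 = 2 CRC bytes, so the returned packet contains no CRC bytes at all.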
-
-/*
- * Ring setup and release.
- *
- * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
- * a multiple of 128 bytes, so TDBA/RDBA are aligned on a 128-byte boundary
- * instead. This also optimizes cache line usage.
- * H/W supports cache line sizes of up to 128 bytes.
- */
-#define IGB_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
- * descriptors must meet the following condition:
- *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
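Since the advanced RX and TX descriptors are 16 bytes each, the 128-byte constraint reduces to a descriptor count that is a multiple of 8, within [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC]. A minimal sketch of that check follows; the helper name is hypothetical, and the real checks are performed inline in the queue setup functions below.

    static inline int
    example_nb_desc_is_valid(uint16_t nb_desc)
    {
            return ((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) == 0 &&
                   nb_desc >= IGB_MIN_RING_DESC && nb_desc <= IGB_MAX_RING_DESC;
    }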
-
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-                     uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-       char z_name[RTE_MEMZONE_NAMESIZE];
-       const struct rte_memzone *mz;
-
-       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                       dev->driver->pci_drv.name, ring_name,
-                               dev->data->port_id, queue_id);
-       mz = rte_memzone_lookup(z_name);
-       if (mz)
-               return mz;
-
-       return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
-                       socket_id, 0, IGB_ALIGN);
-}
-
-static void
-igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
-{
-       unsigned i;
-
-       if (txq->sw_ring != NULL) {
-               for (i = 0; i < txq->nb_tx_desc; i++) {
-                       if (txq->sw_ring[i].mbuf != NULL) {
-                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-                               txq->sw_ring[i].mbuf = NULL;
-                       }
-               }
-       }
-}
-
-static void
-igb_tx_queue_release(struct igb_tx_queue *txq)
-{
-       igb_tx_queue_release_mbufs(txq);
-       rte_free(txq->sw_ring);
-       rte_free(txq);
-}
-
-int
-igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
-{
-       uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
-       struct igb_tx_queue **txq;
-
-       if (dev->data->tx_queues == NULL) {
-               dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
-                               sizeof(struct igb_tx_queue *) * nb_queues,
-                                                       CACHE_LINE_SIZE);
-               if (dev->data->tx_queues == NULL) {
-                       dev->data->nb_tx_queues = 0;
-                       return -ENOMEM;
-               }
-       } else {
-               if (nb_queues < old_nb_queues)
-                       for (i = nb_queues; i < old_nb_queues; i++)
-                               igb_tx_queue_release(dev->data->tx_queues[i]);
-
-               if (nb_queues != old_nb_queues) {
-                       txq = rte_realloc(dev->data->tx_queues,
-                               sizeof(struct igb_tx_queue *) * nb_queues,
-                                                       CACHE_LINE_SIZE);
-                       if (txq == NULL)
-                               return -ENOMEM;
-                       else
-                               dev->data->tx_queues = txq;
-                       if (nb_queues > old_nb_queues)
-                               memset(&(txq[old_nb_queues]), 0,
-                                       sizeof(struct igb_tx_queue *) *
-                                       (nb_queues - old_nb_queues));
-               }
-       }
-       dev->data->nb_tx_queues = nb_queues;
-
-       return 0;
-}
-
-static void
-igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
-{
-       txq->tx_head = 0;
-       txq->tx_tail = 0;
-       txq->ctx_curr = 0;
-       memset((void*)&txq->ctx_cache, 0,
-               IGB_CTX_NUM * sizeof(struct igb_advctx_info));
-}
-
-static void
-igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
-{
-       struct igb_tx_entry *txe = txq->sw_ring;
-       uint32_t size;
-       uint16_t i, prev;
-       struct e1000_hw *hw;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
-       /* Zero out HW ring memory */
-       for (i = 0; i < size; i++) {
-               ((volatile char *)txq->tx_ring)[i] = 0;
-       }
-
-       /* Initialize ring entries */
-       prev = txq->nb_tx_desc - 1;
-       for (i = 0; i < txq->nb_tx_desc; i++) {
-               volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
-
-               txd->wb.status = E1000_TXD_STAT_DD;
-               txe[i].mbuf = NULL;
-               txe[i].last_id = i;
-               txe[prev].next_id = i;
-               prev = i;
-       }
-
-       txq->txd_type = E1000_ADVTXD_DTYP_DATA;
-       /* 82575 specific, each tx queue will use 2 hw contexts */
-       if (hw->mac.type == e1000_82575)
-               txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
-
-       igb_reset_tx_queue_stat(txq);
-}
-
-int
-eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
-                        uint16_t queue_idx,
-                        uint16_t nb_desc,
-                        unsigned int socket_id,
-                        const struct rte_eth_txconf *tx_conf)
-{
-       const struct rte_memzone *tz;
-       struct igb_tx_queue *txq;
-       struct e1000_hw     *hw;
-       uint32_t size;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       /*
-        * Validate number of transmit descriptors.
-        * It must not exceed hardware maximum, and must be multiple
-        * of IGB_ALIGN.
-        */
-       if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
-           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
-               return -EINVAL;
-       }
-
-       /*
-        * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
-        * driver.
-        */
-       if (tx_conf->tx_free_thresh != 0)
-               RTE_LOG(WARNING, PMD,
-                       "The tx_free_thresh parameter is not "
-                       "used for the 1G driver.");
-       if (tx_conf->tx_rs_thresh != 0)
-               RTE_LOG(WARNING, PMD,
-                       "The tx_rs_thresh parameter is not "
-                       "used for the 1G driver.");
-       if (tx_conf->tx_thresh.wthresh == 0)
-               RTE_LOG(WARNING, PMD,
-                       "To improve 1G driver performance, consider setting "
-                       "the TX WTHRESH value to 4, 8, or 16.");
-
-       /* Free memory prior to re-allocation if needed */
-       if (dev->data->tx_queues[queue_idx] != NULL)
-               igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
-
-       /* First allocate the tx queue data structure */
-       txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
-                                                       CACHE_LINE_SIZE);
-       if (txq == NULL)
-               return (-ENOMEM);
-
-       /*
-        * Allocate TX ring hardware descriptors. A memzone large enough to
-        * handle the maximum ring size is allocated in order to allow for
-        * resizing in later calls to the queue setup function.
-        */
-       size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
-       tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                                       size, socket_id);
-       if (tz == NULL) {
-               igb_tx_queue_release(txq);
-               return (-ENOMEM);
-       }
-
-       txq->nb_tx_desc = nb_desc;
-       txq->pthresh = tx_conf->tx_thresh.pthresh;
-       txq->hthresh = tx_conf->tx_thresh.hthresh;
-       txq->wthresh = tx_conf->tx_thresh.wthresh;
-       txq->queue_id = queue_idx;
-       txq->port_id = dev->data->port_id;
-
-       txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
-       txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-       txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
-
-       size = sizeof(union e1000_adv_tx_desc) * nb_desc;
-
-       /* Allocate software ring */
-       txq->sw_ring = rte_zmalloc("txq->sw_ring",
-                                  sizeof(struct igb_tx_entry) * nb_desc,
-                                  CACHE_LINE_SIZE);
-       if (txq->sw_ring == NULL) {
-               igb_tx_queue_release(txq);
-               return (-ENOMEM);
-       }
-       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-                    txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
-
-       igb_reset_tx_queue(txq, dev);
-       dev->tx_pkt_burst = eth_igb_xmit_pkts;
-       dev->data->tx_queues[queue_idx] = txq;
-
-       return (0);
-}
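For context (an illustrative fragment, not taken from this patch), an application reaches this function through the generic ethdev API. Port 0, queue 0 and 512 descriptors below are arbitrary example values, and the WTHRESH setting follows the hint logged above.

    static void
    example_setup_tx_queue(uint8_t port_id)
    {
            static const struct rte_eth_txconf tx_conf = {
                    /* tx_free_thresh/tx_rs_thresh stay 0: unused by this PMD */
                    .tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
            };

            if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                       &tx_conf) != 0)
                    rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
    }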
-
-static void
-igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
-{
-       unsigned i;
-
-       if (rxq->sw_ring != NULL) {
-               for (i = 0; i < rxq->nb_rx_desc; i++) {
-                       if (rxq->sw_ring[i].mbuf != NULL) {
-                               rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
-                               rxq->sw_ring[i].mbuf = NULL;
-                       }
-               }
-       }
-}
-
-static void
-igb_rx_queue_release(struct igb_rx_queue *rxq)
-{
-       igb_rx_queue_release_mbufs(rxq);
-       rte_free(rxq->sw_ring);
-       rte_free(rxq);
-}
-
-int
-igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
-{
-       uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
-       struct igb_rx_queue **rxq;
-
-       if (dev->data->rx_queues == NULL) {
-               dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
-                               sizeof(struct igb_rx_queue *) * nb_queues,
-                                                       CACHE_LINE_SIZE);
-               if (dev->data->rx_queues == NULL) {
-                       dev->data->nb_rx_queues = 0;
-                       return -ENOMEM;
-               }
-       } else {
-               for (i = nb_queues; i < old_nb_queues; i++) {
-                       igb_rx_queue_release(dev->data->rx_queues[i]);
-                       dev->data->rx_queues[i] = NULL;
-               }
-               if (nb_queues != old_nb_queues) {
-                       rxq = rte_realloc(dev->data->rx_queues,
-                               sizeof(struct igb_rx_queue *) * nb_queues,
-                                                       CACHE_LINE_SIZE);
-                       if (rxq == NULL)
-                               return -ENOMEM;
-                       else
-                               dev->data->rx_queues = rxq;
-                       if (nb_queues > old_nb_queues)
-                               memset(&(rxq[old_nb_queues]), 0,
-                                       sizeof(struct igb_rx_queue *) *
-                                       (nb_queues - old_nb_queues));
-               }
-       }
-       dev->data->nb_rx_queues = nb_queues;
-
-       return 0;
-}
-
-static void
-igb_reset_rx_queue(struct igb_rx_queue *rxq)
-{
-       unsigned size;
-       unsigned i;
-
-       /* Zero out HW ring memory */
-       size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
-       for (i = 0; i < size; i++) {
-               ((volatile char *)rxq->rx_ring)[i] = 0;
-       }
-
-       rxq->rx_tail = 0;
-       rxq->pkt_first_seg = NULL;
-       rxq->pkt_last_seg = NULL;
-}
-
-int
-eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
-                        uint16_t queue_idx,
-                        uint16_t nb_desc,
-                        unsigned int socket_id,
-                        const struct rte_eth_rxconf *rx_conf,
-                        struct rte_mempool *mp)
-{
-       const struct rte_memzone *rz;
-       struct igb_rx_queue *rxq;
-       struct e1000_hw     *hw;
-       unsigned int size;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       /*
-        * Validate number of receive descriptors.
-        * It must not exceed hardware maximum, and must be multiple
-        * of IGB_ALIGN.
-        */
-       if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
-           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
-               return (-EINVAL);
-       }
-
-       /* Free memory prior to re-allocation if needed */
-       if (dev->data->rx_queues[queue_idx] != NULL) {
-               igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
-       }
-
-       /* First allocate the RX queue data structure. */
-       rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
-                         CACHE_LINE_SIZE);
-       if (rxq == NULL)
-               return (-ENOMEM);
-       rxq->mb_pool = mp;
-       rxq->nb_rx_desc = nb_desc;
-       rxq->pthresh = rx_conf->rx_thresh.pthresh;
-       rxq->hthresh = rx_conf->rx_thresh.hthresh;
-       rxq->wthresh = rx_conf->rx_thresh.wthresh;
-       rxq->rx_free_thresh = rx_conf->rx_free_thresh;
-       rxq->queue_id = queue_idx;
-       rxq->port_id = dev->data->port_id;
-       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-                                 ETHER_CRC_LEN);
-
-       /*
-        *  Allocate RX ring hardware descriptors. A memzone large enough to
-        *  handle the maximum ring size is allocated in order to allow for
-        *  resizing in later calls to the queue setup function.
-        */
-       size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
-       rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
-       if (rz == NULL) {
-               igb_rx_queue_release(rxq);
-               return (-ENOMEM);
-       }
-       rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
-       rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-       rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
-
-       /* Allocate software ring. */
-       rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
-                                  sizeof(struct igb_rx_entry) * nb_desc,
-                                  CACHE_LINE_SIZE);
-       if (rxq->sw_ring == NULL) {
-               igb_rx_queue_release(rxq);
-               return (-ENOMEM);
-       }
-       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-                    rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
-
-       dev->data->rx_queues[queue_idx] = rxq;
-       igb_reset_rx_queue(rxq);
-
-       return 0;
-}
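The RX-side counterpart is reached the same way; the sketch below assumes a previously created mempool (mbuf_pool) and again uses arbitrary example values.

    static void
    example_setup_rx_queue(uint8_t port_id, struct rte_mempool *mbuf_pool)
    {
            static const struct rte_eth_rxconf rx_conf = {
                    .rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
                    .rx_free_thresh = 32,
            };

            if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                       &rx_conf, mbuf_pool) != 0)
                    rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
    }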
-
-void
-igb_dev_clear_queues(struct rte_eth_dev *dev)
-{
-       uint16_t i;
-       struct igb_tx_queue *txq;
-       struct igb_rx_queue *rxq;
-
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
-               igb_tx_queue_release_mbufs(txq);
-               igb_reset_tx_queue(txq, dev);
-       }
-
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               igb_rx_queue_release_mbufs(rxq);
-               igb_reset_rx_queue(rxq);
-       }
-}
-
-/**
- * Receive Side Scaling (RSS).
- * See section 7.1.1.7 in the following document:
- *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
- *
- * Principles:
- * The source and destination IP addresses of the IP header and the source and
- * destination ports of TCP/UDP headers, if any, of received packets are hashed
- * against a configurable random key to compute a 32-bit RSS hash result.
- * The seven (7) LSBs of the 32-bit hash result are used as an index into a
- * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
- * RSS output index, which is used as the index of the RX queue in which to
- * store the received packets.
- * The following output is supplied in the RX write-back descriptor:
- *     - 32-bit result of the Microsoft RSS hash function,
- *     - 4-bit RSS type field.
- */
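A minimal sketch of that lookup (illustrative only; the helper name and the software copy of the RETA are assumptions, the hardware performs this internally):

    static inline uint8_t
    example_rss_hash_to_queue(uint32_t rss_hash, const uint8_t reta[128])
    {
            /* The 7 LSBs of the hash select a RETA entry; each entry holds
             * a 3-bit RX queue index. */
            return (uint8_t)(reta[rss_hash & 0x7F] & 0x07);
    }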
-
-/*
- * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
- * Used as the default key.
- */
-static uint8_t rss_intel_key[40] = {
-       0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
-       0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
-       0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
-       0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
-       0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
-};
-
-static void
-igb_rss_disable(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw;
-       uint32_t mrqc;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       mrqc = E1000_READ_REG(hw, E1000_MRQC);
-       mrqc &= ~E1000_MRQC_ENABLE_MASK;
-       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
-}
-
-static void
-igb_rss_configure(struct rte_eth_dev *dev)
-{
-       struct e1000_hw *hw;
-       uint8_t *hash_key;
-       uint32_t rss_key;
-       uint32_t mrqc;
-       uint32_t shift;
-       uint16_t rss_hf;
-       uint16_t i;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-       if (rss_hf == 0) /* Disable RSS. */ {
-               igb_rss_disable(dev);
-               return;
-       }
-       hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-       if (hash_key == NULL)
-               hash_key = rss_intel_key; /* Default hash key. */
-
-       /* Fill in RSS hash key. */
-       for (i = 0; i < 10; i++) {
-               rss_key  = hash_key[(i * 4)];
-               rss_key |= hash_key[(i * 4) + 1] << 8;
-               rss_key |= hash_key[(i * 4) + 2] << 16;
-               rss_key |= hash_key[(i * 4) + 3] << 24;
-               E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
-       }
-
-       /* Fill in redirection table. */
-       shift = (hw->mac.type == e1000_82575) ? 6 : 0;
-       for (i = 0; i < 128; i++) {
-               union e1000_reta {
-                       uint32_t dword;
-                       uint8_t  bytes[4];
-               } reta;
-               uint8_t q_idx;
-
-               q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
-                                  i % dev->data->nb_rx_queues : 0);
-               reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
-               if ((i & 3) == 3)
-                       E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
-       }
-
-       /* Set configured hashing functions in MRQC register. */
-       mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-       if (rss_hf & ETH_RSS_IPV4)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-       if (rss_hf & ETH_RSS_IPV4_TCP)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-       if (rss_hf & ETH_RSS_IPV6)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-       if (rss_hf & ETH_RSS_IPV6_EX)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-       if (rss_hf & ETH_RSS_IPV6_TCP)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-       if (rss_hf & ETH_RSS_IPV6_TCP_EX)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-       if (rss_hf & ETH_RSS_IPV4_UDP)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-       if (rss_hf & ETH_RSS_IPV6_UDP)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-       if (rss_hf & ETH_RSS_IPV6_UDP_EX)
-               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
-       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
-}
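As a usage note (an illustrative fragment, not taken from this patch), the rss_key and rss_hf values consumed above come from the port configuration the application passes to rte_eth_dev_configure(), for example:

    struct rte_eth_conf port_conf;

    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.rx_adv_conf.rss_conf.rss_key = NULL;  /* fall back to rss_intel_key */
    port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP;
    /* then: rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &port_conf); */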
-
-/*********************************************************************
- *
- *  Enable receive unit.
- *
- **********************************************************************/
-
-static int
-igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
-{
-       struct igb_rx_entry *rxe = rxq->sw_ring;
-       uint64_t dma_addr;
-       unsigned i;
-
-       /* Initialize software ring entries. */
-       for (i = 0; i < rxq->nb_rx_desc; i++) {
-               volatile union e1000_adv_rx_desc *rxd;
-               struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
-
-               if (mbuf == NULL) {
-                       PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-                               "queue_id=%hu\n", rxq->queue_id);
-                       igb_rx_queue_release(rxq);
-                       return (-ENOMEM);
-               }
-               dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
-               rxd = &rxq->rx_ring[i];
-               rxd->read.hdr_addr = dma_addr;
-               rxd->read.pkt_addr = dma_addr;
-               rxe[i].mbuf = mbuf;
-       }
-
-       return 0;
-}
-
-int
-eth_igb_rx_init(struct rte_eth_dev *dev)
-{
-       struct e1000_hw     *hw;
-       struct igb_rx_queue *rxq;
-       struct rte_pktmbuf_pool_private *mbp_priv;
-       uint32_t rctl;
-       uint32_t rxcsum;
-       uint32_t srrctl;
-       uint16_t buf_size;
-       uint16_t rctl_bsize;
-       uint16_t i;
-       int ret;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       srrctl = 0;
-
-       /*
-        * Make sure receives are disabled while setting
-        * up the descriptor ring.
-        */
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
-
-       /*
-        * Configure support of jumbo frames, if any.
-        */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
-               rctl |= E1000_RCTL_LPE;
-
-               /* Set maximum packet length. */
-               E1000_WRITE_REG(hw, E1000_RLPML,
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
-       } else
-               rctl &= ~E1000_RCTL_LPE;
-
-       /* Configure and enable each RX queue. */
-       rctl_bsize = 0;
-       dev->rx_pkt_burst = eth_igb_recv_pkts;
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               uint64_t bus_addr;
-               uint32_t rxdctl;
-
-               rxq = dev->data->rx_queues[i];
-
-               /* Allocate buffers for descriptor rings and set up queue */
-               ret = igb_alloc_rx_queue_mbufs(rxq);
-               if (ret) {
-                       igb_dev_clear_queues(dev);
-                       return ret;
-               }
-
-               /*
-                * Reset crc_len in case it was changed after queue setup by a
-                * call to the configure function.
-                */
-               rxq->crc_len =
-                       (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-                                                       0 : ETHER_CRC_LEN);
-
-               bus_addr = rxq->rx_ring_phys_addr;
-               E1000_WRITE_REG(hw, E1000_RDLEN(i),
-                               rxq->nb_rx_desc *
-                               sizeof(union e1000_adv_rx_desc));
-               E1000_WRITE_REG(hw, E1000_RDBAH(i),
-                               (uint32_t)(bus_addr >> 32));
-               E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
-
-               srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-               /*
-                * Configure RX buffer size.
-                */
-               mbp_priv = (struct rte_pktmbuf_pool_private *)
-                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
-               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
-                                      RTE_PKTMBUF_HEADROOM);
-               if (buf_size >= 1024) {
-                       /*
-                        * Configure the BSIZEPACKET field of the SRRCTL
-                        * register of the queue.
-                        * Value is in 1 KB resolution, from 1 KB to 127 KB.
-                        * If this field is equal to 0b, then RCTL.BSIZE
-                        * determines the RX packet buffer size.
-                        */
-                       srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
-                                  E1000_SRRCTL_BSIZEPKT_MASK);
-                       buf_size = (uint16_t) ((srrctl &
-                                               E1000_SRRCTL_BSIZEPKT_MASK) <<
-                                              E1000_SRRCTL_BSIZEPKT_SHIFT);
-
-                       if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
-                               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-                               dev->data->scattered_rx = 1;
-                       }
-               } else {
-                       /*
-                        * Use BSIZE field of the device RCTL register.
-                        */
-                       if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
-                               rctl_bsize = buf_size;
-                       dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-                       dev->data->scattered_rx = 1;
-               }
-
-               E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
-
-               /* Enable this RX queue. */
-               rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
-               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-               rxdctl &= 0xFFF00000;
-               rxdctl |= (rxq->pthresh & 0x1F);
-               rxdctl |= ((rxq->hthresh & 0x1F) << 8);
-               rxdctl |= ((rxq->wthresh & 0x1F) << 16);
-               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
-       }
-
-       /*
-        * Setup BSIZE field of RCTL register, if needed.
-        * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
-        * register, since the code above configures the SRRCTL register of
-        * the RX queue in such a case.
-        * All configurable sizes are:
-        * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
-        *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
-        *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
-        *  2048: rctl |= E1000_RCTL_SZ_2048;
-        *  1024: rctl |= E1000_RCTL_SZ_1024;
-        *   512: rctl |= E1000_RCTL_SZ_512;
-        *   256: rctl |= E1000_RCTL_SZ_256;
-        */
-       if (rctl_bsize > 0) {
-               if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
-                       rctl |= E1000_RCTL_SZ_512;
-               else /* 256 <= buf_size < 512 - use 256 */
-                       rctl |= E1000_RCTL_SZ_256;
-       }
-
-       /*
-        * Configure RSS if device configured with multiple RX queues.
-        */
-       if (dev->data->nb_rx_queues > 1)
-               igb_rss_configure(dev);
-       else
-               igb_rss_disable(dev);
-
-       /*
-        * Setup the Checksum Register.
-        * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
-        */
-       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
-       rxcsum |= E1000_RXCSUM_PCSD;
-
-       /* Enable both L3/L4 rx checksum offload */
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-               rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
-       else
-               rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
-       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
-
-       /* Setup the Receive Control Register. */
-       if (dev->data->dev_conf.rxmode.hw_strip_crc) {
-               rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
-
-               /* set STRCRC bit in all queues for Powerville */
-               if (hw->mac.type == e1000_i350) {
-                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                               uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
-                               dvmolr |= E1000_DVMOLR_STRCRC;
-                               E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
-                       }
-               }
-
-       } else {
-               rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
-
-               /* clear STRCRC bit in all queues for Powerville */
-               if (hw->mac.type == e1000_i350) {
-                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                               uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
-                               dvmolr &= ~E1000_DVMOLR_STRCRC;
-                               E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
-                       }
-               }
-       }
-
-       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
-       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
-               E1000_RCTL_RDMTS_HALF |
-               (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-
-       /* Make sure VLAN Filters are off. */
-       rctl &= ~E1000_RCTL_VFE;
-       /* Don't store bad packets. */
-       rctl &= ~E1000_RCTL_SBP;
-
-       /* Enable Receives. */
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-
-       /*
-        * Setup the HW Rx Head and Tail Descriptor Pointers.
-        * This needs to be done after enable.
-        */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               E1000_WRITE_REG(hw, E1000_RDH(i), 0);
-               E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
-       }
-
-       return 0;
-}
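To make the SRRCTL sizing above concrete: a mempool whose data room leaves buf_size = 2048 bytes after RTE_PKTMBUF_HEADROOM programs BSIZEPACKET with 2048 >> 10 = 2, i.e. an effective 2 KB packet buffer, and the scattered receive function is selected only when max_rx_pkt_len exceeds that size (or when buf_size is below 1024, in which case the coarser RCTL.BSIZE granularity is used instead).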
-
-/*********************************************************************
- *
- *  Enable transmit unit.
- *
- **********************************************************************/
-void
-eth_igb_tx_init(struct rte_eth_dev *dev)
-{
-       struct e1000_hw     *hw;
-       struct igb_tx_queue *txq;
-       uint32_t tctl;
-       uint32_t txdctl;
-       uint16_t i;
-
-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       /* Setup the Base and Length of the Tx Descriptor Rings. */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               uint64_t bus_addr;
-               txq = dev->data->tx_queues[i];
-               bus_addr = txq->tx_ring_phys_addr;
-
-               E1000_WRITE_REG(hw, E1000_TDLEN(i),
-                               txq->nb_tx_desc *
-                               sizeof(union e1000_adv_tx_desc));
-               E1000_WRITE_REG(hw, E1000_TDBAH(i),
-                               (uint32_t)(bus_addr >> 32));
-               E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
-
-               /* Setup the HW Tx Head and Tail descriptor pointers. */
-               E1000_WRITE_REG(hw, E1000_TDT(i), 0);
-               E1000_WRITE_REG(hw, E1000_TDH(i), 0);
-
-               /* Setup Transmit threshold registers. */
-               txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
-               txdctl |= txq->pthresh & 0x1F;
-               txdctl |= ((txq->hthresh & 0x1F) << 8);
-               txdctl |= ((txq->wthresh & 0x1F) << 16);
-               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-               E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
-       }
-
-       /* Program the Transmit Control Register. */
-       tctl = E1000_READ_REG(hw, E1000_TCTL);
-       tctl &= ~E1000_TCTL_CT;
-       tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
-                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
-
-       e1000_config_collision_dist(hw);
-
-       /* This write will effectively turn on the transmit unit. */
-       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-}
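As a worked example of the TXDCTL programming above: pthresh = 8, hthresh = 1 and wthresh = 16 place the values 8, 1 and 16 into bits 4:0, 12:8 and 20:16 of TXDCTL respectively, with E1000_TXDCTL_QUEUE_ENABLE set on top of the previously read register value.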
-
diff --git a/lib/librte_pmd_igb/igb/README b/lib/librte_pmd_igb/igb/README
deleted file mode 100644 (file)
index c511b6e..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-..
-  BSD LICENSE
-
-  Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
-  All rights reserved.
-
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions 
-  are met:
-
-    * Redistributions of source code must retain the above copyright 
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in 
-      the documentation and/or other materials provided with the 
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived 
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Intel® IGB driver
-=================
-
-This directory contains code from the Intel® Network Adapter Driver for 82575/6
-and 82580-based Gigabit Network Connections under FreeBSD, version 2.2.3,
-dated 04/25/2011. This code is available from
-`http://downloadmirror.intel.com/15815/eng/igb-2.2.3.tar.gz`
-
-This driver is valid for the product(s) listed below:
-
-* Intel® 82575EB Gigabit Ethernet Controller
-* Intel® 82576 Gigabit Ethernet Controller
-* Intel® 82580EB Gigabit Ethernet Controller
-* Intel® Ethernet Controller I350
-* Intel® Ethernet Server Adapter I340-F4
-* Intel® Ethernet Server Adapter I340-T4
-* Intel® Ethernet Server Adapter I350-F2
-* Intel® Ethernet Server Adapter I350-F4
-* Intel® Ethernet Server Adapter I350-T2
-* Intel® Ethernet Server Adapter I350-T4
-* Intel® Gigabit EF Dual Port Server Adapter
-* Intel® Gigabit ET Dual Port Server Adapter
-* Intel® Gigabit ET Quad Port Server Adapter
-* Intel® Gigabit ET2 Quad Port Server Adapter
-* Intel® Gigabit VT Quad Port Server Adapter
-
-
-Updating driver
-===============
-
-The following modifications have been made to this code to integrate it with the
-Intel® DPDK:
-
-
-e1000_osdep.h and e1000_osdep.c
--------------------------------
-
-The OS dependency layer has been extensively modified to support the drivers in
-the Intel® DPDK environment. It is expected that these files will not need to be
-changed when updating the driver.
diff --git a/lib/librte_pmd_igb/igb/e1000_82575.c b/lib/librte_pmd_igb/igb/e1000_82575.c
deleted file mode 100644 (file)
index b2f1fca..0000000
+++ /dev/null
@@ -1,2429 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-/*
- * 82575EB Gigabit Network Connection
- * 82575EB Gigabit Backplane Connection
- * 82575GB Gigabit Network Connection
- * 82576 Gigabit Network Connection
- * 82576 Quad Port Gigabit Mezzanine Adapter
- */
-
-#include "e1000_api.h"
-
-static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
-static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
-static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
-static void e1000_release_phy_82575(struct e1000_hw *hw);
-static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
-static void e1000_release_nvm_82575(struct e1000_hw *hw);
-static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
-static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
-static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
-                                         u16 *duplex);
-static s32  e1000_init_hw_82575(struct e1000_hw *hw);
-static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
-static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
-                                           u16 *data);
-static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
-static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
-static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw,
-                                    u32 offset, u16 *data);
-static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw,
-                                     u32 offset, u16 data);
-static s32  e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
-                                          bool active);
-static s32  e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
-                                          bool active);
-static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
-                                          bool active);
-static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
-static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
-static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
-static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
-                                            u32 offset, u16 data);
-static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
-static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
-static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
-                                                 u16 *speed, u16 *duplex);
-static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
-static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
-static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
-static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
-static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
-static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
-static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
-static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
-static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
-static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
-static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
-static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
-                                               u16 offset);
-static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
-                                               u16 offset);
-static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
-static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
-
-static const u16 e1000_82580_rxpbs_table[] =
-       { 36, 72, 144, 1, 2, 4, 8, 16,
-         35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
-       (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
-
-
-/**
- *  e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
- *  @hw: pointer to the HW structure
- *
- *  Called to determine if the I2C pins are being used for I2C or as an
- *  external MDIO interface since the two options are mutually exclusive.
- **/
-static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
-{
-       u32 reg = 0;
-       bool ext_mdio = FALSE;
-
-       DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
-
-       switch (hw->mac.type) {
-       case e1000_82575:
-       case e1000_82576:
-               reg = E1000_READ_REG(hw, E1000_MDIC);
-               ext_mdio = !!(reg & E1000_MDIC_DEST);
-               break;
-       case e1000_82580:
-       case e1000_i350:
-               reg = E1000_READ_REG(hw, E1000_MDICNFG);
-               ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
-               break;
-       default:
-               break;
-       }
-       return ext_mdio;
-}
-
-/**
- *  e1000_init_phy_params_82575 - Init PHY func ptrs.
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u32 ctrl_ext;
-
-       DEBUGFUNC("e1000_init_phy_params_82575");
-
-       if (hw->phy.media_type != e1000_media_type_copper) {
-               phy->type = e1000_phy_none;
-               goto out;
-       }
-
-       phy->ops.power_up   = e1000_power_up_phy_copper;
-       phy->ops.power_down = e1000_power_down_phy_copper_82575;
-
-       phy->autoneg_mask           = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-       phy->reset_delay_us         = 100;
-
-       phy->ops.acquire            = e1000_acquire_phy_82575;
-       phy->ops.check_reset_block  = e1000_check_reset_block_generic;
-       phy->ops.commit             = e1000_phy_sw_reset_generic;
-       phy->ops.get_cfg_done       = e1000_get_cfg_done_82575;
-       phy->ops.release            = e1000_release_phy_82575;
-
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-
-       if (e1000_sgmii_active_82575(hw)) {
-               phy->ops.reset      = e1000_phy_hw_reset_sgmii_82575;
-               ctrl_ext |= E1000_CTRL_I2C_ENA;
-       } else {
-               phy->ops.reset      = e1000_phy_hw_reset_generic;
-               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
-       }
-
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-       e1000_reset_mdicnfg_82580(hw);
-
-       if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
-               phy->ops.read_reg   = e1000_read_phy_reg_sgmii_82575;
-               phy->ops.write_reg  = e1000_write_phy_reg_sgmii_82575;
-       } else if (hw->mac.type >= e1000_82580) {
-               phy->ops.read_reg   = e1000_read_phy_reg_82580;
-               phy->ops.write_reg  = e1000_write_phy_reg_82580;
-       } else {
-               phy->ops.read_reg   = e1000_read_phy_reg_igp;
-               phy->ops.write_reg  = e1000_write_phy_reg_igp;
-       }
-
-       /* Set phy->phy_addr and phy->id. */
-       ret_val = e1000_get_phy_id_82575(hw);
-
-       /* Verify phy id and set remaining function pointers */
-       switch (phy->id) {
-       case I347AT4_E_PHY_ID:
-       case M88E1112_E_PHY_ID:
-       case M88E1340M_E_PHY_ID:
-       case M88E1111_I_PHY_ID:
-               phy->type                   = e1000_phy_m88;
-               phy->ops.check_polarity     = e1000_check_polarity_m88;
-               phy->ops.get_info           = e1000_get_phy_info_m88;
-               if (phy->id == I347AT4_E_PHY_ID ||
-                   phy->id == M88E1112_E_PHY_ID ||
-                   phy->id == M88E1340M_E_PHY_ID)
-                       phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
-               else
-                       phy->ops.get_cable_length = e1000_get_cable_length_m88;
-               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
-               break;
-       case IGP03E1000_E_PHY_ID:
-       case IGP04E1000_E_PHY_ID:
-               phy->type                   = e1000_phy_igp_3;
-               phy->ops.check_polarity     = e1000_check_polarity_igp;
-               phy->ops.get_info           = e1000_get_phy_info_igp;
-               phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
-               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
-               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
-               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
-               break;
-       case I82580_I_PHY_ID:
-       case I350_I_PHY_ID:
-               phy->type                   = e1000_phy_82580;
-               phy->ops.check_polarity     = e1000_check_polarity_82577;
-               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
-               phy->ops.get_cable_length   = e1000_get_cable_length_82577;
-               phy->ops.get_info           = e1000_get_phy_info_82577;
-               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82580;
-               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_82580;
-               break;
-       default:
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-       u16 size;
-
-       DEBUGFUNC("e1000_init_nvm_params_82575");
-
-       size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
-                    E1000_EECD_SIZE_EX_SHIFT);
-       /*
-        * Added to a constant, "size" becomes the left-shift value
-        * for setting word_size.
-        */
-       size += NVM_WORD_SIZE_BASE_SHIFT;
-
-       nvm->word_size = 1 << size;
-       nvm->opcode_bits        = 8;
-       nvm->delay_usec         = 1;
-       switch (nvm->override) {
-       case e1000_nvm_override_spi_large:
-               nvm->page_size    = 32;
-               nvm->address_bits = 16;
-               break;
-       case e1000_nvm_override_spi_small:
-               nvm->page_size    = 8;
-               nvm->address_bits = 8;
-               break;
-       default:
-               nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
-               nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
-               break;
-       }
-
-       nvm->type = e1000_nvm_eeprom_spi;
-
-       if (nvm->word_size == (1 << 15))
-               nvm->page_size = 128;
-
-       /* Function Pointers */
-       nvm->ops.acquire    = e1000_acquire_nvm_82575;
-       nvm->ops.release    = e1000_release_nvm_82575;
-       if (nvm->word_size < (1 << 15))
-               nvm->ops.read    = e1000_read_nvm_eerd;
-       else
-               nvm->ops.read    = e1000_read_nvm_spi;
-
-       nvm->ops.write              = e1000_write_nvm_spi;
-       nvm->ops.validate           = e1000_validate_nvm_checksum_generic;
-       nvm->ops.update             = e1000_update_nvm_checksum_generic;
-       nvm->ops.valid_led_default  = e1000_valid_led_default_82575;
-
-       /* override generic family function pointers for specific descendants */
-       switch (hw->mac.type) {
-       case e1000_82580:
-               nvm->ops.validate = e1000_validate_nvm_checksum_82580;
-               nvm->ops.update = e1000_update_nvm_checksum_82580;
-               break;
-       case e1000_i350:
-               nvm->ops.validate = e1000_validate_nvm_checksum_i350;
-               nvm->ops.update = e1000_update_nvm_checksum_i350;
-               break;
-       default:
-               break;
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_init_mac_params_82575 - Init MAC func ptrs.
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
-       u32 ctrl_ext = 0;
-
-       DEBUGFUNC("e1000_init_mac_params_82575");
-
-       /* Set media type */
-       /*
-        * The 82575 uses bits 22:23 for link mode. The mode can be changed
-        * based on the EEPROM. We cannot rely upon device ID. There
-        * is no distinguishable difference between fiber and internal
-        * SerDes mode on the 82575. There can be an external PHY attached
-        * on the SGMII interface. For this, we'll set sgmii_active to TRUE.
-        */
-       hw->phy.media_type = e1000_media_type_copper;
-       dev_spec->sgmii_active = FALSE;
-
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
-       case E1000_CTRL_EXT_LINK_MODE_SGMII:
-               dev_spec->sgmii_active = TRUE;
-               break;
-       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
-       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
-               hw->phy.media_type = e1000_media_type_internal_serdes;
-               break;
-       default:
-               break;
-       }
-
-       /* Set mta register count */
-       mac->mta_reg_count = 128;
-       /* Set uta register count */
-       mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
-       /* Set rar entry count */
-       mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
-       if (mac->type == e1000_82576)
-               mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
-       if (mac->type == e1000_82580)
-               mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
-       if (mac->type == e1000_i350) {
-               mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
-               /* Enable EEE default settings for i350 */
-               dev_spec->eee_disable = FALSE;
-       }
-
-       /* Set if part includes ASF firmware */
-       mac->asf_firmware_present = TRUE;
-       /* FWSM register */
-       mac->has_fwsm = TRUE;
-       /* ARC supported; valid only if manageability features are enabled. */
-       mac->arc_subsystem_valid =
-               (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
-                       ? TRUE : FALSE;
-
-       /* Function pointers */
-
-       /* bus type/speed/width */
-       mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
-       /* reset */
-       if (mac->type >= e1000_82580)
-               mac->ops.reset_hw = e1000_reset_hw_82580;
-       else
-               mac->ops.reset_hw = e1000_reset_hw_82575;
-       /* hw initialization */
-       mac->ops.init_hw = e1000_init_hw_82575;
-       /* link setup */
-       mac->ops.setup_link = e1000_setup_link_generic;
-       /* physical interface link setup */
-       mac->ops.setup_physical_interface =
-               (hw->phy.media_type == e1000_media_type_copper)
-                       ? e1000_setup_copper_link_82575
-                       : e1000_setup_serdes_link_82575;
-       /* physical interface shutdown */
-       mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
-       /* physical interface power up */
-       mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
-       /* check for link */
-       mac->ops.check_for_link = e1000_check_for_link_82575;
-       /* receive address register setting */
-       mac->ops.rar_set = e1000_rar_set_generic;
-       /* read mac address */
-       mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
-       /* configure collision distance */
-       mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
-       /* multicast address update */
-       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
-       /* writing VFTA */
-       mac->ops.write_vfta = e1000_write_vfta_generic;
-       /* clearing VFTA */
-       mac->ops.clear_vfta = e1000_clear_vfta_generic;
-       /* ID LED init */
-       mac->ops.id_led_init = e1000_id_led_init_generic;
-       /* blink LED */
-       mac->ops.blink_led = e1000_blink_led_generic;
-       /* setup LED */
-       mac->ops.setup_led = e1000_setup_led_generic;
-       /* cleanup LED */
-       mac->ops.cleanup_led = e1000_cleanup_led_generic;
-       /* turn on/off LED */
-       mac->ops.led_on = e1000_led_on_generic;
-       mac->ops.led_off = e1000_led_off_generic;
-       /* clear hardware counters */
-       mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
-       /* link info */
-       mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
-
-       /* set lan id for port to determine which phy lock to use */
-       hw->mac.ops.set_lan_id(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_init_function_pointers_82575 - Init func ptrs.
- *  @hw: pointer to the HW structure
- *
- *  Called to initialize all function pointers and parameters.
- **/
-void e1000_init_function_pointers_82575(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_init_function_pointers_82575");
-
-       hw->mac.ops.init_params = e1000_init_mac_params_82575;
-       hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
-       hw->phy.ops.init_params = e1000_init_phy_params_82575;
-       hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
-}
-
-/**
- *  e1000_acquire_phy_82575 - Acquire rights to access PHY
- *  @hw: pointer to the HW structure
- *
- *  Acquire access rights to the correct PHY.
- **/
-static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
-{
-       u16 mask = E1000_SWFW_PHY0_SM;
-
-       DEBUGFUNC("e1000_acquire_phy_82575");
-
-       if (hw->bus.func == E1000_FUNC_1)
-               mask = E1000_SWFW_PHY1_SM;
-       else if (hw->bus.func == E1000_FUNC_2)
-               mask = E1000_SWFW_PHY2_SM;
-       else if (hw->bus.func == E1000_FUNC_3)
-               mask = E1000_SWFW_PHY3_SM;
-
-       return e1000_acquire_swfw_sync_82575(hw, mask);
-}
-
-/**
- *  e1000_release_phy_82575 - Release rights to access PHY
- *  @hw: pointer to the HW structure
- *
- *  A wrapper to release access rights to the correct PHY.
- **/
-static void e1000_release_phy_82575(struct e1000_hw *hw)
-{
-       u16 mask = E1000_SWFW_PHY0_SM;
-
-       DEBUGFUNC("e1000_release_phy_82575");
-
-       if (hw->bus.func == E1000_FUNC_1)
-               mask = E1000_SWFW_PHY1_SM;
-       else if (hw->bus.func == E1000_FUNC_2)
-               mask = E1000_SWFW_PHY2_SM;
-       else if (hw->bus.func == E1000_FUNC_3)
-               mask = E1000_SWFW_PHY3_SM;
-
-       e1000_release_swfw_sync_82575(hw, mask);
-}
-
-/**
- *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Reads the PHY register at offset using the serial gigabit media independent
- *  interface and stores the retrieved information in data.
- **/
-static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
-                                          u16 *data)
-{
-       s32 ret_val = -E1000_ERR_PARAM;
-
-       DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
-
-       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
-               DEBUGOUT1("PHY Address %u is out of range\n", offset);
-               goto out;
-       }
-
-       ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
-
-       hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Writes the data to PHY register at the offset using the serial gigabit
- *  media independent interface.
- **/
-static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
-                                           u16 data)
-{
-       s32 ret_val = -E1000_ERR_PARAM;
-
-       DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
-
-       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
-               DEBUGOUT1("PHY Address %u is out of range\n", offset);
-               goto out;
-       }
-
-       ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
-
-       hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
- *  @hw: pointer to the HW structure
- *
- *  Retrieves the PHY address and ID for both PHYs that do and do not use
- *  the sgmii interface.
- **/
-static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32  ret_val = E1000_SUCCESS;
-       u16 phy_id;
-       u32 ctrl_ext;
-       u32 mdic;
-
-       DEBUGFUNC("e1000_get_phy_id_82575");
-
-       /*
-        * For SGMII PHYs, we try the list of possible addresses until
-        * we find one that works.  For non-SGMII PHYs
-        * (e.g. integrated copper PHYs), an address of 1 should
-        * work.  The result of this function should mean phy->phy_addr
-        * and phy->id are set correctly.
-        */
-       if (!e1000_sgmii_active_82575(hw)) {
-               phy->addr = 1;
-               ret_val = e1000_get_phy_id(hw);
-               goto out;
-       }
-
-       if (e1000_sgmii_uses_mdio_82575(hw)) {
-               switch (hw->mac.type) {
-               case e1000_82575:
-               case e1000_82576:
-                       mdic = E1000_READ_REG(hw, E1000_MDIC);
-                       mdic &= E1000_MDIC_PHY_MASK;
-                       phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
-                       break;
-               case e1000_82580:
-               case e1000_i350:
-                       mdic = E1000_READ_REG(hw, E1000_MDICNFG);
-                       mdic &= E1000_MDICNFG_PHY_MASK;
-                       phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
-                       break;
-               default:
-                       ret_val = -E1000_ERR_PHY;
-                       goto out;
-                       break;
-               }
-               ret_val = e1000_get_phy_id(hw);
-               goto out;
-       }
-
-       /* Power on sgmii phy if it is disabled */
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
-                       ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
-       E1000_WRITE_FLUSH(hw);
-       msec_delay(300);
-
-       /*
-        * The address field in the I2CCMD register is 3 bits and 0 is invalid.
-        * Therefore, we need to test 1-7
-        */
-       for (phy->addr = 1; phy->addr < 8; phy->addr++) {
-               ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
-               if (ret_val == E1000_SUCCESS) {
-                       DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
-                                 phy_id,
-                                 phy->addr);
-                       /*
-                        * At the time of this writing, the M88 part is
-                        * the only supported SGMII PHY product.
-                        */
-                       if (phy_id == M88_VENDOR)
-                               break;
-               } else {
-                       DEBUGOUT1("PHY address %u was unreadable\n",
-                                 phy->addr);
-               }
-       }
-
-       /* A valid PHY type couldn't be found. */
-       if (phy->addr == 8) {
-               phy->addr = 0;
-               ret_val = -E1000_ERR_PHY;
-       } else {
-               ret_val = e1000_get_phy_id(hw);
-       }
-
-       /* restore previous sfp cage power state */
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
- *  @hw: pointer to the HW structure
- *
- *  Resets the PHY using the serial gigabit media independent interface.
- **/
-static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
-
-       /*
-        * This isn't a true "hard" reset, but it is the only reset
-        * available to us at this time.
-        */
-
-       DEBUGOUT("Soft resetting SGMII attached PHY...\n");
-
-       if (!(hw->phy.ops.write_reg))
-               goto out;
-
-       /*
-        * SFP documentation requires the following to configure the SFP module
-        * to work on SGMII.  No further documentation is given.
-        */
-       ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
-       if (ret_val)
-               goto out;
-
-       ret_val = hw->phy.ops.commit(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
- *  @hw: pointer to the HW structure
- *  @active: TRUE to enable LPLU, FALSE to disable
- *
- *  Sets the LPLU D0 state according to the active flag.  When
- *  activating LPLU this function also disables smart speed
- *  and vice versa.  LPLU will not be activated unless the
- *  device autonegotiation advertisement meets standards of
- *  either 10 or 10/100 or 10/100/1000 at all duplexes.
- *  This is a function pointer entry point only called by
- *  PHY setup routines.
- **/
-static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u16 data;
-
-       DEBUGFUNC("e1000_set_d0_lplu_state_82575");
-
-       if (!(hw->phy.ops.read_reg))
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
-       if (ret_val)
-               goto out;
-
-       if (active) {
-               data |= IGP02E1000_PM_D0_LPLU;
-               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                            data);
-               if (ret_val)
-                       goto out;
-
-               /* When LPLU is enabled, we should disable SmartSpeed */
-               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                           &data);
-               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                            data);
-               if (ret_val)
-                       goto out;
-       } else {
-               data &= ~IGP02E1000_PM_D0_LPLU;
-               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                            data);
-               /*
-                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
-                * during Dx states where the power conservation is most
-                * important.  During driver activity we should enable
-                * SmartSpeed, so performance is maintained.
-                */
-               if (phy->smart_speed == e1000_smart_speed_on) {
-                       ret_val = phy->ops.read_reg(hw,
-                                                   IGP01E1000_PHY_PORT_CONFIG,
-                                                   &data);
-                       if (ret_val)
-                               goto out;
-
-                       data |= IGP01E1000_PSCFR_SMART_SPEED;
-                       ret_val = phy->ops.write_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    data);
-                       if (ret_val)
-                               goto out;
-               } else if (phy->smart_speed == e1000_smart_speed_off) {
-                       ret_val = phy->ops.read_reg(hw,
-                                                   IGP01E1000_PHY_PORT_CONFIG,
-                                                   &data);
-                       if (ret_val)
-                               goto out;
-
-                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-                       ret_val = phy->ops.write_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    data);
-                       if (ret_val)
-                               goto out;
-               }
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
- *  @hw: pointer to the HW structure
- *  @active: TRUE to enable LPLU, FALSE to disable
- *
- *  Sets the LPLU D0 state according to the active flag.  When
- *  activating LPLU this function also disables smart speed
- *  and vice versa.  LPLU will not be activated unless the
- *  device autonegotiation advertisement meets standards of
- *  either 10 or 10/100 or 10/100/1000 at all duplexes.
- *  This is a function pointer entry point only called by
- *  PHY setup routines.
- **/
-static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u16 data;
-
-       DEBUGFUNC("e1000_set_d0_lplu_state_82580");
-
-       data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
-
-       if (active) {
-               data |= E1000_82580_PM_D0_LPLU;
-
-               /* When LPLU is enabled, we should disable SmartSpeed */
-               data &= ~E1000_82580_PM_SPD;
-       } else {
-               data &= ~E1000_82580_PM_D0_LPLU;
-
-               /*
-                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
-                * during Dx states where the power conservation is most
-                * important.  During driver activity we should enable
-                * SmartSpeed, so performance is maintained.
-                */
-               if (phy->smart_speed == e1000_smart_speed_on) {
-                       data |= E1000_82580_PM_SPD;
-               } else if (phy->smart_speed == e1000_smart_speed_off) {
-                       data &= ~E1000_82580_PM_SPD;
-               }
-       }
-
-       E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
-       return ret_val;
-}
-
-/**
- *  e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D3
- *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
- *  and enable SmartSpeed.  LPLU and SmartSpeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.
- **/
-s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u16 data;
-
-       DEBUGFUNC("e1000_set_d3_lplu_state_82580");
-
-       data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
-
-       if (!active) {
-               data &= ~E1000_82580_PM_D3_LPLU;
-               /*
-                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
-                * during Dx states where the power conservation is most
-                * important.  During driver activity we should enable
-                * SmartSpeed, so performance is maintained.
-                */
-               if (phy->smart_speed == e1000_smart_speed_on) {
-                       data |= E1000_82580_PM_SPD;
-               } else if (phy->smart_speed == e1000_smart_speed_off) {
-                       data &= ~E1000_82580_PM_SPD;
-               }
-       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
-                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
-                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
-               data |= E1000_82580_PM_D3_LPLU;
-               /* When LPLU is enabled, we should disable SmartSpeed */
-               data &= ~E1000_82580_PM_SPD;
-       }
-
-       E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
-       return ret_val;
-}
-
-/**
- *  e1000_acquire_nvm_82575 - Request for access to EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Acquire the necessary semaphores for exclusive access to the EEPROM.
- *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
- *  Return successful if access grant bit set, else clear the request for
- *  EEPROM access and return -E1000_ERR_NVM (-1).
- **/
-static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
-{
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_acquire_nvm_82575");
-
-       ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Check for any access error that this
-        * access may run into.
-        */
-       if (hw->mac.type == e1000_i350) {
-               u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-               if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
-                   E1000_EECD_TIMEOUT)) {
-                       /* Clear all access error flags */
-                       E1000_WRITE_REG(hw, E1000_EECD, eecd |
-                                       E1000_EECD_ERROR_CLR);
-                       DEBUGOUT("Nvm bit banging access error"
-                               " detected and cleared.\n");
-               }
-       }
-       if (hw->mac.type == e1000_82580) {
-               u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-               if (eecd & E1000_EECD_BLOCKED) {
-                       /* Clear access error flag */
-                       E1000_WRITE_REG(hw, E1000_EECD, eecd |
-                                       E1000_EECD_BLOCKED);
-                       DEBUGOUT("Nvm bit banging access"
-                               " error detected and cleared.\n");
-               }
-       }
-
-       ret_val = e1000_acquire_nvm_generic(hw);
-       if (ret_val)
-               e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
- *  then release the semaphores acquired.
- **/
-static void e1000_release_nvm_82575(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_release_nvm_82575");
-
-       e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
-}
-
-/**
- *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
- *  @hw: pointer to the HW structure
- *  @mask: specifies which semaphore to acquire
- *
- *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
- *  will also specify which port we're acquiring the lock for.
- **/
-static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
-{
-       u32 swfw_sync;
-       u32 swmask = mask;
-       u32 fwmask = mask << 16;
-       s32 ret_val = E1000_SUCCESS;
-       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
-
-       DEBUGFUNC("e1000_acquire_swfw_sync_82575");
-
-       while (i < timeout) {
-               if (e1000_get_hw_semaphore_generic(hw)) {
-                       ret_val = -E1000_ERR_SWFW_SYNC;
-                       goto out;
-               }
-
-               swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
-               if (!(swfw_sync & (fwmask | swmask)))
-                       break;
-
-               /*
-                * Firmware currently using resource (fwmask)
-                * or other software thread using resource (swmask)
-                */
-               e1000_put_hw_semaphore_generic(hw);
-               msec_delay_irq(5);
-               i++;
-       }
-
-       if (i == timeout) {
-               DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
-               ret_val = -E1000_ERR_SWFW_SYNC;
-               goto out;
-       }
-
-       swfw_sync |= swmask;
-       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
-
-       e1000_put_hw_semaphore_generic(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
- *  @hw: pointer to the HW structure
- *  @mask: specifies which semaphore to acquire
- *
- *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
- *  will also specify which port we're releasing the lock for.
- **/
-static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
-{
-       u32 swfw_sync;
-
-       DEBUGFUNC("e1000_release_swfw_sync_82575");
-
-       while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
-               ; /* Empty */
-
-       swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
-       swfw_sync &= ~mask;
-       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
-
-       e1000_put_hw_semaphore_generic(hw);
-}
-
-/**
- *  e1000_get_cfg_done_82575 - Read config done bit
- *  @hw: pointer to the HW structure
- *
- *  Read the management control register for the config done bit for
- *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
- *  to read the config done bit, so the error is *ONLY* logged and
- *  E1000_SUCCESS is returned.  If we were to return an error, EEPROM-less
- *  silicon would not be able to be reset or change link.
- **/
-static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
-{
-       s32 timeout = PHY_CFG_TIMEOUT;
-       s32 ret_val = E1000_SUCCESS;
-       u32 mask = E1000_NVM_CFG_DONE_PORT_0;
-
-       DEBUGFUNC("e1000_get_cfg_done_82575");
-
-       if (hw->bus.func == E1000_FUNC_1)
-               mask = E1000_NVM_CFG_DONE_PORT_1;
-       else if (hw->bus.func == E1000_FUNC_2)
-               mask = E1000_NVM_CFG_DONE_PORT_2;
-       else if (hw->bus.func == E1000_FUNC_3)
-               mask = E1000_NVM_CFG_DONE_PORT_3;
-       while (timeout) {
-               if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
-                       break;
-               msec_delay(1);
-               timeout--;
-       }
-       if (!timeout)
-               DEBUGOUT("MNG configuration cycle has not completed.\n");
-
-       /* If EEPROM is not marked present, init the PHY manually */
-       if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
-           (hw->phy.type == e1000_phy_igp_3))
-               e1000_phy_init_script_igp3(hw);
-
-       return ret_val;
-}
-
-/**
- *  e1000_get_link_up_info_82575 - Get link speed/duplex info
- *  @hw: pointer to the HW structure
- *  @speed: stores the current speed
- *  @duplex: stores the current duplex
- *
- *  This is a wrapper function, if using the serial gigabit media independent
- *  interface, use PCS to retrieve the link speed and duplex information.
- *  Otherwise, use the generic function to get the link speed and duplex info.
- **/
-static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
-                                        u16 *duplex)
-{
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_get_link_up_info_82575");
-
-       if (hw->phy.media_type != e1000_media_type_copper)
-               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
-                                                              duplex);
-       else
-               ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
-                                                                   duplex);
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_link_82575 - Check for link
- *  @hw: pointer to the HW structure
- *
- *  If sgmii is enabled, then use the pcs register to determine link, otherwise
- *  use the generic interface for determining link.
- **/
-static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
-{
-       s32 ret_val;
-       u16 speed, duplex;
-
-       DEBUGFUNC("e1000_check_for_link_82575");
-
-       if (hw->phy.media_type != e1000_media_type_copper) {
-               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
-                                                              &duplex);
-               /*
-                * Use this flag to determine if link needs to be checked or
-                * not.  If we have link, clear the flag so that we do not
-                * continue to check for link.
-                */
-               hw->mac.get_link_status = !hw->mac.serdes_has_link;
-       } else {
-               ret_val = e1000_check_for_copper_link_generic(hw);
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
- *  @hw: pointer to the HW structure
- **/
-static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
-{
-       u32 reg;
-
-       DEBUGFUNC("e1000_power_up_serdes_link_82575");
-
-       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
-           !e1000_sgmii_active_82575(hw))
-               return;
-
-       /* Enable PCS to turn on link */
-       reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
-       reg |= E1000_PCS_CFG_PCS_EN;
-       E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
-
-       /* Power up the laser */
-       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       reg &= ~E1000_CTRL_EXT_SDP3_DATA;
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
-
-       /* flush the write to verify completion */
-       E1000_WRITE_FLUSH(hw);
-       msec_delay(1);
-}
-
-/**
- *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
- *  @hw: pointer to the HW structure
- *  @speed: stores the current speed
- *  @duplex: stores the current duplex
- *
- *  Using the physical coding sub-layer (PCS), retrieve the current speed and
- *  duplex, then store the values in the pointers provided.
- **/
-static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
-                                                u16 *speed, u16 *duplex)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 pcs;
-
-       DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
-
-       /* Set up defaults for the return values of this function */
-       mac->serdes_has_link = FALSE;
-       *speed = 0;
-       *duplex = 0;
-
-       /*
-        * Read the PCS Status register for link state. For non-copper mode,
-        * the MAC status register is not accurate, so the PCS status
-        * register is used instead.
-        */
-       pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
-
-       /*
-        * The link up bit determines when link is up on autoneg. The sync ok
-        * gets set once both sides sync up and agree upon link. Stable link
-        * can be determined by checking for both link up and link sync ok
-        */
-       if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
-               mac->serdes_has_link = TRUE;
-
-               /* Detect and store PCS speed */
-               if (pcs & E1000_PCS_LSTS_SPEED_1000) {
-                       *speed = SPEED_1000;
-               } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
-                       *speed = SPEED_100;
-               } else {
-                       *speed = SPEED_10;
-               }
-
-               /* Detect and store PCS duplex */
-               if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
-                       *duplex = FULL_DUPLEX;
-               } else {
-                       *duplex = HALF_DUPLEX;
-               }
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_shutdown_serdes_link_82575 - Remove link during power down
- *  @hw: pointer to the HW structure
- *
- *  In the case of serdes, shut down the SFP and PCS on driver unload
- *  when management pass-through is not enabled.
- **/
-void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
-{
-       u32 reg;
-
-       DEBUGFUNC("e1000_shutdown_serdes_link_82575");
-
-       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
-           !e1000_sgmii_active_82575(hw))
-               return;
-
-       if (!e1000_enable_mng_pass_thru(hw)) {
-               /* Disable PCS to turn off link */
-               reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
-               reg &= ~E1000_PCS_CFG_PCS_EN;
-               E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
-
-               /* shutdown the laser */
-               reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
-               reg |= E1000_CTRL_EXT_SDP3_DATA;
-               E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
-
-               /* flush the write to verify completion */
-               E1000_WRITE_FLUSH(hw);
-               msec_delay(1);
-       }
-
-       return;
-}
-
-/**
- *  e1000_reset_hw_82575 - Reset hardware
- *  @hw: pointer to the HW structure
- *
- *  This resets the hardware into a known state.
- **/
-static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
-{
-       u32 ctrl;
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_reset_hw_82575");
-
-       /*
-        * Prevent the PCI-E bus from sticking if there is no TLP connection
-        * on the last TLP read/write transaction when MAC is reset.
-        */
-       ret_val = e1000_disable_pcie_master_generic(hw);
-       if (ret_val) {
-               DEBUGOUT("PCI-E Master disable polling has failed.\n");
-       }
-
-       /* set the completion timeout for interface */
-       ret_val = e1000_set_pcie_completion_timeout(hw);
-       if (ret_val) {
-               DEBUGOUT("PCI-E Set completion timeout has failed.\n");
-       }
-
-       DEBUGOUT("Masking off all interrupts\n");
-       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
-
-       E1000_WRITE_REG(hw, E1000_RCTL, 0);
-       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
-       E1000_WRITE_FLUSH(hw);
-
-       msec_delay(10);
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
-       DEBUGOUT("Issuing a global reset to MAC\n");
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
-
-       ret_val = e1000_get_auto_rd_done_generic(hw);
-       if (ret_val) {
-               /*
-                * When auto config read does not complete, do not
-                * return with an error. This can happen in situations
-                * where there is no eeprom and prevents getting link.
-                */
-               DEBUGOUT("Auto Read Done did not complete\n");
-       }
-
-       /* If EEPROM is not present, run manual init scripts */
-       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
-               e1000_reset_init_script_82575(hw);
-
-       /* Clear any pending interrupt events. */
-       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
-       E1000_READ_REG(hw, E1000_ICR);
-
-       /* Install any alternate MAC address into RAR0 */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-
-       return ret_val;
-}
-
-/**
- *  e1000_init_hw_82575 - Initialize hardware
- *  @hw: pointer to the HW structure
- *
- *  This inits the hardware readying it for operation.
- **/
-static s32 e1000_init_hw_82575(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val;
-       u16 i, rar_count = mac->rar_entry_count;
-
-       DEBUGFUNC("e1000_init_hw_82575");
-
-       /* Initialize identification LED */
-       ret_val = mac->ops.id_led_init(hw);
-       if (ret_val) {
-               DEBUGOUT("Error initializing identification LED\n");
-               /* This is not fatal and we should not stop init due to this */
-       }
-
-       /* Disabling VLAN filtering */
-       DEBUGOUT("Initializing the IEEE VLAN\n");
-       mac->ops.clear_vfta(hw);
-
-       /* Setup the receive address */
-       e1000_init_rx_addrs_generic(hw, rar_count);
-
-       /* Zero out the Multicast HASH table */
-       DEBUGOUT("Zeroing the MTA\n");
-       for (i = 0; i < mac->mta_reg_count; i++)
-               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
-
-       /* Zero out the Unicast HASH table */
-       DEBUGOUT("Zeroing the UTA\n");
-       for (i = 0; i < mac->uta_reg_count; i++)
-               E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
-
-       /* Setup link and flow control */
-       ret_val = mac->ops.setup_link(hw);
-
-       /*
-        * Clear all of the statistics registers (clear on read).  It is
-        * important that we do this after we have tried to establish link
-        * because the symbol error count will increment wildly if there
-        * is no link.
-        */
-       e1000_clear_hw_cntrs_82575(hw);
-
-       return ret_val;
-}
-
-/**
- *  e1000_setup_copper_link_82575 - Configure copper link settings
- *  @hw: pointer to the HW structure
- *
- *  Configures the link for auto-neg or forced speed and duplex.  Then we check
- *  for link; once link is established, the collision distance and flow
- *  control are configured.
- **/
-static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
-{
-       u32 ctrl;
-       s32  ret_val;
-
-       DEBUGFUNC("e1000_setup_copper_link_82575");
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       ctrl |= E1000_CTRL_SLU;
-       ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
-       ret_val = e1000_setup_serdes_link_82575(hw);
-       if (ret_val)
-               goto out;
-
-       if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
-               /* allow time for the SFP cage to power up the PHY */
-               msec_delay(300);
-
-               ret_val = hw->phy.ops.reset(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error resetting the PHY.\n");
-                       goto out;
-               }
-       }
-       switch (hw->phy.type) {
-       case e1000_phy_m88:
-               if (hw->phy.id == I347AT4_E_PHY_ID ||
-                   hw->phy.id == M88E1112_E_PHY_ID ||
-                   hw->phy.id == M88E1340M_E_PHY_ID)
-                       ret_val = e1000_copper_link_setup_m88_gen2(hw);
-               else
-                       ret_val = e1000_copper_link_setup_m88(hw);
-               break;
-       case e1000_phy_igp_3:
-               ret_val = e1000_copper_link_setup_igp(hw);
-               break;
-       case e1000_phy_82580:
-               ret_val = e1000_copper_link_setup_82577(hw);
-               break;
-       default:
-               ret_val = -E1000_ERR_PHY;
-               break;
-       }
-
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_setup_copper_link_generic(hw);
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_setup_serdes_link_82575 - Setup link for serdes
- *  @hw: pointer to the HW structure
- *
- *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
- *  used on copper connections where the serial gigabit media independent
- *  interface (sgmii) or serdes fiber is being used.  Configures the link
- *  for auto-negotiation or forces speed/duplex.
- **/
-static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
-{
-       u32 ctrl_ext, ctrl_reg, reg;
-       bool pcs_autoneg;
-
-       DEBUGFUNC("e1000_setup_serdes_link_82575");
-
-       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
-           !e1000_sgmii_active_82575(hw))
-               return E1000_SUCCESS;
-
-       /*
-        * On the 82575, SerDes loopback mode persists until it is
-        * explicitly turned off or a power cycle is performed.  A read to
-        * the register does not indicate its status.  Therefore, we ensure
-        * loopback mode is disabled during initialization.
-        */
-       E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
-
-       /* power on the sfp cage if present */
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-
-       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
-       ctrl_reg |= E1000_CTRL_SLU;
-
-       /* set both sw defined pins on 82575/82576*/
-       if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
-               ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
-
-       reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
-
-       /* default pcs_autoneg to the same setting as mac autoneg */
-       pcs_autoneg = hw->mac.autoneg;
-
-       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
-       case E1000_CTRL_EXT_LINK_MODE_SGMII:
-               /* sgmii mode lets the phy handle forcing speed/duplex */
-               pcs_autoneg = TRUE;
-               /* autoneg timeout should be disabled for SGMII mode */
-               reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
-               break;
-       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
-               /* disable PCS autoneg and support parallel detect only */
-               pcs_autoneg = FALSE;
-               /* fall through to default case */
-       default:
-               /*
-                * non-SGMII modes only support a speed of 1000/Full for the
-                * link, so it is best to just force the MAC and let the PCS
-                * link either autoneg or be forced to 1000/Full.
-                */
-               ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
-                           E1000_CTRL_FD | E1000_CTRL_FRCDPX;
-
-               /* set speed of 1000/Full if speed/duplex is forced */
-               reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
-               break;
-       }
-
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
-
-       /*
-        * New SerDes mode allows for forcing speed or autonegotiating speed
-        * at 1gb. Autoneg should be the default set by most drivers. This is the
-        * mode that will be compatible with older link partners and switches.
-        * However, both are supported by the hardware and some drivers/tools.
-        */
-       reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
-                E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
-
-       /*
-        * We force flow control to prevent the CTRL register values from being
-        * overwritten by the autonegotiated flow control values
-        */
-       reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
-       if (pcs_autoneg) {
-               /* Set PCS register for autoneg */
-               reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
-                      E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
-               DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
-       } else {
-               /* Set PCS register for forced link */
-               reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
-               DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
-       }
-
-       E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
-
-       if (!e1000_sgmii_active_82575(hw))
-               e1000_force_mac_fc_generic(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_valid_led_default_82575 - Verify a valid default LED config
- *  @hw: pointer to the HW structure
- *  @data: pointer to the NVM (EEPROM)
- *
- *  Read the EEPROM for the current default LED configuration.  If the
- *  LED configuration is not valid, set to a valid LED configuration.
- **/
-static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
-{
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_valid_led_default_82575");
-
-       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
-               switch (hw->phy.media_type) {
-               case e1000_media_type_internal_serdes:
-                       *data = ID_LED_DEFAULT_82575_SERDES;
-                       break;
-               case e1000_media_type_copper:
-               default:
-                       *data = ID_LED_DEFAULT;
-                       break;
-               }
-       }
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_sgmii_active_82575 - Return sgmii state
- *  @hw: pointer to the HW structure
- *
- *  82575 silicon has a serial gigabit media independent interface (sgmii)
- *  which can be enabled for use in the embedded applications.  Simply
- *  return the current state of the sgmii interface.
- **/
-static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
-{
-       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
-       return dev_spec->sgmii_active;
-}
-
-/**
- *  e1000_reset_init_script_82575 - Inits HW defaults after reset
- *  @hw: pointer to the HW structure
- *
- *  Inits recommended HW defaults after a reset when there is no EEPROM
- *  detected. This is only for the 82575.
- **/
-static s32 e1000_reset_init_script_82575(struct e1000_hw* hw)
-{
-       DEBUGFUNC("e1000_reset_init_script_82575");
-
-       if (hw->mac.type == e1000_82575) {
-               DEBUGOUT("Running reset init script for 82575\n");
-               /* SerDes configuration via SERDESCTRL */
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
-
-               /* CCM configuration via CCMCTL register */
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
-
-               /* PCIe lanes configuration */
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
-
-               /* PCIe PLL Configuration */
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
-               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_mac_addr_82575 - Read device MAC address
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_read_mac_addr_82575");
-
-       /*
-        * If there's an alternate MAC address, place it in RAR0
-        * so that it will override the silicon-installed default
-        * permanent address.
-        */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_read_mac_addr_generic(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_config_collision_dist_82575 - Configure collision distance
- *  @hw: pointer to the HW structure
- *
- *  Configures the collision distance to the default value and is used
- *  during link setup.
- **/
-static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
-{
-       u32 tctl_ext;
-
-       DEBUGFUNC("e1000_config_collision_dist_82575");
-
-       tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
-
-       tctl_ext &= ~E1000_TCTL_EXT_COLD;
-       tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
-
-       E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
-       E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
- * @hw: pointer to the HW structure
- *
- * In the case of a PHY power down to save power, or to turn off link during a
- * driver unload, or when wake on LAN is not enabled, remove the link.
- **/
-static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-
-       if (!(phy->ops.check_reset_block))
-               return;
-
-       /* If the management interface is not enabled, then power down */
-       if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
-               e1000_power_down_phy_copper(hw);
-
-       return;
-}
-
-/**
- *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
- *  @hw: pointer to the HW structure
- *
- *  Clears the hardware counters by reading the counter registers.
- **/
-static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_clear_hw_cntrs_82575");
-
-       e1000_clear_hw_cntrs_base_generic(hw);
-
-       E1000_READ_REG(hw, E1000_PRC64);
-       E1000_READ_REG(hw, E1000_PRC127);
-       E1000_READ_REG(hw, E1000_PRC255);
-       E1000_READ_REG(hw, E1000_PRC511);
-       E1000_READ_REG(hw, E1000_PRC1023);
-       E1000_READ_REG(hw, E1000_PRC1522);
-       E1000_READ_REG(hw, E1000_PTC64);
-       E1000_READ_REG(hw, E1000_PTC127);
-       E1000_READ_REG(hw, E1000_PTC255);
-       E1000_READ_REG(hw, E1000_PTC511);
-       E1000_READ_REG(hw, E1000_PTC1023);
-       E1000_READ_REG(hw, E1000_PTC1522);
-
-       E1000_READ_REG(hw, E1000_ALGNERRC);
-       E1000_READ_REG(hw, E1000_RXERRC);
-       E1000_READ_REG(hw, E1000_TNCRS);
-       E1000_READ_REG(hw, E1000_CEXTERR);
-       E1000_READ_REG(hw, E1000_TSCTC);
-       E1000_READ_REG(hw, E1000_TSCTFC);
-
-       E1000_READ_REG(hw, E1000_MGTPRC);
-       E1000_READ_REG(hw, E1000_MGTPDC);
-       E1000_READ_REG(hw, E1000_MGTPTC);
-
-       E1000_READ_REG(hw, E1000_IAC);
-       E1000_READ_REG(hw, E1000_ICRXOC);
-
-       E1000_READ_REG(hw, E1000_ICRXPTC);
-       E1000_READ_REG(hw, E1000_ICRXATC);
-       E1000_READ_REG(hw, E1000_ICTXPTC);
-       E1000_READ_REG(hw, E1000_ICTXATC);
-       E1000_READ_REG(hw, E1000_ICTXQEC);
-       E1000_READ_REG(hw, E1000_ICTXQMTC);
-       E1000_READ_REG(hw, E1000_ICRXDMTC);
-
-       E1000_READ_REG(hw, E1000_CBTMPC);
-       E1000_READ_REG(hw, E1000_HTDPMC);
-       E1000_READ_REG(hw, E1000_CBRMPC);
-       E1000_READ_REG(hw, E1000_RPTHC);
-       E1000_READ_REG(hw, E1000_HGPTC);
-       E1000_READ_REG(hw, E1000_HTCBDPC);
-       E1000_READ_REG(hw, E1000_HGORCL);
-       E1000_READ_REG(hw, E1000_HGORCH);
-       E1000_READ_REG(hw, E1000_HGOTCL);
-       E1000_READ_REG(hw, E1000_HGOTCH);
-       E1000_READ_REG(hw, E1000_LENERRS);
-
-       /* This register should not be read in copper configurations */
-       if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
-           e1000_sgmii_active_82575(hw))
-               E1000_READ_REG(hw, E1000_SCVPC);
-}
-
-/**
- *  e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
- *  @hw: pointer to the HW structure
- *
- *  After Rx enable, if manageability is enabled then there is likely some
- *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
- *  function clears the FIFOs and flushes any packets that came in while Rx
- *  was being enabled.
- **/
-void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
-{
-       u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
-       int i, ms_wait;
-
-       DEBUGFUNC("e1000_rx_fifo_workaround_82575");
-       if (hw->mac.type != e1000_82575 ||
-           !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
-               return;
-
-       /* Disable all Rx queues */
-       for (i = 0; i < 4; i++) {
-               rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
-               E1000_WRITE_REG(hw, E1000_RXDCTL(i),
-                               rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
-       }
-       /* Poll all queues to verify they have shut down */
-       for (ms_wait = 0; ms_wait < 10; ms_wait++) {
-               msec_delay(1);
-               rx_enabled = 0;
-               for (i = 0; i < 4; i++)
-                       rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
-               if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
-                       break;
-       }
-
-       if (ms_wait == 10)
-               DEBUGOUT("Queue disable timed out after 10ms\n");
-
-       /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
-        * incoming packets are rejected.  Set enable and wait 2ms so that
-        * any packet that was coming in as RCTL.EN was set is flushed
-        */
-       rfctl = E1000_READ_REG(hw, E1000_RFCTL);
-       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
-
-       rlpml = E1000_READ_REG(hw, E1000_RLPML);
-       E1000_WRITE_REG(hw, E1000_RLPML, 0);
-
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
-       temp_rctl |= E1000_RCTL_LPE;
-
-       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
-       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
-       E1000_WRITE_FLUSH(hw);
-       msec_delay(2);
-
-       /* Enable Rx queues that were previously enabled and restore our
-        * previous state
-        */
-       for (i = 0; i < 4; i++)
-               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-       E1000_WRITE_FLUSH(hw);
-
-       E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
-       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
-
-       /* Flush receive errors generated by workaround */
-       E1000_READ_REG(hw, E1000_ROC);
-       E1000_READ_REG(hw, E1000_RNBC);
-       E1000_READ_REG(hw, E1000_MPC);
-}
-
-/**
- *  e1000_set_pcie_completion_timeout - set pci-e completion timeout
- *  @hw: pointer to the HW structure
- *
- *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
- *  however, the hardware default for these parts is 500us to 1ms, which is
- *  less than the 10ms recommended by the PCI-e spec.  To address this we need
- *  to increase the value to either 10ms to 200ms for capability version 1
- *  config, or 16ms to 55ms for version 2.
- **/
-static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
-{
-       u32 gcr = E1000_READ_REG(hw, E1000_GCR);
-       s32 ret_val = E1000_SUCCESS;
-       u16 pcie_devctl2;
-
-       /* only take action if timeout value is defaulted to 0 */
-       if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
-               goto out;
-
-       /*
-        * if capabilities version is type 1 we can write the
-        * timeout of 10ms to 200ms through the GCR register
-        */
-       if (!(gcr & E1000_GCR_CAP_VER2)) {
-               gcr |= E1000_GCR_CMPL_TMOUT_10ms;
-               goto out;
-       }
-
-       /*
-        * for version 2 capabilities we need to write the config space
-        * directly in order to set the completion timeout value for
-        * 16ms to 55ms
-        */
-       ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
-                                         &pcie_devctl2);
-       if (ret_val)
-               goto out;
-
-       pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
-
-       ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
-                                          &pcie_devctl2);
-out:
-       /* disable completion timeout resend */
-       gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
-
-       E1000_WRITE_REG(hw, E1000_GCR, gcr);
-       return ret_val;
-}
-
-/**
- *  e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *  @pf: Physical Function pool - do not set anti-spoofing for the PF
- *
- *  enables/disables L2 switch anti-spoofing functionality.
- **/
-void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
-{
-       u32 dtxswc;
-
-       switch (hw->mac.type) {
-       case e1000_82576:
-               dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
-               if (enable) {
-                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
-                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
-                       /* The PF can spoof - it has to in order to
-                        * support emulation mode NICs */
-                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
-               } else {
-                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
-                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
-               }
-               E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
-               break;
-       case e1000_i350:
-               dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
-               if (enable) {
-                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
-                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
-                       /* The PF can spoof - it has to in order to
-                        * support emulation mode NICs
-                        */
-                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
-               } else {
-                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
-                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
-               }
-               E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
-               break;
-       default:
-               break;
-       }
-}
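-
-/*
- * Illustrative note (example value assumed, pf = 0): with MAX_NUM_VFS = 8,
- * enabling anti-spoofing first sets every bit in the MAC and VLAN spoof
- * masks and then XORs out bits 0 and 8, i.e. it clears the PF pool's own
- * anti-spoof bits again so the PF may spoof while all VF pools stay checked.
- */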
-
-/**
- *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *
- *  enables/disables L2 switch loopback functionality.
- **/
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
-{
-       u32 dtxswc;
-
-       switch (hw->mac.type) {
-       case e1000_82576:
-               dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
-               if (enable)
-                       dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-               else
-                       dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-               E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
-               break;
-       case e1000_i350:
-               dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
-               if (enable)
-                       dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-               else
-                       dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-               E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
-               break;
-       default:
-               /* Currently no other hardware supports loopback */
-               break;
-       }
-}
-
-/**
- *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *
- *  enables/disables replication of packets across multiple pools.
- **/
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
-{
-       u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
-
-       if (enable)
-               vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
-       else
-               vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
-
-       E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
-}
-
-/**
- *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Reads the MDI control register in the PHY at offset and stores the
- *  information read to data.
- **/
-static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_read_phy_reg_82580");
-
-       ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
-
-       hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_phy_reg_82580 - Write 82580 MDI control register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write to register at offset
- *
- *  Writes data to MDI control register in the PHY at offset.
- **/
-static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_write_phy_reg_82580");
-
-       ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
-
-       hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
- *  @hw: pointer to the HW structure
- *
- *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
- *  the values found in the EEPROM.  This addresses an issue in which these
- *  bits are not restored from EEPROM after reset.
- **/
-static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u32 mdicnfg;
-       u16 nvm_data = 0;
-
-       DEBUGFUNC("e1000_reset_mdicnfg_82580");
-
-       if (hw->mac.type != e1000_82580)
-               goto out;
-       if (!e1000_sgmii_active_82575(hw))
-               goto out;
-
-       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
-                                  NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
-                                  &nvm_data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
-       if (nvm_data & NVM_WORD24_EXT_MDIO)
-               mdicnfg |= E1000_MDICNFG_EXT_MDIO;
-       if (nvm_data & NVM_WORD24_COM_MDIO)
-               mdicnfg |= E1000_MDICNFG_COM_MDIO;
-       E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_reset_hw_82580 - Reset hardware
- *  @hw: pointer to the HW structure
- *
- *  This resets function or entire device (all ports, etc.)
- *  to a known state.
- **/
-static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       /* BH SW mailbox bit in SW_FW_SYNC */
-       u16 swmbsw_mask = E1000_SW_SYNCH_MB;
-       u32 ctrl;
-       bool global_device_reset = hw->dev_spec._82575.global_device_reset;
-
-       DEBUGFUNC("e1000_reset_hw_82580");
-
-       hw->dev_spec._82575.global_device_reset = FALSE;
-
-       /* Get current control state. */
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
-       /*
-        * Prevent the PCI-E bus from sticking if there is no TLP connection
-        * on the last TLP read/write transaction when MAC is reset.
-        */
-       ret_val = e1000_disable_pcie_master_generic(hw);
-       if (ret_val)
-               DEBUGOUT("PCI-E Master disable polling has failed.\n");
-
-       DEBUGOUT("Masking off all interrupts\n");
-       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
-       E1000_WRITE_REG(hw, E1000_RCTL, 0);
-       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
-       E1000_WRITE_FLUSH(hw);
-
-       msec_delay(10);
-
-       /* Determine whether or not a global dev reset is requested */
-       if (global_device_reset &&
-               e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
-                       global_device_reset = FALSE;
-
-       if (global_device_reset &&
-               !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
-               ctrl |= E1000_CTRL_DEV_RST;
-       else
-               ctrl |= E1000_CTRL_RST;
-
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
-       /* Add delay to ensure DEV_RST has time to complete */
-       if (global_device_reset)
-               msec_delay(5);
-
-       ret_val = e1000_get_auto_rd_done_generic(hw);
-       if (ret_val) {
-               /*
-                * When auto config read does not complete, do not
-                * return with an error. This can happen in situations
-                * where there is no EEPROM, and returning an error here
-                * would prevent getting link.
-                */
-               DEBUGOUT("Auto Read Done did not complete\n");
-       }
-
-       /* If EEPROM is not present, run manual init scripts */
-       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
-               e1000_reset_init_script_82575(hw);
-
-       /* clear global device reset status bit */
-       E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
-
-       /* Clear any pending interrupt events. */
-       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
-       E1000_READ_REG(hw, E1000_ICR);
-
-       ret_val = e1000_reset_mdicnfg_82580(hw);
-       if (ret_val)
-               DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
-
-       /* Install any alternate MAC address into RAR0 */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-
-       /* Release semaphore */
-       if (global_device_reset)
-               e1000_release_swfw_sync_82575(hw, swmbsw_mask);
-
-       return ret_val;
-}
-
-/**
- *  e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
- *  @data: data received by reading RXPBS register
- *
- *  The 82580 uses a table based approach for packet buffer allocation sizes.
- *  This function converts the retrieved value into the correct table value
- *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
- *  0x0 36  72 144   1   2   4   8  16
- *  0x8 35  70 140 rsv rsv rsv rsv rsv
- */
-u16 e1000_rxpbs_adjust_82580(u32 data)
-{
-       u16 ret_val = 0;
-
-       if (data < E1000_82580_RXPBS_TABLE_SIZE)
-               ret_val = e1000_82580_rxpbs_table[data];
-
-       return ret_val;
-}
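-
-/*
- * Illustrative example based on the table above: an RXPBS field value of
- * 0x2 maps to 144 and 0x8 maps to 35, while any value at or beyond
- * E1000_82580_RXPBS_TABLE_SIZE falls outside e1000_82580_rxpbs_table and
- * returns 0.
- */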
-
-/**
- *  e1000_validate_nvm_checksum_with_offset - Validate EEPROM
- *  checksum
- *  @hw: pointer to the HW structure
- *  @offset: offset in words of the checksum protected region
- *
- *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
- *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
- **/
-s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 checksum = 0;
-       u16 i, nvm_data;
-
-       DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
-
-       for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
-               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Read Error\n");
-                       goto out;
-               }
-               checksum += nvm_data;
-       }
-
-       if (checksum != (u16) NVM_SUM) {
-               DEBUGOUT("NVM Checksum Invalid\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_update_nvm_checksum_with_offset - Update EEPROM
- *  checksum
- *  @hw: pointer to the HW structure
- *  @offset: offset in words of the checksum protected region
- *
- *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
- *  up to the checksum.  Then calculates the EEPROM checksum and writes the
- *  value to the EEPROM.
- **/
-s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
-{
-       s32 ret_val;
-       u16 checksum = 0;
-       u16 i, nvm_data;
-
-       DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
-
-       for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
-               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Read Error while updating checksum.\n");
-                       goto out;
-               }
-               checksum += nvm_data;
-       }
-       checksum = (u16) NVM_SUM - checksum;
-       ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
-                               &checksum);
-       if (ret_val)
-               DEBUGOUT("NVM Write Error while updating checksum.\n");
-
-out:
-       return ret_val;
-}
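-
-/*
- * Illustrative arithmetic (example word sum assumed): if the words before
- * the checksum word sum to 0x1234, the value written at
- * NVM_CHECKSUM_REG + offset is NVM_SUM - 0x1234 = 0xBABA - 0x1234 = 0xA886,
- * so the whole protected region sums back to 0xBABA, which is what
- * e1000_validate_nvm_checksum_with_offset() expects.
- */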
-
-/**
- *  e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
- *  @hw: pointer to the HW structure
- *
- *  Calculates the EEPROM section checksum by reading/adding each word of
- *  the EEPROM and then verifies that the sum of the EEPROM is
- *  equal to 0xBABA.
- **/
-static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 eeprom_regions_count = 1;
-       u16 j, nvm_data;
-       u16 nvm_offset;
-
-       DEBUGFUNC("e1000_validate_nvm_checksum_82580");
-
-       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
-               /* if checksums compatibility bit is set, validate checksums
-                * for all 4 ports. */
-               eeprom_regions_count = 4;
-       }
-
-       for (j = 0; j < eeprom_regions_count; j++) {
-               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
-               ret_val = e1000_validate_nvm_checksum_with_offset(hw,
-                                                               nvm_offset);
-               if (ret_val != E1000_SUCCESS)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_update_nvm_checksum_82580 - Update EEPROM checksum
- *  @hw: pointer to the HW structure
- *
- *  Updates the EEPROM section checksums for all 4 ports by reading/adding
- *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
- *  checksum and writes the value to the EEPROM.
- **/
-static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
-{
-       s32 ret_val;
-       u16 j, nvm_data;
-       u16 nvm_offset;
-
-       DEBUGFUNC("e1000_update_nvm_checksum_82580");
-
-       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error while updating checksum"
-                       " compatibility bit.\n");
-               goto out;
-       }
-
-       if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
-               /* set compatibility bit to validate checksums appropriately */
-               nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
-               ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
-                                       &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Write Error while updating checksum"
-                               " compatibility bit.\n");
-                       goto out;
-               }
-       }
-
-       for (j = 0; j < 4; j++) {
-               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
-               ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
-               if (ret_val) {
-                       goto out;
-               }
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
- *  @hw: pointer to the HW structure
- *
- *  Calculates the EEPROM section checksum by reading/adding each word of
- *  the EEPROM and then verifies that the sum of the EEPROM is
- *  equal to 0xBABA.
- **/
-static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 j;
-       u16 nvm_offset;
-
-       DEBUGFUNC("e1000_validate_nvm_checksum_i350");
-
-       for (j = 0; j < 4; j++) {
-               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
-               ret_val = e1000_validate_nvm_checksum_with_offset(hw,
-                                                               nvm_offset);
-               if (ret_val != E1000_SUCCESS)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_update_nvm_checksum_i350 - Update EEPROM checksum
- *  @hw: pointer to the HW structure
- *
- *  Updates the EEPROM section checksums for all 4 ports by reading/adding
- *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
- *  checksum and writes the value to the EEPROM.
- **/
-static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 j;
-       u16 nvm_offset;
-
-       DEBUGFUNC("e1000_update_nvm_checksum_i350");
-
-       for (j = 0; j < 4; j++) {
-               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
-               ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
-               if (ret_val != E1000_SUCCESS)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_set_eee_i350 - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *
- *  Enable/disable EEE based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i350(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u32 ipcnfg, eeer, ctrl_ext;
-
-       DEBUGFUNC("e1000_set_eee_i350");
-
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       if ((hw->mac.type != e1000_i350) ||
-           (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
-               goto out;
-       ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
-       eeer = E1000_READ_REG(hw, E1000_EEER);
-
-       /* enable or disable per user setting */
-       if (!(hw->dev_spec._82575.eee_disable)) {
-               ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
-                          E1000_IPCNFG_EEE_100M_AN);
-               eeer |= (E1000_EEER_TX_LPI_EN |
-                        E1000_EEER_RX_LPI_EN |
-                        E1000_EEER_LPI_FC);
-
-       } else {
-               ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
-                           E1000_IPCNFG_EEE_100M_AN);
-               eeer &= ~(E1000_EEER_TX_LPI_EN |
-                         E1000_EEER_RX_LPI_EN |
-                         E1000_EEER_LPI_FC);
-       }
-       E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
-       E1000_WRITE_REG(hw, E1000_EEER, eeer);
-       E1000_READ_REG(hw, E1000_IPCNFG);
-       E1000_READ_REG(hw, E1000_EEER);
-out:
-
-       return ret_val;
-}
diff --git a/lib/librte_pmd_igb/igb/e1000_82575.h b/lib/librte_pmd_igb/igb/e1000_82575.h
deleted file mode 100644 (file)
index 415756e..0000000
+++ /dev/null
@@ -1,487 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_82575_H_
-#define _E1000_82575_H_
-
-#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
-                                     (ID_LED_DEF1_DEF2 <<  8) | \
-                                     (ID_LED_DEF1_DEF2 <<  4) | \
-                                     (ID_LED_OFF1_ON2))
-/*
- * Receive Address Register Count
- * Number of high/low register pairs in the RAR.  The RAR (Receive Address
- * Registers) holds the directed and multicast addresses that we monitor.
- * These entries are also used for MAC-based filtering.
- */
-/*
- * For 82576, there is an additional set of RARs that begins at an offset
- * separate from the first set of RARs.
- */
-#define E1000_RAR_ENTRIES_82575        16
-#define E1000_RAR_ENTRIES_82576        24
-#define E1000_RAR_ENTRIES_82580        24
-#define E1000_RAR_ENTRIES_I350         32
-#define E1000_SW_SYNCH_MB              0x00000100
-#define E1000_STAT_DEV_RST_SET         0x00100000
-#define E1000_CTRL_DEV_RST             0x20000000
-
-#ifdef E1000_BIT_FIELDS
-struct e1000_adv_data_desc {
-       __le64 buffer_addr;    /* Address of the descriptor's data buffer */
-       union {
-               u32 data;
-               struct {
-                       u32 datalen :16; /* Data buffer length */
-                       u32 rsvd    :4;
-                       u32 dtyp    :4;  /* Descriptor type */
-                       u32 dcmd    :8;  /* Descriptor command */
-               } config;
-       } lower;
-       union {
-               u32 data;
-               struct {
-                       u32 status  :4;  /* Descriptor status */
-                       u32 idx     :4;
-                       u32 popts   :6;  /* Packet Options */
-                       u32 paylen  :18; /* Payload length */
-               } options;
-       } upper;
-};
-
-#define E1000_TXD_DTYP_ADV_C    0x2  /* Advanced Context Descriptor */
-#define E1000_TXD_DTYP_ADV_D    0x3  /* Advanced Data Descriptor */
-#define E1000_ADV_TXD_CMD_DEXT  0x20 /* Descriptor extension (0 = legacy) */
-#define E1000_ADV_TUCMD_IPV4    0x2  /* IP Packet Type: 1=IPv4 */
-#define E1000_ADV_TUCMD_IPV6    0x0  /* IP Packet Type: 0=IPv6 */
-#define E1000_ADV_TUCMD_L4T_UDP 0x0  /* L4 Packet TYPE of UDP */
-#define E1000_ADV_TUCMD_L4T_TCP 0x4  /* L4 Packet TYPE of TCP */
-#define E1000_ADV_TUCMD_MKRREQ  0x10 /* Indicates markers are required */
-#define E1000_ADV_DCMD_EOP      0x1  /* End of Packet */
-#define E1000_ADV_DCMD_IFCS     0x2  /* Insert FCS (Ethernet CRC) */
-#define E1000_ADV_DCMD_RS       0x8  /* Report Status */
-#define E1000_ADV_DCMD_VLE      0x40 /* Add VLAN tag */
-#define E1000_ADV_DCMD_TSE      0x80 /* TCP Seg enable */
-/* Extended Device Control */
-#define E1000_CTRL_EXT_NSICR    0x00000001 /* Disable Intr Clear all on read */
-
-struct e1000_adv_context_desc {
-       union {
-               u32 ip_config;
-               struct {
-                       u32 iplen    :9;
-                       u32 maclen   :7;
-                       u32 vlan_tag :16;
-               } fields;
-       } ip_setup;
-       u32 seq_num;
-       union {
-               u64 l4_config;
-               struct {
-                       u32 mkrloc :9;
-                       u32 tucmd  :11;
-                       u32 dtyp   :4;
-                       u32 adv    :8;
-                       u32 rsvd   :4;
-                       u32 idx    :4;
-                       u32 l4len  :8;
-                       u32 mss    :16;
-               } fields;
-       } l4_setup;
-};
-#endif
-
-/* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
-#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
-#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
-#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
-#define E1000_SRRCTL_TIMESTAMP                          0x40000000
-#define E1000_SRRCTL_DROP_EN                            0x80000000
-
-#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
-
-#define E1000_TX_HEAD_WB_ENABLE   0x1
-#define E1000_TX_SEQNUM_WB_ENABLE 0x2
-
-#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
-#define E1000_MRQC_ENABLE_VMDQ              0x00000003
-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
-#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
-#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
-#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
-#define E1000_MRQC_ENABLE_RSS_8Q            0x00000002
-
-#define E1000_VMRCTL_MIRROR_PORT_SHIFT      8
-#define E1000_VMRCTL_MIRROR_DSTPORT_MASK    (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
-#define E1000_VMRCTL_POOL_MIRROR_ENABLE     (1 << 0)
-#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE   (1 << 1)
-#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
-
-#define E1000_EICR_TX_QUEUE ( \
-    E1000_EICR_TX_QUEUE0 |    \
-    E1000_EICR_TX_QUEUE1 |    \
-    E1000_EICR_TX_QUEUE2 |    \
-    E1000_EICR_TX_QUEUE3)
-
-#define E1000_EICR_RX_QUEUE ( \
-    E1000_EICR_RX_QUEUE0 |    \
-    E1000_EICR_RX_QUEUE1 |    \
-    E1000_EICR_RX_QUEUE2 |    \
-    E1000_EICR_RX_QUEUE3)
-
-#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
-#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
-
-#define EIMS_ENABLE_MASK ( \
-    E1000_EIMS_RX_QUEUE  | \
-    E1000_EIMS_TX_QUEUE  | \
-    E1000_EIMS_TCP_TIMER | \
-    E1000_EIMS_OTHER)
-
-/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define E1000_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
-#define E1000_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
-#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
-#define E1000_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
-#define E1000_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
-#define E1000_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
-#define E1000_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
-#define E1000_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
-#define E1000_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
-#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
-
-/* Receive Descriptor - Advanced */
-union e1000_adv_rx_desc {
-       struct {
-               __le64 pkt_addr;             /* Packet buffer address */
-               __le64 hdr_addr;             /* Header buffer address */
-       } read;
-       struct {
-               struct {
-                       union {
-                               __le32 data;
-                               struct {
-                                       __le16 pkt_info; /*RSS type, Pkt type*/
-                                       /* Split Header, header buffer len */
-                                       __le16 hdr_info;
-                               } hs_rss;
-                       } lo_dword;
-                       union {
-                               __le32 rss;          /* RSS Hash */
-                               struct {
-                                       __le16 ip_id;    /* IP id */
-                                       __le16 csum;     /* Packet Checksum */
-                               } csum_ip;
-                       } hi_dword;
-               } lower;
-               struct {
-                       __le32 status_error;     /* ext status/error */
-                       __le16 length;           /* Packet length */
-                       __le16 vlan;             /* VLAN tag */
-               } upper;
-       } wb;  /* writeback */
-};
-
-#define E1000_RXDADV_RSSTYPE_MASK        0x0000000F
-#define E1000_RXDADV_RSSTYPE_SHIFT       12
-#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
-#define E1000_RXDADV_SPLITHEADER_EN      0x00001000
-#define E1000_RXDADV_SPH                 0x8000
-#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
-#define E1000_RXDADV_STAT_TSIP           0x08000 /* timestamp in packet */
-#define E1000_RXDADV_ERR_HBO             0x00800000
-
-/* RSS Hash results */
-#define E1000_RXDADV_RSSTYPE_NONE        0x00000000
-#define E1000_RXDADV_RSSTYPE_IPV4_TCP    0x00000001
-#define E1000_RXDADV_RSSTYPE_IPV4        0x00000002
-#define E1000_RXDADV_RSSTYPE_IPV6_TCP    0x00000003
-#define E1000_RXDADV_RSSTYPE_IPV6_EX     0x00000004
-#define E1000_RXDADV_RSSTYPE_IPV6        0x00000005
-#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define E1000_RXDADV_RSSTYPE_IPV4_UDP    0x00000007
-#define E1000_RXDADV_RSSTYPE_IPV6_UDP    0x00000008
-#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
-
-/* RSS Packet Types as indicated in the receive descriptor */
-#define E1000_RXDADV_PKTTYPE_NONE        0x00000000
-#define E1000_RXDADV_PKTTYPE_IPV4        0x00000010 /* IPV4 hdr present */
-#define E1000_RXDADV_PKTTYPE_IPV4_EX     0x00000020 /* IPV4 hdr + extensions */
-#define E1000_RXDADV_PKTTYPE_IPV6        0x00000040 /* IPV6 hdr present */
-#define E1000_RXDADV_PKTTYPE_IPV6_EX     0x00000080 /* IPV6 hdr + extensions */
-#define E1000_RXDADV_PKTTYPE_TCP         0x00000100 /* TCP hdr present */
-#define E1000_RXDADV_PKTTYPE_UDP         0x00000200 /* UDP hdr present */
-#define E1000_RXDADV_PKTTYPE_SCTP        0x00000400 /* SCTP hdr present */
-#define E1000_RXDADV_PKTTYPE_NFS         0x00000800 /* NFS hdr present */
-
-#define E1000_RXDADV_PKTTYPE_IPSEC_ESP   0x00001000 /* IPSec ESP */
-#define E1000_RXDADV_PKTTYPE_IPSEC_AH    0x00002000 /* IPSec AH */
-#define E1000_RXDADV_PKTTYPE_LINKSEC     0x00004000 /* LinkSec Encap */
-#define E1000_RXDADV_PKTTYPE_ETQF        0x00008000 /* PKTTYPE is ETQF index */
-#define E1000_RXDADV_PKTTYPE_ETQF_MASK   0x00000070 /* ETQF has 8 indices */
-#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT  4          /* Right-shift 4 bits */
-
-/* LinkSec results */
-/* Security Processing bit Indication */
-#define E1000_RXDADV_LNKSEC_STATUS_SECP         0x00020000
-#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
-#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
-#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
-#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
-
-#define E1000_RXDADV_IPSEC_STATUS_SECP          0x00020000
-#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK       0x18000000
-#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
-#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
-#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED  0x18000000
-
-/* Transmit Descriptor - Advanced */
-union e1000_adv_tx_desc {
-       struct {
-               __le64 buffer_addr;    /* Address of descriptor's data buf */
-               __le32 cmd_type_len;
-               __le32 olinfo_status;
-       } read;
-       struct {
-               __le64 rsvd;       /* Reserved */
-               __le32 nxtseq_seed;
-               __le32 status;
-       } wb;
-};
-
-/* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DDTYP_ISCSI  0x10000000 /* DDP hdr type or iSCSI */
-#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_MAC_LINKSEC  0x00040000 /* Apply LinkSec on packet */
-#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
-#define E1000_ADVTXD_STAT_SN_CRC  0x00000002 /* NXTSEQ/SEED present in WB */
-#define E1000_ADVTXD_IDX_SHIFT    4  /* Adv desc Index shift */
-#define E1000_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
-#define E1000_ADVTXD_POPTS_IPSEC     0x00000400 /* IPSec offload request */
-#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
-
-/* Context descriptors */
-struct e1000_adv_tx_context_desc {
-       __le32 vlan_macip_lens;
-       __le32 seqnum_seed;
-       __le32 type_tucmd_mlhl;
-       __le32 mss_l4len_idx;
-};
-
-#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_VLAN_SHIFT     16  /* Adv ctxt vlan tag shift */
-#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
-#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000  /* L4 Packet TYPE of SCTP */
-#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP    0x00002000 /* IPSec Type ESP */
-/* IPSec Encrypt Enable for ESP */
-#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN  0x00004000
-#define E1000_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
-#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
-/* Adv ctxt IPSec SA IDX mask */
-#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK     0x000000FF
-/* Adv ctxt IPSec ESP len mask */
-#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK      0x000000FF
-
-/* Additional Transmit Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
-#define E1000_TXDCTL_SWFLSH        0x04000000 /* Tx Desc. write-back flushing */
-/* Tx Queue Arbitration Priority 0=low, 1=high */
-#define E1000_TXDCTL_PRIORITY      0x08000000
-
-/* Additional Receive Descriptor Control definitions */
-#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
-#define E1000_RXDCTL_SWFLSH        0x04000000 /* Rx Desc. write-back flushing */
-
-/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
-#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-
-#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-
-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-
-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-
-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
-#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
-#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
-
-/* Additional interrupt register bit definitions */
-#define E1000_ICR_LSECPNS       0x00000020          /* PN threshold - server */
-#define E1000_IMS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
-#define E1000_ICS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
-
-/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
-#define E1000_ETQF_IMM_INT         (1 << 29)
-#define E1000_ETQF_1588            (1 << 30)
-#define E1000_ETQF_QUEUE_ENABLE    (1 << 31)
-/*
- * ETQF filter list: one static filter per filter consumer. This is
- *                   to avoid filter collisions later. Add new filters
- *                   here!!
- *
- * Current filters:
- *    EAPOL 802.1x (0x888e): Filter 0
- */
-#define E1000_ETQF_FILTER_EAPOL          0
-
-#define E1000_FTQF_VF_BP               0x00008000
-#define E1000_FTQF_1588_TIME_STAMP     0x08000000
-#define E1000_FTQF_MASK                0xF0000000
-#define E1000_FTQF_MASK_PROTO_BP       0x10000000
-#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
-#define E1000_FTQF_MASK_DEST_ADDR_BP   0x40000000
-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
-
-#define E1000_NVM_APME_82575          0x0400
-#define MAX_NUM_VFS                   8
-
-#define E1000_DTXSWC_MAC_SPOOF_MASK   0x000000FF /* Per VF MAC spoof control */
-#define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
-#define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_LLE_SHIFT        16
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
-
-/* Easy defines for setting default pool, would normally be left at zero */
-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
-#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
-
-/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
-
-/* Per VM Offload register setup */
-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
-#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
-#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
-#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
-#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
-#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
-#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
-#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
-#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
-#define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
-
-#define E1000_VMOLR_VPE        0x00800000 /* VLAN promiscuous enable */
-#define E1000_VMOLR_UPE        0x20000000 /* Unicast promiscuous enable */
-#define E1000_DVMOLR_HIDVLAN   0x20000000 /* Vlan hiding enable */
-#define E1000_DVMOLR_STRVLAN   0x40000000 /* Vlan stripping enable */
-#define E1000_DVMOLR_STRCRC    0x80000000 /* CRC stripping enable */
-
-#define E1000_PBRWAC_WALPB     0x00000007 /* Wrap around event on LAN Rx PB */
-#define E1000_PBRWAC_PBE       0x00000008 /* Rx packet buffer empty */
-
-#define E1000_VLVF_ARRAY_SIZE     32
-#define E1000_VLVF_VLANID_MASK    0x00000FFF
-#define E1000_VLVF_POOLSEL_SHIFT  12
-#define E1000_VLVF_POOLSEL_MASK   (0xFF << E1000_VLVF_POOLSEL_SHIFT)
-#define E1000_VLVF_LVLAN          0x00100000
-#define E1000_VLVF_VLANID_ENABLE  0x80000000
-
-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define E1000_VMVIR_VLANA_NEVER   0x80000000 /* Never insert VLAN tag */
-
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
-
-#define E1000_IOVCTL 0x05BBC
-#define E1000_IOVCTL_REUSE_VFQ 0x00000001
-
-#define E1000_RPLOLR_STRVLAN   0x40000000
-#define E1000_RPLOLR_STRCRC    0x80000000
-
-#define E1000_TCTL_EXT_COLD       0x000FFC00
-#define E1000_TCTL_EXT_COLD_SHIFT 10
-
-#define E1000_DTXCTL_8023LL     0x0004
-#define E1000_DTXCTL_VLAN_ADDED 0x0008
-#define E1000_DTXCTL_OOS_ENABLE 0x0010
-#define E1000_DTXCTL_MDP_EN     0x0020
-#define E1000_DTXCTL_SPOOF_INT  0x0040
-
-#define ALL_QUEUES   0xFFFF
-
-/* Rx packet buffer size defines */
-#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
-void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
-s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
-
-enum e1000_promisc_type {
-       e1000_promisc_disabled = 0,   /* all promisc modes disabled */
-       e1000_promisc_unicast = 1,    /* unicast promiscuous enabled */
-       e1000_promisc_multicast = 2,  /* multicast promiscuous enabled */
-       e1000_promisc_enabled = 3,    /* both uni and multicast promisc */
-       e1000_num_promisc_types
-};
-
-void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
-void e1000_rlpml_set_vf(struct e1000_hw *, u16);
-s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
-u16 e1000_rxpbs_adjust_82580(u32 data);
-s32 e1000_set_eee_i350(struct e1000_hw *);
-#endif /* _E1000_82575_H_ */
diff --git a/lib/librte_pmd_igb/igb/e1000_api.c b/lib/librte_pmd_igb/igb/e1000_api.c
deleted file mode 100644 (file)
index fc41f73..0000000
+++ /dev/null
@@ -1,1152 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_api.h"
-
-/**
- *  e1000_init_mac_params - Initialize MAC function pointers
- *  @hw: pointer to the HW structure
- *
- *  This function initializes the function pointers for the MAC
- *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_mac_params(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       if (hw->mac.ops.init_params) {
-               ret_val = hw->mac.ops.init_params(hw);
-               if (ret_val) {
-                       DEBUGOUT("MAC Initialization Error\n");
-                       goto out;
-               }
-       } else {
-               DEBUGOUT("mac.init_mac_params was NULL\n");
-               ret_val = -E1000_ERR_CONFIG;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_init_nvm_params - Initialize NVM function pointers
- *  @hw: pointer to the HW structure
- *
- *  This function initializes the function pointers for the NVM
- *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_nvm_params(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       if (hw->nvm.ops.init_params) {
-               ret_val = hw->nvm.ops.init_params(hw);
-               if (ret_val) {
-                       DEBUGOUT("NVM Initialization Error\n");
-                       goto out;
-               }
-       } else {
-               DEBUGOUT("nvm.init_nvm_params was NULL\n");
-               ret_val = -E1000_ERR_CONFIG;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_init_phy_params - Initialize PHY function pointers
- *  @hw: pointer to the HW structure
- *
- *  This function initializes the function pointers for the PHY
- *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_phy_params(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       if (hw->phy.ops.init_params) {
-               ret_val = hw->phy.ops.init_params(hw);
-               if (ret_val) {
-                       DEBUGOUT("PHY Initialization Error\n");
-                       goto out;
-               }
-       } else {
-               DEBUGOUT("phy.init_phy_params was NULL\n");
-               ret_val =  -E1000_ERR_CONFIG;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_init_mbx_params - Initialize mailbox function pointers
- *  @hw: pointer to the HW structure
- *
- *  This function initializes the function pointers for the mailbox
- *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_mbx_params(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       if (hw->mbx.ops.init_params) {
-               ret_val = hw->mbx.ops.init_params(hw);
-               if (ret_val) {
-                       DEBUGOUT("Mailbox Initialization Error\n");
-                       goto out;
-               }
-       } else {
-               DEBUGOUT("mbx.init_mbx_params was NULL\n");
-               ret_val =  -E1000_ERR_CONFIG;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_set_mac_type - Sets MAC type
- *  @hw: pointer to the HW structure
- *
- *  This function sets the mac type of the adapter based on the
- *  device ID stored in the hw structure.
- *  MUST BE FIRST FUNCTION CALLED (explicitly or through
- *  e1000_setup_init_funcs()).
- **/
-s32 e1000_set_mac_type(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_set_mac_type");
-
-       switch (hw->device_id) {
-       case E1000_DEV_ID_82575EB_COPPER:
-       case E1000_DEV_ID_82575EB_FIBER_SERDES:
-       case E1000_DEV_ID_82575GB_QUAD_COPPER:
-               mac->type = e1000_82575;
-               break;
-       case E1000_DEV_ID_82576:
-       case E1000_DEV_ID_82576_FIBER:
-       case E1000_DEV_ID_82576_SERDES:
-       case E1000_DEV_ID_82576_QUAD_COPPER:
-       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
-       case E1000_DEV_ID_82576_NS:
-       case E1000_DEV_ID_82576_NS_SERDES:
-       case E1000_DEV_ID_82576_SERDES_QUAD:
-               mac->type = e1000_82576;
-               break;
-       case E1000_DEV_ID_82580_COPPER:
-       case E1000_DEV_ID_82580_FIBER:
-       case E1000_DEV_ID_82580_SERDES:
-       case E1000_DEV_ID_82580_SGMII:
-       case E1000_DEV_ID_82580_COPPER_DUAL:
-       case E1000_DEV_ID_82580_QUAD_FIBER:
-       case E1000_DEV_ID_DH89XXCC_SGMII:
-       case E1000_DEV_ID_DH89XXCC_SERDES:
-       case E1000_DEV_ID_DH89XXCC_BACKPLANE:
-       case E1000_DEV_ID_DH89XXCC_SFP:
-               mac->type = e1000_82580;
-               break;
-       case E1000_DEV_ID_I350_COPPER:
-       case E1000_DEV_ID_I350_FIBER:
-       case E1000_DEV_ID_I350_SERDES:
-       case E1000_DEV_ID_I350_SGMII:
-       case E1000_DEV_ID_I350_DA4:
-               mac->type = e1000_i350;
-               break;
-       case E1000_DEV_ID_82576_VF:
-               mac->type = e1000_vfadapt;
-               break;
-       case E1000_DEV_ID_I350_VF:
-               mac->type = e1000_vfadapt_i350;
-               break;
-       default:
-               /* Should never have loaded on this device */
-               ret_val = -E1000_ERR_MAC_INIT;
-               break;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_setup_init_funcs - Initializes function pointers
- *  @hw: pointer to the HW structure
- *  @init_device: TRUE will initialize the rest of the function pointers
- *                 getting the device ready for use.  FALSE will only set
- *                 MAC type and the function pointers for the other init
- *                 functions.  Passing FALSE will not generate any hardware
- *                 reads or writes.
- *
- *  This function must be called by a driver in order to use the rest
- *  of the 'shared' code files. Called by drivers only.
- **/
-s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
-{
-       s32 ret_val;
-
-       /* Can't do much good without knowing the MAC type. */
-       ret_val = e1000_set_mac_type(hw);
-       if (ret_val) {
-               DEBUGOUT("ERROR: MAC type could not be set properly.\n");
-               goto out;
-       }
-
-       if (!hw->hw_addr) {
-               DEBUGOUT("ERROR: Registers not mapped\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       /*
-        * Init function pointers to generic implementations. We do this first
-        * allowing a driver module to override it afterward.
-        */
-       e1000_init_mac_ops_generic(hw);
-       e1000_init_phy_ops_generic(hw);
-       e1000_init_nvm_ops_generic(hw);
-       e1000_init_mbx_ops_generic(hw);
-
-       /*
-        * Set up the init function pointers. These are functions within the
-        * adapter family file that set up function pointers for the rest of
-        * the functions in that family.
-        */
-       switch (hw->mac.type) {
-       case e1000_82575:
-       case e1000_82576:
-       case e1000_82580:
-       case e1000_i350:
-               e1000_init_function_pointers_82575(hw);
-               break;
-       case e1000_vfadapt:
-               e1000_init_function_pointers_vf(hw);
-               break;
-       case e1000_vfadapt_i350:
-               e1000_init_function_pointers_vf(hw);
-               break;
-       default:
-               DEBUGOUT("Hardware not supported\n");
-               ret_val = -E1000_ERR_CONFIG;
-               break;
-       }
-
-       /*
-        * Initialize the rest of the function pointers. These require some
-        * register reads/writes in some cases.
-        */
-       if (!(ret_val) && init_device) {
-               ret_val = e1000_init_mac_params(hw);
-               if (ret_val)
-                       goto out;
-
-               ret_val = e1000_init_nvm_params(hw);
-               if (ret_val)
-                       goto out;
-
-               ret_val = e1000_init_phy_params(hw);
-               if (ret_val)
-                       goto out;
-
-               ret_val = e1000_init_mbx_params(hw);
-               if (ret_val)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
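As an illustration of the call order described above, a minimal driver-side sketch follows; the example_probe name and error handling are placeholders and not part of the shared code, and hw->device_id / hw->hw_addr are assumed to have been filled in by the caller.

#include "e1000_api.h"

/* Hypothetical probe sketch using the entry points declared in e1000_api.h. */
static int example_probe(struct e1000_hw *hw)
{
	/* Passing TRUE also runs the mac/nvm/phy/mbx init_params hooks. */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS)
		return -1;

	/* Bring the adapter to a known state, then initialize it. */
	if (e1000_reset_hw(hw) != E1000_SUCCESS)
		return -1;

	return (e1000_init_hw(hw) == E1000_SUCCESS) ? 0 : -1;
}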
-/**
- *  e1000_get_bus_info - Obtain bus information for adapter
- *  @hw: pointer to the HW structure
- *
- *  This will obtain information about the HW bus for which the
- *  adapter is attached and stores it in the hw structure. This is a
- *  function pointer entry point called by drivers.
- **/
-s32 e1000_get_bus_info(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.get_bus_info)
-               return hw->mac.ops.get_bus_info(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_clear_vfta - Clear VLAN filter table
- *  @hw: pointer to the HW structure
- *
- *  This clears the VLAN filter table on the adapter. This is a function
- *  pointer entry point called by drivers.
- **/
-void e1000_clear_vfta(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.clear_vfta)
-               hw->mac.ops.clear_vfta(hw);
-}
-
-/**
- *  e1000_write_vfta - Write value to VLAN filter table
- *  @hw: pointer to the HW structure
- *  @offset: the 32-bit offset at which to write the value.
- *  @value: the 32-bit value to write at location offset.
- *
- *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
- *  table. This is a function pointer entry point called by drivers.
- **/
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
-{
-       if (hw->mac.ops.write_vfta)
-               hw->mac.ops.write_vfta(hw, offset, value);
-}
-
-/**
- *  e1000_update_mc_addr_list - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
- *
- *  Updates the Multicast Table Array.
- *  The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-                               u32 mc_addr_count)
-{
-       if (hw->mac.ops.update_mc_addr_list)
-               hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
-                                               mc_addr_count);
-}
-
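The "packed" requirement above means the addresses are laid out back to back, six bytes each, with no padding. A hedged sketch, with the helper name, array bound and addresses as placeholders:

#include "e1000_api.h"

/* Hypothetical sketch: pack up to two 6-byte multicast addresses
 * contiguously before programming the Multicast Table Array. */
static void example_update_mc(struct e1000_hw *hw, u8 addrs[][6], u32 count)
{
	u8 packed[2 * 6];
	u32 i, j;

	if (count > 2)
		count = 2;
	for (i = 0; i < count; i++)
		for (j = 0; j < 6; j++)
			packed[i * 6 + j] = addrs[i][j];

	e1000_update_mc_addr_list(hw, packed, count);
}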
-/**
- *  e1000_force_mac_fc - Force MAC flow control
- *  @hw: pointer to the HW structure
- *
- *  Force the MAC's flow control settings. Currently no func pointer exists
- *  and all implementations are handled in the generic version of this
- *  function.
- **/
-s32 e1000_force_mac_fc(struct e1000_hw *hw)
-{
-       return e1000_force_mac_fc_generic(hw);
-}
-
-/**
- *  e1000_check_for_link - Check/Store link connection
- *  @hw: pointer to the HW structure
- *
- *  This checks the link condition of the adapter and stores the
- *  results in the hw->mac structure. This is a function pointer entry
- *  point called by drivers.
- **/
-s32 e1000_check_for_link(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.check_for_link)
-               return hw->mac.ops.check_for_link(hw);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_check_mng_mode - Check management mode
- *  @hw: pointer to the HW structure
- *
- *  This checks if the adapter has manageability enabled.
- *  This is a function pointer entry point called by drivers.
- **/
-bool e1000_check_mng_mode(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.check_mng_mode)
-               return hw->mac.ops.check_mng_mode(hw);
-
-       return FALSE;
-}
-
-/**
- *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface
- *  @length: size of the buffer
- *
- *  Writes the DHCP information to the host interface.
- **/
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
-{
-       return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
-}
-
-/**
- *  e1000_reset_hw - Reset hardware
- *  @hw: pointer to the HW structure
- *
- *  This resets the hardware into a known state. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_reset_hw(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.reset_hw)
-               return hw->mac.ops.reset_hw(hw);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_init_hw - Initialize hardware
- *  @hw: pointer to the HW structure
- *
- *  This inits the hardware readying it for operation. This is a function
- *  pointer entry point called by drivers.
- **/
-s32 e1000_init_hw(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.init_hw)
-               return hw->mac.ops.init_hw(hw);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_setup_link - Configures link and flow control
- *  @hw: pointer to the HW structure
- *
- *  This configures link and flow control settings for the adapter. This
- *  is a function pointer entry point called by drivers. While modules can
- *  also call this, they probably call their own version of this function.
- **/
-s32 e1000_setup_link(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.setup_link)
-               return hw->mac.ops.setup_link(hw);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_get_speed_and_duplex - Returns current speed and duplex
- *  @hw: pointer to the HW structure
- *  @speed: pointer to a 16-bit value to store the speed
- *  @duplex: pointer to a 16-bit value to store the duplex.
- *
- *  This returns the speed and duplex of the adapter in the two 'out'
- *  variables passed in. This is a function pointer entry point called
- *  by drivers.
- **/
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
-{
-       if (hw->mac.ops.get_link_up_info)
-               return hw->mac.ops.get_link_up_info(hw, speed, duplex);
-
-       return -E1000_ERR_CONFIG;
-}
-
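A minimal sketch, assuming a driver first refreshes the link state and then queries the negotiated parameters; the helper name is hypothetical:

#include "e1000_api.h"

static s32 example_report_link(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
	s32 ret;

	/* Refresh the link state stored in hw->mac first. */
	ret = e1000_check_for_link(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	/* Then ask the MAC layer for the negotiated speed and duplex. */
	return e1000_get_speed_and_duplex(hw, speed, duplex);
}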
-/**
- *  e1000_setup_led - Configures SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This prepares the SW controllable LED for use and saves the current state
- *  of the LED so it can be later restored. This is a function pointer entry
- *  point called by drivers.
- **/
-s32 e1000_setup_led(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.setup_led)
-               return hw->mac.ops.setup_led(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_cleanup_led - Restores SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This restores the SW controllable LED to the value saved off by
- *  e1000_setup_led. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_cleanup_led(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.cleanup_led)
-               return hw->mac.ops.cleanup_led(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_blink_led - Blink SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This starts the adapter LED blinking. Request the LED to be setup first
- *  and cleaned up after. This is a function pointer entry point called by
- *  drivers.
- **/
-s32 e1000_blink_led(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.blink_led)
-               return hw->mac.ops.blink_led(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_id_led_init - store LED configurations in SW
- *  @hw: pointer to the HW structure
- *
- *  Initializes the LED config in SW. This is a function pointer entry point
- *  called by drivers.
- **/
-s32 e1000_id_led_init(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.id_led_init)
-               return hw->mac.ops.id_led_init(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_led_on - Turn on SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  Turns the SW defined LED on. This is a function pointer entry point
- *  called by drivers.
- **/
-s32 e1000_led_on(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.led_on)
-               return hw->mac.ops.led_on(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_led_off - Turn off SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  Turns the SW defined LED off. This is a function pointer entry point
- *  called by drivers.
- **/
-s32 e1000_led_off(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.led_off)
-               return hw->mac.ops.led_off(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_reset_adaptive - Reset adaptive IFS
- *  @hw: pointer to the HW structure
- *
- *  Resets the adaptive IFS. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-void e1000_reset_adaptive(struct e1000_hw *hw)
-{
-       e1000_reset_adaptive_generic(hw);
-}
-
-/**
- *  e1000_update_adaptive - Update adaptive IFS
- *  @hw: pointer to the HW structure
- *
- *  Updates adapter IFS. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-void e1000_update_adaptive(struct e1000_hw *hw)
-{
-       e1000_update_adaptive_generic(hw);
-}
-
-/**
- *  e1000_disable_pcie_master - Disable PCI-Express master access
- *  @hw: pointer to the HW structure
- *
- *  Disables PCI-Express master access and verifies there are no pending
- *  requests. Currently no func pointer exists and all implementations are
- *  handled in the generic version of this function.
- **/
-s32 e1000_disable_pcie_master(struct e1000_hw *hw)
-{
-       return e1000_disable_pcie_master_generic(hw);
-}
-
-/**
- *  e1000_config_collision_dist - Configure collision distance
- *  @hw: pointer to the HW structure
- *
- *  Configures the collision distance to the default value and is used
- *  during link setup.
- **/
-void e1000_config_collision_dist(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.config_collision_dist)
-               hw->mac.ops.config_collision_dist(hw);
-}
-
-/**
- *  e1000_rar_set - Sets a receive address register
- *  @hw: pointer to the HW structure
- *  @addr: address to set the RAR to
- *  @index: the RAR to set
- *
- *  Sets a Receive Address Register (RAR) to the specified address.
- **/
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
-{
-       if (hw->mac.ops.rar_set)
-               hw->mac.ops.rar_set(hw, addr, index);
-}
-
-/**
- *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
- *  @hw: pointer to the HW structure
- *
- *  Ensures that the MDI/MDIX SW state is valid.
- **/
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.validate_mdi_setting)
-               return hw->mac.ops.validate_mdi_setting(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_hash_mc_addr - Determines address location in multicast table
- *  @hw: pointer to the HW structure
- *  @mc_addr: Multicast address to hash.
- *
- *  This hashes an address to determine its location in the multicast
- *  table. Currently no func pointer exists and all implementations
- *  are handled in the generic version of this function.
- **/
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
-{
-       return e1000_hash_mc_addr_generic(hw, mc_addr);
-}
-
-/**
- *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
- *  @hw: pointer to the HW structure
- *
- *  Enables packet filtering on transmit packets if manageability is enabled
- *  and host interface is enabled.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
-{
-       return e1000_enable_tx_pkt_filtering_generic(hw);
-}
-
-/**
- *  e1000_mng_host_if_write - Writes to the manageability host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface buffer
- *  @length: size of the buffer
- *  @offset: location in the buffer to write to
- *  @sum: sum of the data (not checksum)
- *
- *  This function writes the buffer content at the given offset on the host
- *  interface.  It also takes care of alignment so the writes are done in the
- *  most efficient way, and accumulates the sum of the buffer in the *sum
- *  parameter.
- **/
-s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
-                            u16 offset, u8 *sum)
-{
-       if (hw->mac.ops.mng_host_if_write)
-               return hw->mac.ops.mng_host_if_write(hw, buffer, length,
-                                                    offset, sum);
-
-       return E1000_NOT_IMPLEMENTED;
-}
-
-/**
- *  e1000_mng_write_cmd_header - Writes manageability command header
- *  @hw: pointer to the HW structure
- *  @hdr: pointer to the host interface command header
- *
- *  Writes the command header after performing the checksum calculation.
- **/
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-                               struct e1000_host_mng_command_header *hdr)
-{
-       if (hw->mac.ops.mng_write_cmd_header)
-               return hw->mac.ops.mng_write_cmd_header(hw, hdr);
-
-       return E1000_NOT_IMPLEMENTED;
-}
-
-/**
- *  e1000_mng_enable_host_if - Checks host interface is enabled
- *  @hw: pointer to the HW structure
- *
- *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- *  This function checks whether the HOST IF is enabled for command operation
- *  and also checks whether the previous command has completed.  It busy waits
- *  if the previous command has not yet completed.
- **/
-s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
-{
-       if (hw->mac.ops.mng_enable_host_if)
-               return hw->mac.ops.mng_enable_host_if(hw);
-
-       return E1000_NOT_IMPLEMENTED;
-}
-
-/**
- *  e1000_wait_autoneg - Waits for autonegotiation completion
- *  @hw: pointer to the HW structure
- *
- *  Waits for autoneg to complete. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-s32 e1000_wait_autoneg(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.wait_autoneg)
-               return hw->mac.ops.wait_autoneg(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_check_reset_block - Verifies PHY can be reset
- *  @hw: pointer to the HW structure
- *
- *  Checks if the PHY is in a state that can be reset or if manageability
- *  has it tied up. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_check_reset_block(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.check_reset_block)
-               return hw->phy.ops.check_reset_block(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_phy_reg - Reads PHY register
- *  @hw: pointer to the HW structure
- *  @offset: the register to read
- *  @data: the buffer to store the 16-bit read.
- *
- *  Reads the PHY register and returns the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       if (hw->phy.ops.read_reg)
-               return hw->phy.ops.read_reg(hw, offset, data);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_phy_reg - Writes PHY register
- *  @hw: pointer to the HW structure
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes the PHY register at offset with the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       if (hw->phy.ops.write_reg)
-               return hw->phy.ops.write_reg(hw, offset, data);
-
-       return E1000_SUCCESS;
-}
-
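For illustration, a small read-modify-write helper built on the two entry points above; the offset and bit arguments are caller-supplied placeholders:

#include "e1000_api.h"

/* Hypothetical sketch: set a bit in a PHY register without disturbing
 * the other bits. */
static s32 example_set_phy_bit(struct e1000_hw *hw, u32 offset, u16 bit)
{
	u16 reg;
	s32 ret;

	ret = e1000_read_phy_reg(hw, offset, &reg);
	if (ret != E1000_SUCCESS)
		return ret;

	return e1000_write_phy_reg(hw, offset, reg | bit);
}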
-/**
- *  e1000_release_phy - Generic release PHY
- *  @hw: pointer to the HW structure
- *
- *  Returns immediately if the silicon family does not require a semaphore
- *  when accessing the PHY.
- **/
-void e1000_release_phy(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.release)
-               hw->phy.ops.release(hw);
-}
-
-/**
- *  e1000_acquire_phy - Generic acquire PHY
- *  @hw: pointer to the HW structure
- *
- *  Returns success if the silicon family does not require a semaphore when
- *  accessing the PHY.
- **/
-s32 e1000_acquire_phy(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.acquire)
-               return hw->phy.ops.acquire(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_kmrn_reg - Reads register using Kumeran interface
- *  @hw: pointer to the HW structure
- *  @offset: the register to read
- *  @data: the location to store the 16-bit value read.
- *
- *  Reads a register out of the Kumeran interface. Currently no func pointer
- *  exists and all implementations are handled in the generic version of
- *  this function.
- **/
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       return e1000_read_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- *  e1000_write_kmrn_reg - Writes register using Kumeran interface
- *  @hw: pointer to the HW structure
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes a register to the Kumeran interface. Currently no func pointer
- *  exists and all implementations are handled in the generic version of
- *  this function.
- **/
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       return e1000_write_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- *  e1000_get_cable_length - Retrieves cable length estimation
- *  @hw: pointer to the HW structure
- *
- *  This function estimates the cable length and stores the result in
- *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.get_cable_length)
-               return hw->phy.ops.get_cable_length(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_get_phy_info - Retrieves PHY information from registers
- *  @hw: pointer to the HW structure
- *
- *  This function gets some information from various PHY registers and
- *  populates hw->phy values with it. This is a function pointer entry
- *  point called by drivers.
- **/
-s32 e1000_get_phy_info(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.get_info)
-               return hw->phy.ops.get_info(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_phy_hw_reset - Hard PHY reset
- *  @hw: pointer to the HW structure
- *
- *  Performs a hard PHY reset. This is a function pointer entry point called
- *  by drivers.
- **/
-s32 e1000_phy_hw_reset(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.reset)
-               return hw->phy.ops.reset(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_phy_commit - Soft PHY reset
- *  @hw: pointer to the HW structure
- *
- *  Performs a soft PHY reset on those that apply. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_phy_commit(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.commit)
-               return hw->phy.ops.commit(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_d0_lplu_state - Sets low power link up state for D0
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D0
- *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D0
- *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
-       if (hw->phy.ops.set_d0_lplu_state)
-               return hw->phy.ops.set_d0_lplu_state(hw, active);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_d3_lplu_state - Sets low power link up state for D3
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D3
- *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
- *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
-{
-       if (hw->phy.ops.set_d3_lplu_state)
-               return hw->phy.ops.set_d3_lplu_state(hw, active);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_mac_addr - Reads MAC address
- *  @hw: pointer to the HW structure
- *
- *  Reads the MAC address out of the adapter and stores it in the HW structure.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_mac_addr(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.read_mac_addr)
-               return hw->mac.ops.read_mac_addr(hw);
-
-       return e1000_read_mac_addr_generic(hw);
-}
-
-/**
- *  e1000_read_pba_string - Read device part number string
- *  @hw: pointer to the HW structure
- *  @pba_num: pointer to device part number
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number from the EEPROM and stores
- *  the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
-       return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
-}
-
-/**
- *  e1000_read_pba_length - Read device part number string length
- *  @hw: pointer to the HW structure
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number length from the EEPROM and
- *  stores the value in pba_num_size.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
-{
-       return e1000_read_pba_length_generic(hw, pba_num_size);
-}
-
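Illustrative sketch of how a driver might pair the two PBA calls above, checking the reported length against its own buffer before reading the string; the helper name and buffer handling are placeholders:

#include "e1000_api.h"

static s32 example_read_pba(struct e1000_hw *hw, u8 *buf, u32 buf_size)
{
	u32 needed = buf_size;

	/* Query the required length first so the string read cannot overrun. */
	if (e1000_read_pba_length(hw, &needed) == E1000_SUCCESS &&
	    needed > buf_size)
		return -E1000_ERR_CONFIG;

	return e1000_read_pba_string(hw, buf, buf_size);
}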
-/**
- *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
- *  @hw: pointer to the HW structure
- *
- *  Validates the NVM checksum is correct. This is a function pointer entry
- *  point called by drivers.
- **/
-s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
-{
-       if (hw->nvm.ops.validate)
-               return hw->nvm.ops.validate(hw);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
- *  @hw: pointer to the HW structure
- *
- *  Updates the NVM checksum. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
-{
-       if (hw->nvm.ops.update)
-               return hw->nvm.ops.update(hw);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_reload_nvm - Reloads EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- *  extended control register.
- **/
-void e1000_reload_nvm(struct e1000_hw *hw)
-{
-       if (hw->nvm.ops.reload)
-               hw->nvm.ops.reload(hw);
-}
-
-/**
- *  e1000_read_nvm - Reads NVM (EEPROM)
- *  @hw: pointer to the HW structure
- *  @offset: the word offset to read
- *  @words: number of 16-bit words to read
- *  @data: pointer to the properly sized buffer for the data.
- *
- *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
- *  pointer entry point called by drivers.
- **/
-s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-       if (hw->nvm.ops.read)
-               return hw->nvm.ops.read(hw, offset, words, data);
-
-       return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_write_nvm - Writes to NVM (EEPROM)
- *  @hw: pointer to the HW structure
- *  @offset: the word offset to write
- *  @words: number of 16-bit words to write
- *  @data: pointer to the properly sized buffer for the data.
- *
- *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
- *  pointer entry point called by drivers.
- **/
-s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-       if (hw->nvm.ops.write)
-               return hw->nvm.ops.write(hw, offset, words, data);
-
-       return E1000_SUCCESS;
-}
-
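An illustrative sketch, assuming a driver wants to validate the EEPROM checksum before trusting a word it reads through the entry points above; the helper name is hypothetical:

#include "e1000_api.h"

static s32 example_read_nvm_word(struct e1000_hw *hw, u16 offset, u16 *word)
{
	/* Refuse to use the EEPROM contents if the checksum is bad. */
	if (e1000_validate_nvm_checksum(hw) != E1000_SUCCESS)
		return -E1000_ERR_CONFIG;

	/* Read a single 16-bit word at the caller-supplied offset. */
	return e1000_read_nvm(hw, offset, 1, word);
}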
-/**
- *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
- *  @hw: pointer to the HW structure
- *  @reg: 32bit register offset
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes the 8-bit value in data to control register reg at the given offset.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
-                              u8 data)
-{
-       return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
-}
-
-/**
- * e1000_power_up_phy - Restores link in case of PHY power down
- * @hw: pointer to the HW structure
- *
- * The PHY may be powered down to save power, to turn off the link when the
- * driver is unloaded, or when wake on LAN is not enabled (among others).
- **/
-void e1000_power_up_phy(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.power_up)
-               hw->phy.ops.power_up(hw);
-
-       e1000_setup_link(hw);
-}
-
-/**
- * e1000_power_down_phy - Power down PHY
- * @hw: pointer to the HW structure
- *
- * The PHY may be powered down to save power, to turn off the link when the
- * driver is unloaded, or when wake on LAN is not enabled (among others).
- **/
-void e1000_power_down_phy(struct e1000_hw *hw)
-{
-       if (hw->phy.ops.power_down)
-               hw->phy.ops.power_down(hw);
-}
-
-/**
- *  e1000_power_up_fiber_serdes_link - Power up serdes link
- *  @hw: pointer to the HW structure
- *
- *  Power on the optics and PCS.
- **/
-void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.power_up_serdes)
-               hw->mac.ops.power_up_serdes(hw);
-}
-
-/**
- *  e1000_shutdown_fiber_serdes_link - Remove link during power down
- *  @hw: pointer to the HW structure
- *
- *  Shutdown the optics and PCS on driver unload.
- **/
-void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
-{
-       if (hw->mac.ops.shutdown_serdes)
-               hw->mac.ops.shutdown_serdes(hw);
-}
-
diff --git a/lib/librte_pmd_igb/igb/e1000_api.h b/lib/librte_pmd_igb/igb/e1000_api.h
deleted file mode 100644 (file)
index daf8642..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_API_H_
-#define _E1000_API_H_
-
-#include "e1000_hw.h"
-
-extern void    e1000_init_function_pointers_82575(struct e1000_hw *hw);
-extern void    e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern void    e1000_init_function_pointers_vf(struct e1000_hw *hw);
-extern void    e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
-extern void    e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
-
-s32  e1000_set_mac_type(struct e1000_hw *hw);
-s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
-s32  e1000_init_mac_params(struct e1000_hw *hw);
-s32  e1000_init_nvm_params(struct e1000_hw *hw);
-s32  e1000_init_phy_params(struct e1000_hw *hw);
-s32  e1000_init_mbx_params(struct e1000_hw *hw);
-s32  e1000_get_bus_info(struct e1000_hw *hw);
-void e1000_clear_vfta(struct e1000_hw *hw);
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-s32  e1000_force_mac_fc(struct e1000_hw *hw);
-s32  e1000_check_for_link(struct e1000_hw *hw);
-s32  e1000_reset_hw(struct e1000_hw *hw);
-s32  e1000_init_hw(struct e1000_hw *hw);
-s32  e1000_setup_link(struct e1000_hw *hw);
-s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
-                                u16 *duplex);
-s32  e1000_disable_pcie_master(struct e1000_hw *hw);
-void e1000_config_collision_dist(struct e1000_hw *hw);
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
-void e1000_update_mc_addr_list(struct e1000_hw *hw,
-                               u8 *mc_addr_list, u32 mc_addr_count);
-s32  e1000_setup_led(struct e1000_hw *hw);
-s32  e1000_cleanup_led(struct e1000_hw *hw);
-s32  e1000_check_reset_block(struct e1000_hw *hw);
-s32  e1000_blink_led(struct e1000_hw *hw);
-s32  e1000_led_on(struct e1000_hw *hw);
-s32  e1000_led_off(struct e1000_hw *hw);
-s32 e1000_id_led_init(struct e1000_hw *hw);
-void e1000_reset_adaptive(struct e1000_hw *hw);
-void e1000_update_adaptive(struct e1000_hw *hw);
-s32  e1000_get_cable_length(struct e1000_hw *hw);
-s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
-s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
-                               u32 offset, u8 data);
-s32  e1000_get_phy_info(struct e1000_hw *hw);
-void e1000_release_phy(struct e1000_hw *hw);
-s32  e1000_acquire_phy(struct e1000_hw *hw);
-s32  e1000_phy_hw_reset(struct e1000_hw *hw);
-s32  e1000_phy_commit(struct e1000_hw *hw);
-void e1000_power_up_phy(struct e1000_hw *hw);
-void e1000_power_down_phy(struct e1000_hw *hw);
-s32  e1000_read_mac_addr(struct e1000_hw *hw);
-s32  e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, 
-                           u32 pba_num_size);
-s32  e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
-void e1000_reload_nvm(struct e1000_hw *hw);
-s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
-s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
-s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32  e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
-                     u16 *data);
-s32  e1000_wait_autoneg(struct e1000_hw *hw);
-s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-bool e1000_check_mng_mode(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32  e1000_mng_enable_host_if(struct e1000_hw *hw);
-s32  e1000_mng_host_if_write(struct e1000_hw *hw,
-                             u8 *buffer, u16 length, u16 offset, u8 *sum);
-s32  e1000_mng_write_cmd_header(struct e1000_hw *hw,
-                                struct e1000_host_mng_command_header *hdr);
-s32  e1000_mng_write_dhcp_info(struct e1000_hw * hw,
-                                    u8 *buffer, u16 length);
-
-/*
- * TBI_ACCEPT macro definition:
- *
- * This macro requires:
- *      adapter = a pointer to struct e1000_hw
- *      status = the 8 bit status field of the Rx descriptor with EOP set
- *      error = the 8 bit error field of the Rx descriptor with EOP set
- *      length = the sum of all the length fields of the Rx descriptors that
- *               make up the current frame
- *      last_byte = the last byte of the frame DMAed by the hardware
- *      max_frame_length = the maximum frame length we want to accept.
- *      min_frame_length = the minimum frame length we want to accept.
- *
- * This macro is a conditional that should be used in the interrupt
- * handler's Rx processing routine when RxErrors have been detected.
- *
- * Typical use:
- *  ...
- *  if (TBI_ACCEPT) {
- *      accept_frame = TRUE;
- *      e1000_tbi_adjust_stats(adapter, MacAddress);
- *      frame_length--;
- *  } else {
- *      accept_frame = FALSE;
- *  }
- *  ...
- */
-
-/* The carrier extension symbol, as received by the NIC. */
-#define CARRIER_EXTENSION   0x0F
-
-#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
-    (e1000_tbi_sbp_enabled_82543(a) && \
-     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
-     ((last_byte) == CARRIER_EXTENSION) && \
-     (((status) & E1000_RXD_STAT_VP) ? \
-          (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
-           ((length) <= (max_frame_size + 1))) : \
-          (((length) > min_frame_size) && \
-           ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
-
-#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_defines.h b/lib/librte_pmd_igb/igb/e1000_defines.h
deleted file mode 100644 (file)
index a7be67c..0000000
+++ /dev/null
@@ -1,1733 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_DEFINES_H_
-#define _E1000_DEFINES_H_
-
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE  8
-#define REQ_RX_DESCRIPTOR_MULTIPLE  8
-
-/* Definitions for power management and wakeup registers */
-/* Wake Up Control */
-#define E1000_WUC_APME       0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
-#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
-#define E1000_WUC_LSCWE      0x00000010 /* Link Status wake up enable */
-#define E1000_WUC_PPROXYE    0x00000010 /* Protocol Proxy Enable */
-#define E1000_WUC_LSCWO      0x00000020 /* Link Status wake up override */
-#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
-#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
-
-/* Wake Up Filter Control */
-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
-#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
-#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
-#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
-#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
-#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define E1000_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
-#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */
-#define E1000_WUFC_ALL_FILTERS  0x000F00FF /* Mask for all wakeup filters */
-#define E1000_WUFC_FLX_OFFSET   16 /* Offset to the Flexible Filters bits */
-#define E1000_WUFC_FLX_FILTERS  0x000F0000 /*Mask for the 4 flexible filters */
-/*
- * For 82576 to utilize Extended filter masks in addition to
- * existing (filter) masks
- */
-#define E1000_WUFC_EXT_FLX_FILTERS      0x00300000 /* Ext. FLX filter mask */
-
-/* Wake Up Status */
-#define E1000_WUS_LNKC         E1000_WUFC_LNKC
-#define E1000_WUS_MAG          E1000_WUFC_MAG
-#define E1000_WUS_EX           E1000_WUFC_EX
-#define E1000_WUS_MC           E1000_WUFC_MC
-#define E1000_WUS_BC           E1000_WUFC_BC
-#define E1000_WUS_ARP          E1000_WUFC_ARP
-#define E1000_WUS_IPV4         E1000_WUFC_IPV4
-#define E1000_WUS_IPV6         E1000_WUFC_IPV6
-#define E1000_WUS_FLX0         E1000_WUFC_FLX0
-#define E1000_WUS_FLX1         E1000_WUFC_FLX1
-#define E1000_WUS_FLX2         E1000_WUFC_FLX2
-#define E1000_WUS_FLX3         E1000_WUFC_FLX3
-#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
-
-/* Wake Up Packet Length */
-#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
-
-/* Four Flexible Filters are supported */
-#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
-/* Two Extended Flexible Filters are supported (82576) */
-#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
-#define E1000_FHFT_LENGTH_OFFSET        0xFC /* Length byte in FHFT */
-#define E1000_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */
-
-/* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
-
-#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
-#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
-#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
-
-/* Extended Device Control */
-#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
-#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
-#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
-#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
-#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
-/* Reserved (bits 4,5) in >= 82575 */
-#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
-#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
-#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
-#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
-#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
-/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
-#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
-#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
-#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
-#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* Direction of SDP3 0=in 1=out */
-#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
-#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
-#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
-/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD    0x00004000
-#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
-#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
-#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
-#define E1000_CTRL_EXT_EIAME          0x01000000
-#define E1000_CTRL_EXT_IRCA           0x00000001
-#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
-#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
-#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
-#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
-#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
-#define E1000_CTRL_EXT_CANC           0x04000000 /* Int delay cancellation */
-#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
-/* IAME enable bit (27) was removed in >= 82575 */
-#define E1000_CTRL_EXT_IAME          0x08000000 /* Int acknowledge Auto-mask */
-#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error
-                                                  * detection enabled */
-#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity
-                                                  * error detection enable */
-#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
-#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
-#define E1000_I2CCMD_REG_ADDR_SHIFT   16
-#define E1000_I2CCMD_REG_ADDR         0x00FF0000
-#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
-#define E1000_I2CCMD_PHY_ADDR         0x07000000
-#define E1000_I2CCMD_OPCODE_READ      0x08000000
-#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
-#define E1000_I2CCMD_RESET            0x10000000
-#define E1000_I2CCMD_READY            0x20000000
-#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
-#define E1000_I2CCMD_ERROR            0x80000000
-#define E1000_MAX_SGMII_PHY_REG_ADDR  255
-#define E1000_I2CCMD_PHY_TIMEOUT      200
-#define E1000_IVAR_VALID        0x80
-#define E1000_GPIE_NSICR        0x00000001
-#define E1000_GPIE_MSIX_MODE    0x00000010
-#define E1000_GPIE_EIAME        0x40000000
-#define E1000_GPIE_PBA          0x80000000
-
-/* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
-#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
-#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
-#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
-#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
-#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
-#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
-#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
-#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
-#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
-#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
-#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
-#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
-#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
-#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
-#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
-#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
-#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
-#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
-#define E1000_RXD_SPC_PRI_SHIFT 13
-#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
-#define E1000_RXD_SPC_CFI_SHIFT 12
-
-#define E1000_RXDEXT_STATERR_LB    0x00040000
-#define E1000_RXDEXT_STATERR_CE    0x01000000
-#define E1000_RXDEXT_STATERR_SE    0x02000000
-#define E1000_RXDEXT_STATERR_SEQ   0x04000000
-#define E1000_RXDEXT_STATERR_CXE   0x10000000
-#define E1000_RXDEXT_STATERR_TCPE  0x20000000
-#define E1000_RXDEXT_STATERR_IPE   0x40000000
-#define E1000_RXDEXT_STATERR_RXE   0x80000000
-
-/* mask to determine if packets should be dropped due to frame errors */
-#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
-    E1000_RXD_ERR_CE  |                \
-    E1000_RXD_ERR_SE  |                \
-    E1000_RXD_ERR_SEQ |                \
-    E1000_RXD_ERR_CXE |                \
-    E1000_RXD_ERR_RXE)
-
-/* Same mask, but for extended and packet split descriptors */
-#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
-    E1000_RXDEXT_STATERR_CE  |            \
-    E1000_RXDEXT_STATERR_SE  |            \
-    E1000_RXDEXT_STATERR_SEQ |            \
-    E1000_RXDEXT_STATERR_CXE |            \
-    E1000_RXDEXT_STATERR_RXE)
-
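As an illustration of how the error mask above is meant to be applied, a hypothetical drop check on the 8-bit status/error fields of a completed legacy Rx descriptor; the field names and helper are placeholders, and the usual e1000 type and register definitions are assumed to be in scope:

/* Hypothetical sketch: decide whether a completed end-of-packet
 * descriptor should be dropped due to frame errors. */
static bool example_drop_frame(u8 status, u8 errors)
{
	if (!(status & E1000_RXD_STAT_DD) || !(status & E1000_RXD_STAT_EOP))
		return FALSE;	/* not a completed end-of-packet descriptor */

	return (errors & E1000_RXD_ERR_FRAME_ERR_MASK) != 0;
}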
-#define E1000_MRQC_ENABLE_MASK                 0x00000007
-#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
-#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
-#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
-#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
-#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
-#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
-#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
-
-#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
-#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
-
-/* Management Control */
-#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
-#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
-#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
-#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
-#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
-#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
-#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
-/* Enable Neighbor Discovery Filtering */
-#define E1000_MANC_NEIGHBOR_EN   0x00004000
-#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
-#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
-#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
-#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
-/* Enable MAC address filtering */
-#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
-/* Enable MNG packets to host memory */
-#define E1000_MANC_EN_MNG2HOST   0x00200000
-/* Enable IP address filtering */
-#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000
-#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
-#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
-#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
-#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
-#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
-#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
-#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
-#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
-#define E1000_MANC_MPROXYE       0x40000000 /* Mngment Proxy Enable */
-#define E1000_MANC_EN_BMC2OS     0x10000000 /* OS2BMC is enabled or not */
-
-#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
-#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
-
-#define E1000_MANC2H_PORT_623    0x00000020 /* Port 0x26f */
-#define E1000_MANC2H_PORT_664    0x00000040 /* Port 0x298 */
-#define E1000_MDEF_PORT_623      0x00000800 /* Port 0x26f */
-#define E1000_MDEF_PORT_664      0x00000400 /* Port 0x298 */
-
-/* Receive Control */
-#define E1000_RCTL_RST            0x00000001    /* Software reset */
-#define E1000_RCTL_EN             0x00000002    /* enable */
-#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
-#define E1000_RCTL_UPE            0x00000008    /* unicast promisc enable */
-#define E1000_RCTL_MPE            0x00000010    /* multicast promisc enable */
-#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
-#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
-#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
-#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
-#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
-#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
-#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min thresh size */
-#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* Rx desc min thresh size */
-#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* Rx desc min thresh size */
-#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
-#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
-#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
-#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
-#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
-#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
-#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048        0x00000000    /* Rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024        0x00010000    /* Rx buffer size 1024 */
-#define E1000_RCTL_SZ_512         0x00020000    /* Rx buffer size 512 */
-#define E1000_RCTL_SZ_256         0x00030000    /* Rx buffer size 256 */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384       0x00010000    /* Rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192        0x00020000    /* Rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096        0x00030000    /* Rx buffer size 4096 */
-#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
-#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
-#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
-#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
-#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
-#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
-#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
-#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
-#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
-
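For illustration only, one way these flag bits compose into a basic receive configuration; this is a sketch, not a recommended setting, and the e1000 definitions are assumed to be in scope:

/* Illustrative RCTL composition: enable Rx, accept broadcast, 2048-byte
 * buffers (BSEX clear), and strip the Ethernet CRC. */
static u32 example_rctl_value(void)
{
	return E1000_RCTL_EN | E1000_RCTL_BAM |
	       E1000_RCTL_SZ_2048 | E1000_RCTL_SECRC;
}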
-/*
- * Use byte values for the following shift parameters
- * Usage:
- *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
- *                  E1000_PSRCTL_BSIZE0_MASK) |
- *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
- *                  E1000_PSRCTL_BSIZE1_MASK) |
- *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
- *                  E1000_PSRCTL_BSIZE2_MASK) |
- *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
- *                  E1000_PSRCTL_BSIZE3_MASK))
- * where value0 = [128..16256],  default=256
- *       value1 = [1024..64512], default=4096
- *       value2 = [0..64512],    default=4096
- *       value3 = [0..64512],    default=0
- */
-
-#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
-#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
-#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
-#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
-
-#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
-#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
-#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
-#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
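[Editor's sketch, not part of the original header: the PSRCTL usage comment above composes the register from four rounded buffer sizes; the snippet below works that recipe through with the documented default values. ROUNDUP() is the helper the comment assumes and is defined here only for illustration; <stdint.h> is assumed for uint32_t.]

/* Hypothetical illustration: build a PSRCTL value for the default buffer
 * sizes listed above (256, 4096, 4096, 0 bytes). */
#define ROUNDUP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

static inline uint32_t e1000_psrctl_defaults(void)
{
	uint32_t psrctl = 0;

	psrctl |= (ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
		  E1000_PSRCTL_BSIZE0_MASK;        /* 256 >> 7   = 0x00002 */
	psrctl |= (ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
		  E1000_PSRCTL_BSIZE1_MASK;        /* 4096 >> 2  = 0x00400 */
	psrctl |= (ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
		  E1000_PSRCTL_BSIZE2_MASK;        /* 4096 << 6  = 0x40000 */
	psrctl |= (ROUNDUP(0, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
		  E1000_PSRCTL_BSIZE3_MASK;        /* 0 */
	return psrctl;
}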
-
-/* SWFW_SYNC Definitions */
-#define E1000_SWFW_EEP_SM   0x01
-#define E1000_SWFW_PHY0_SM  0x02
-#define E1000_SWFW_PHY1_SM  0x04
-#define E1000_SWFW_CSR_SM   0x08
-#define E1000_SWFW_PHY2_SM  0x20
-#define E1000_SWFW_PHY3_SM  0x40
-#define E1000_SWFW_SW_MNG_SM 0x400
-
-/* FACTPS Definitions */
-#define E1000_FACTPS_LFS    0x40000000  /* LAN Function Select */
-/* Device Control */
-#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
-#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
-#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
-#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
-#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
-#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
-#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
-#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
-#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
-#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
-#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
-#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
-#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
-#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
-#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
-#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock
-                                             * indication in SDP[0] */
-#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through
-                                               * PHYRST_N pin */
-#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
-                                           * LINK_0 and LINK_1 pins */
-#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
-#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
-#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
-#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
-#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
-#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
-#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
-#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
-#define E1000_CTRL_RST      0x04000000  /* Global reset */
-#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
-#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
-#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
-#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
-#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
-#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
-#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
-
-/*
- * Bit definitions for the Management Data IO (MDIO) and Management Data
- * Clock (MDC) pins in the Device Control Register.
- */
-#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
-#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
-#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
-#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
-#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
-#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
-#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
-#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
-
-#define E1000_CONNSW_ENRGSRC             0x4
-#define E1000_PCS_CFG_PCS_EN             8
-#define E1000_PCS_LCTL_FLV_LINK_UP       1
-#define E1000_PCS_LCTL_FSV_10            0
-#define E1000_PCS_LCTL_FSV_100           2
-#define E1000_PCS_LCTL_FSV_1000          4
-#define E1000_PCS_LCTL_FDV_FULL          8
-#define E1000_PCS_LCTL_FSD               0x10
-#define E1000_PCS_LCTL_FORCE_LINK        0x20
-#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
-#define E1000_PCS_LCTL_FORCE_FCTRL       0x80
-#define E1000_PCS_LCTL_AN_ENABLE         0x10000
-#define E1000_PCS_LCTL_AN_RESTART        0x20000
-#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
-#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
-#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
-#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
-#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
-#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
-#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
-
-#define E1000_PCS_LSTS_LINK_OK           1
-#define E1000_PCS_LSTS_SPEED_10          0
-#define E1000_PCS_LSTS_SPEED_100         2
-#define E1000_PCS_LSTS_SPEED_1000        4
-#define E1000_PCS_LSTS_DUPLEX_FULL       8
-#define E1000_PCS_LSTS_SYNK_OK           0x10
-#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
-#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
-#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
-#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
-#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
-
-/* Device Status */
-#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
-#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
-#define E1000_STATUS_FUNC_SHIFT 2
-#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
-#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
-#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
-#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
-#define E1000_STATUS_SPEED_MASK 0x000000C0
-#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
-#define E1000_STATUS_LAN_INIT_DONE 0x00000200  /* Lan Init Completion by NVM */
-#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
-#define E1000_STATUS_PHYRA      0x00000400      /* PHY Reset Asserted */
-#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state.
-                                                 * Clear on write '0'. */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
-#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
-#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
-#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
-#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
-#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
-#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
-#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
-#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
-#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
-#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution
-                                            * disabled */
-#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
-#define E1000_STATUS_FUSE_8       0x04000000
-#define E1000_STATUS_FUSE_9       0x08000000
-#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
-#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
-
-/* Constants used to interpret the masked PCI-X bus speed. */
-#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed 50-66 MHz */
-#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
-#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /*PCI-X bus speed 100-133 MHz*/
-
-#define SPEED_10    10
-#define SPEED_100   100
-#define SPEED_1000  1000
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
-
-#define PHY_FORCE_TIME   20
-
-#define ADVERTISE_10_HALF                 0x0001
-#define ADVERTISE_10_FULL                 0x0002
-#define ADVERTISE_100_HALF                0x0004
-#define ADVERTISE_100_FULL                0x0008
-#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
-#define ADVERTISE_1000_FULL               0x0020
-
-/* 1000/H is not supported, nor spec-compliant. */
-#define E1000_ALL_SPEED_DUPLEX  (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
-                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
-                                                     ADVERTISE_1000_FULL)
-#define E1000_ALL_NOT_GIG       (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
-                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
-#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
-#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
-#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL |  ADVERTISE_100_FULL | \
-                                                     ADVERTISE_1000_FULL)
-#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
-
-#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
-
-/* LED Control */
-#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
-#define E1000_LEDCTL_LED0_MODE_SHIFT      0
-#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
-#define E1000_LEDCTL_LED0_IVRT            0x00000040
-#define E1000_LEDCTL_LED0_BLINK           0x00000080
-#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
-#define E1000_LEDCTL_LED1_MODE_SHIFT      8
-#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
-#define E1000_LEDCTL_LED1_IVRT            0x00004000
-#define E1000_LEDCTL_LED1_BLINK           0x00008000
-#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
-#define E1000_LEDCTL_LED2_MODE_SHIFT      16
-#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
-#define E1000_LEDCTL_LED2_IVRT            0x00400000
-#define E1000_LEDCTL_LED2_BLINK           0x00800000
-#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
-#define E1000_LEDCTL_LED3_MODE_SHIFT      24
-#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
-#define E1000_LEDCTL_LED3_IVRT            0x40000000
-#define E1000_LEDCTL_LED3_BLINK           0x80000000
-
-#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
-#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
-#define E1000_LEDCTL_MODE_LINK_UP       0x2
-#define E1000_LEDCTL_MODE_ACTIVITY      0x3
-#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
-#define E1000_LEDCTL_MODE_LINK_10       0x5
-#define E1000_LEDCTL_MODE_LINK_100      0x6
-#define E1000_LEDCTL_MODE_LINK_1000     0x7
-#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
-#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
-#define E1000_LEDCTL_MODE_COLLISION     0xA
-#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
-#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
-#define E1000_LEDCTL_MODE_PAUSED        0xD
-#define E1000_LEDCTL_MODE_LED_ON        0xE
-#define E1000_LEDCTL_MODE_LED_OFF       0xF
-
-/* Transmit Descriptor bit definitions */
-#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
-#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
-#define E1000_TXD_POPTS_SHIFT 8         /* POPTS shift */
-#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
-/* Extended desc bits for Linksec and timesync */
-
-/* Transmit Control */
-#define E1000_TCTL_RST    0x00000001    /* software reset */
-#define E1000_TCTL_EN     0x00000002    /* enable Tx */
-#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
-#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
-#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
-#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
-#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
-#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
-#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
-#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
-
-/* Transmit Arbitration Count */
-#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
-
-/* SerDes Control */
-#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
-
-/* Receive Checksum Control */
-#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
-#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
-#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
-#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
-#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
-#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
-
-/* Header split receive */
-#define E1000_RFCTL_ISCSI_DIS           0x00000001
-#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
-#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
-#define E1000_RFCTL_NFSW_DIS            0x00000040
-#define E1000_RFCTL_NFSR_DIS            0x00000080
-#define E1000_RFCTL_NFS_VER_MASK        0x00000300
-#define E1000_RFCTL_NFS_VER_SHIFT       8
-#define E1000_RFCTL_IPV6_DIS            0x00000400
-#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
-#define E1000_RFCTL_ACK_DIS             0x00001000
-#define E1000_RFCTL_ACKD_DIS            0x00002000
-#define E1000_RFCTL_IPFRSP_DIS          0x00004000
-#define E1000_RFCTL_EXTEN               0x00008000
-#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
-#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
-#define E1000_RFCTL_LEF                 0x00040000
-
-/* Collision related configuration parameters */
-#define E1000_COLLISION_THRESHOLD       15
-#define E1000_CT_SHIFT                  4
-#define E1000_COLLISION_DISTANCE        63
-#define E1000_COLD_SHIFT                12
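[Editor's sketch, not from the original file: the collision defaults above are meant to be shifted into the E1000_TCTL_CT and E1000_TCTL_COLD fields defined under "Transmit Control"; a minimal composition looks like this.]

static inline uint32_t e1000_tctl_collision_defaults(uint32_t tctl)
{
	/* Clear the collision fields, then insert the default values. */
	tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
	tctl |= (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) &
		E1000_TCTL_CT;                   /* 15 << 4  = 0x000F0 */
	tctl |= (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT) &
		E1000_TCTL_COLD;                 /* 63 << 12 = 0x3F000 */
	return tctl;
}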
-
-/* Default values for the transmit IPG register */
-#define DEFAULT_82543_TIPG_IPGT_FIBER  9
-#define DEFAULT_82543_TIPG_IPGT_COPPER 8
-
-#define E1000_TIPG_IPGT_MASK  0x000003FF
-#define E1000_TIPG_IPGR1_MASK 0x000FFC00
-#define E1000_TIPG_IPGR2_MASK 0x3FF00000
-
-#define DEFAULT_82543_TIPG_IPGR1 8
-#define E1000_TIPG_IPGR1_SHIFT  10
-
-#define DEFAULT_82543_TIPG_IPGR2 6
-#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
-#define E1000_TIPG_IPGR2_SHIFT  20
-
-/* Ethertype field values */
-#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
-
-#define ETHERNET_FCS_SIZE       4
-#define MAX_JUMBO_FRAME_SIZE    0x3F00
-
-/* Extended Configuration Control and Size */
-#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
-#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
-#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
-#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
-#define E1000_EXTCNF_CTRL_GATE_PHY_CFG           0x00000080
-#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
-#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
-#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
-#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
-
-#define E1000_PHY_CTRL_SPD_EN             0x00000001
-#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
-#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
-#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
-#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
-
-#define E1000_KABGTXD_BGSQLBIAS           0x00050000
-
-/* PBA constants */
-#define E1000_PBA_6K  0x0006    /* 6KB */
-#define E1000_PBA_8K  0x0008    /* 8KB */
-#define E1000_PBA_10K 0x000A    /* 10KB */
-#define E1000_PBA_12K 0x000C    /* 12KB */
-#define E1000_PBA_14K 0x000E    /* 14KB */
-#define E1000_PBA_16K 0x0010    /* 16KB */
-#define E1000_PBA_18K 0x0012
-#define E1000_PBA_20K 0x0014
-#define E1000_PBA_22K 0x0016
-#define E1000_PBA_24K 0x0018
-#define E1000_PBA_26K 0x001A
-#define E1000_PBA_30K 0x001E
-#define E1000_PBA_32K 0x0020
-#define E1000_PBA_34K 0x0022
-#define E1000_PBA_35K 0x0023
-#define E1000_PBA_38K 0x0026
-#define E1000_PBA_40K 0x0028
-#define E1000_PBA_48K 0x0030    /* 48KB */
-#define E1000_PBA_64K 0x0040    /* 64KB */
-
-#define E1000_PBS_16K E1000_PBA_16K
-#define E1000_PBS_24K E1000_PBA_24K
-
-#define IFS_MAX       80
-#define IFS_MIN       40
-#define IFS_RATIO     4
-#define IFS_STEP      10
-#define MIN_NUM_XMITS 1000
-
-/* SW Semaphore Register */
-#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
-#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
-#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
-#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
-
-#define E1000_SWSM2_LOCK        0x00000002 /* Secondary driver semaphore bit */
-
-/* Interrupt Cause Read */
-#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
-#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
-#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
-#define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
-#define E1000_ICR_RXO           0x00000040 /* Rx overrun */
-#define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
-#define E1000_ICR_VMMB          0x00000100 /* VM MB event */
-#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
-#define E1000_ICR_RXCFG         0x00000400 /* Rx /c/ ordered set */
-#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
-#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
-#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
-#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
-#define E1000_ICR_TXD_LOW       0x00008000
-#define E1000_ICR_SRPD          0x00010000
-#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
-#define E1000_ICR_MNG           0x00040000 /* Manageability event */
-#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
-#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
-#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver
-                                            * should claim the interrupt */
-#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
-#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
-#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
-#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
-#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
-#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW
-                                            * bit in the FWSM */
-#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates
-                                            * an interrupt */
-#define E1000_ICR_DOUTSYNC      0x10000000 /* NIC DMA out of sync */
-#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
-#define E1000_ICR_FER           0x00400000 /* Fatal Error */
-
-#define E1000_ICR_THS           0x00800000 /* ICR.THS: Thermal Sensor Event*/
-#define E1000_ICR_MDDET         0x10000000 /* Malicious Driver Detect */
-
-/* Extended Interrupt Cause Read */
-#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
-#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
-#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
-#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
-#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
-#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
-#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
-#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
-#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
-#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
-/* TCP Timer */
-#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
-#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
-#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
-#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
-
-/*
- * This defines the bits that are set in the Interrupt Mask
- * Set/Read Register.  Each bit is documented below:
- *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
- *   o RXSEQ  = Receive Sequence Error
- */
-#define POLL_IMS_ENABLE_MASK ( \
-    E1000_IMS_RXDMT0 |    \
-    E1000_IMS_RXSEQ)
-
-/*
- * This defines the bits that are set in the Interrupt Mask
- * Set/Read Register.  Each bit is documented below:
- *   o RXT0   = Receiver Timer Interrupt (ring 0)
- *   o TXDW   = Transmit Descriptor Written Back
- *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
- *   o RXSEQ  = Receive Sequence Error
- *   o LSC    = Link Status Change
- */
-#define IMS_ENABLE_MASK ( \
-    E1000_IMS_RXT0   |    \
-    E1000_IMS_TXDW   |    \
-    E1000_IMS_RXDMT0 |    \
-    E1000_IMS_RXSEQ  |    \
-    E1000_IMS_LSC)
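[Editor's sketch of how the mask above is typically used, assuming the E1000_WRITE_REG() osdep macro, the E1000_IMS register offset from e1000_regs.h, and the struct e1000_hw type from e1000_hw.h; none of those are defined in this file.]

static inline void e1000_arm_default_irqs(struct e1000_hw *hw)
{
	/* Writing a 1 to an IMS bit unmasks that interrupt cause. */
	E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
}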
-
-/* Interrupt Mask Set */
-#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
-#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
-#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
-#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
-#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
-#define E1000_IMS_RXO       E1000_ICR_RXO       /* Rx overrun */
-#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
-#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
-#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
-#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
-#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
-#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
-#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
-#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
-#define E1000_IMS_SRPD      E1000_ICR_SRPD
-#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
-#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
-#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
-#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
-#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
-                                                         * parity error */
-#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
-                                                         * parity error */
-#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
-                                                         * parity error */
-#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
-                                                         * error */
-#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
-                                                         * parity error */
-#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
-                                                         * parity error */
-#define E1000_IMS_DSW       E1000_ICR_DSW
-#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
-#define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
-#define E1000_IMS_EPRST     E1000_ICR_EPRST
-#define E1000_IMS_FER           E1000_ICR_FER /* Fatal Error */
-
-#define E1000_IMS_THS           E1000_ICR_THS /* ICR.THS: Thermal Sensor Event*/
-#define E1000_IMS_MDDET         E1000_ICR_MDDET /* Malicious Driver Detect */
-/* Extended Interrupt Mask Set */
-#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
-#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
-#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
-#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
-#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
-#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
-#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
-#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
-#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
-#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
-
-/* Interrupt Cause Set */
-#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
-#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
-#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
-#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
-#define E1000_ICS_RXO       E1000_ICR_RXO       /* Rx overrun */
-#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
-#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
-#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
-#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
-#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
-#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
-#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
-#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
-#define E1000_ICS_SRPD      E1000_ICR_SRPD
-#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
-#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
-#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
-#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
-#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
-                                                         * parity error */
-#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
-                                                         * parity error */
-#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
-                                                         * parity error */
-#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
-                                                         * error */
-#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
-                                                         * parity error */
-#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
-                                                         * parity error */
-#define E1000_ICS_DSW       E1000_ICR_DSW
-#define E1000_ICS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
-#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
-#define E1000_ICS_EPRST     E1000_ICR_EPRST
-
-/* Extended Interrupt Cause Set */
-#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
-#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
-#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
-#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
-#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
-#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
-#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
-#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
-#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
-#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
-
-#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
-/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
-#define E1000_EITR_CNT_IGNR     0x80000000 /* Don't reset counters on write */
-
-/* Transmit Descriptor Control */
-#define E1000_TXDCTL_PTHRESH    0x0000003F /* TXDCTL Prefetch Threshold */
-#define E1000_TXDCTL_HTHRESH    0x00003F00 /* TXDCTL Host Threshold */
-#define E1000_TXDCTL_WTHRESH    0x003F0000 /* TXDCTL Writeback Threshold */
-#define E1000_TXDCTL_GRAN       0x01000000 /* TXDCTL Granularity */
-#define E1000_TXDCTL_LWTHRESH   0xFE000000 /* TXDCTL Low Threshold */
-#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
-#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
-/* Enable the counting of descriptors still to be processed. */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000
-
-/* Flow Control Constants */
-#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
-#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
-#define FLOW_CONTROL_TYPE         0x8808
-
-/* 802.1q VLAN Packet Size */
-#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
-#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
-
-/* Receive Address */
-/*
- * Number of high/low register pairs in the RAR. The RAR (Receive Address
- * Registers) holds the directed and multicast addresses that we monitor.
- * Technically, we have 16 spots.  However, we reserve one of these spots
- * (RAR[15]) for our directed address used by controllers with
- * manageability enabled, allowing us room for 15 multicast addresses.
- */
-#define E1000_RAR_ENTRIES     15
-#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
-#define E1000_RAL_MAC_ADDR_LEN 4
-#define E1000_RAH_MAC_ADDR_LEN 2
-#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
-#define E1000_RAH_POOL_MASK     0x03FC0000
-#define E1000_RAH_POOL_SHIFT    18
-#define E1000_RAH_POOL_1        0x00040000
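[Editor's sketch, assuming <stdint.h>: the comment above describes the RAL/RAH pair layout -- E1000_RAL_MAC_ADDR_LEN (4) bytes go in the low register, E1000_RAH_MAC_ADDR_LEN (2) bytes plus the Address Valid bit in the high one. The addr[] byte order shown is an assumption for illustration.]

static inline void
e1000_pack_rar(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
{
	*ral = (uint32_t)addr[0] |
	       ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) |
	       ((uint32_t)addr[3] << 24);    /* low 4 bytes -> RAL */
	*rah = (uint32_t)addr[4] |
	       ((uint32_t)addr[5] << 8);     /* high 2 bytes -> RAH */
	*rah |= E1000_RAH_AV;                /* mark the entry valid */
}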
-
-/* Error Codes */
-#define E1000_SUCCESS      0
-#define E1000_ERR_NVM      1
-#define E1000_ERR_PHY      2
-#define E1000_ERR_CONFIG   3
-#define E1000_ERR_PARAM    4
-#define E1000_ERR_MAC_INIT 5
-#define E1000_ERR_PHY_TYPE 6
-#define E1000_ERR_RESET   9
-#define E1000_ERR_MASTER_REQUESTS_PENDING 10
-#define E1000_ERR_HOST_INTERFACE_COMMAND 11
-#define E1000_BLK_PHY_RESET   12
-#define E1000_ERR_SWFW_SYNC 13
-#define E1000_NOT_IMPLEMENTED 14
-#define E1000_ERR_MBX      15
-#define E1000_ERR_INVALID_ARGUMENT  16
-#define E1000_ERR_NO_SPACE          17
-#define E1000_ERR_NVM_PBA_SECTION   18
-
-/* Loop limit on how long we wait for auto-negotiation to complete */
-#define FIBER_LINK_UP_LIMIT               50
-#define COPPER_LINK_UP_LIMIT              10
-#define PHY_AUTO_NEG_LIMIT                45
-#define PHY_FORCE_LIMIT                   20
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define MASTER_DISABLE_TIMEOUT      800
-/* Number of milliseconds we wait for PHY configuration done after MAC reset */
-#define PHY_CFG_TIMEOUT             100
-/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
-#define MDIO_OWNERSHIP_TIMEOUT      10
-/* Number of milliseconds for NVM auto read done after MAC reset. */
-#define AUTO_READ_DONE_TIMEOUT      10
-
-/* Flow Control */
-#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
-#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
-#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
-#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
-
-/* Transmit Configuration Word */
-#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
-#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
-#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
-#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
-#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
-#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
-#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
-#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
-#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
-#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
-
-/* Receive Configuration Word */
-#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
-#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
-#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
-#define E1000_RXCW_CC         0x10000000        /* Receive config change */
-#define E1000_RXCW_C          0x20000000        /* Receive config */
-#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
-#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
-
-#define E1000_TSYNCTXCTL_VALID    0x00000001 /* Tx timestamp valid */
-#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable Tx timestamping */
-
-#define E1000_TSYNCRXCTL_VALID      0x00000001 /* Rx timestamp valid */
-#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* Rx type mask */
-#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
-#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
-#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
-#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
-#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
-#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable Rx timestamping */
-
-#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
-#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
-#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
-#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
-
-#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
-#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
-#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
-#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
-#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
-#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
-
-#define E1000_TIMINCA_16NS_SHIFT 24
-/* TUPLE Filtering Configuration */
-#define E1000_TTQF_DISABLE_MASK   0xF0008000     /* TTQF Disable Mask */
-#define E1000_TTQF_QUEUE_ENABLE   0x100          /* TTQF Queue Enable Bit */
-#define E1000_TTQF_PROTOCOL_MASK  0xFF           /* TTQF Protocol Mask */
-/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
-#define E1000_TTQF_PROTOCOL_TCP   0x0
-/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
-#define E1000_TTQF_PROTOCOL_UDP   0x1
-/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
-#define E1000_TTQF_PROTOCOL_SCTP  0x2
-#define E1000_TTQF_PROTOCOL_SHIFT 5              /* TTQF Protocol Shift */
-#define E1000_TTQF_QUEUE_SHIFT    16             /* TTQF Queue Shift */
-#define E1000_TTQF_RX_QUEUE_MASK  0x70000        /* TTQF Queue Mask */
-#define E1000_TTQF_MASK_ENABLE    0x10000000     /* TTQF Mask Enable Bit */
-#define E1000_IMIR_CLEAR_MASK     0xF001FFFF     /* IMIR Reg Clear Mask */
-#define E1000_IMIR_PORT_BYPASS    0x20000        /* IMIR Port Bypass Bit */
-#define E1000_IMIR_PRIORITY_SHIFT 29             /* IMIR Priority Shift */
-#define E1000_IMIREXT_CLEAR_MASK  0x7FFFF        /* IMIREXT Reg Clear Mask */
-
-#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
-#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
-#define E1000_MDICNFG_PHY_MASK    0x03E00000
-#define E1000_MDICNFG_PHY_SHIFT   21
-
-#define E1000_THSTAT_LOW_EVENT      0x20000000  /* Low thermal threshold */
-#define E1000_THSTAT_MID_EVENT      0x00200000  /* Mid thermal threshold */
-#define E1000_THSTAT_HIGH_EVENT     0x00002000  /* High thermal threshold */
-#define E1000_THSTAT_PWR_DOWN       0x00000001  /* Power Down Event */
-#define E1000_THSTAT_LINK_THROTTLE  0x00000002  /* Link Speed Throttle Event */
-
-/* Powerville EEE defines */
-#define E1000_IPCNFG_EEE_1G_AN      0x00000008  /* IPCNFG EEE Enable 1G AN */
-#define E1000_IPCNFG_EEE_100M_AN    0x00000004  /* IPCNFG EEE Enable 100M AN */
-#define E1000_EEER_TX_LPI_EN        0x00010000  /* EEER Tx LPI Enable */
-#define E1000_EEER_RX_LPI_EN        0x00020000  /* EEER Rx LPI Enable */
-#define E1000_EEER_LPI_FC           0x00040000  /* EEER Enable on Flow Control*/
-/* EEE status */
-#define E1000_EEER_EEE_NEG          0x20000000  /* EEE capability negotiated */
-#define E1000_EEER_RX_LPI_STATUS    0x40000000  /* Rx in LPI state */
-#define E1000_EEER_TX_LPI_STATUS    0x80000000  /* Tx in LPI state */
-
-/* PCI Express Control */
-#define E1000_GCR_RXD_NO_SNOOP          0x00000001
-#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
-#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
-#define E1000_GCR_TXD_NO_SNOOP          0x00000008
-#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
-#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
-#define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
-#define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
-#define E1000_GCR_CMPL_TMOUT_RESEND     0x00010000
-#define E1000_GCR_CAP_VER2              0x00040000
-
-#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
-                           E1000_GCR_RXDSCW_NO_SNOOP      | \
-                           E1000_GCR_RXDSCR_NO_SNOOP      | \
-                           E1000_GCR_TXD_NO_SNOOP         | \
-                           E1000_GCR_TXDSCW_NO_SNOOP      | \
-                           E1000_GCR_TXDSCR_NO_SNOOP)
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN       0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000       0x0040
-#define MII_CR_SPEED_100        0x2000
-#define MII_CR_SPEED_10         0x0000
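[Editor's sketch: forcing 100 Mb/s full duplex through the PHY Control bits above. Per the "bits 6,13" comment, MII_CR_SPEED_100 sets only the LSB speed-select bit; auto-negotiation is cleared so the forced value takes effect. Illustration only.]

static inline uint16_t mii_force_100_full(uint16_t phy_ctrl)
{
	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN |
		      MII_CR_SPEED_SELECT_MSB | MII_CR_SPEED_SELECT_LSB);
	phy_ctrl |= MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;
	return phy_ctrl;
}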
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
-
-/* Autoneg Advertisement Register */
-#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
-#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
-#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
-#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
-#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
-#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
-#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
-
-/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
-#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
-#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
-#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
-#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
-#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
-#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
-#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
-#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
-#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
-
-/* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
-#define NWAY_ER_PAGE_RXD          0x0002 /* New Page received */
-#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Next Page capable */
-#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page capable */
-#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
-
-/* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
-#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
-#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
-                                        /* 0=DTE device */
-#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
-                                        /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE      0x1000 /* 1=Master/Slave manual config value */
-                                        /* 0=Automatic Master/Slave config */
-#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
-#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
-#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
-#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
-
-/* 1000BASE-T Status Register */
-#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
-#define SR_1000T_ASYM_PAUSE_DIR  0x0100 /* LP asymmetric pause direction bit */
-#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
-#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
-#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local Tx is Master, 0=Slave */
-#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
-
-#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
-
-/* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CONTROL      0x00 /* Control Register */
-#define PHY_STATUS       0x01 /* Status Register */
-#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
-#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
-#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
-#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
-
-#define PHY_CONTROL_LB   0x4000 /* PHY Loopback bit */
-
-/* NVM Control */
-#define E1000_EECD_SK        0x00000001 /* NVM Clock */
-#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
-#define E1000_EECD_DI        0x00000004 /* NVM Data In */
-#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
-#define E1000_EECD_FWE_MASK  0x00000030
-#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
-#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
-#define E1000_EECD_FWE_SHIFT 4
-#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
-#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
-#define E1000_EECD_PRES      0x00000100 /* NVM Present */
-#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
-#define E1000_EECD_BLOCKED   0x00008000 /* Bit banging access blocked flag */
-#define E1000_EECD_ABORT     0x00010000 /* NVM operation aborted flag */
-#define E1000_EECD_TIMEOUT   0x00020000 /* NVM read operation timeout flag */
-#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
-/* NVM Addressing bits based on type 0=small, 1=large */
-#define E1000_EECD_ADDR_BITS 0x00000400
-#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
-#ifndef E1000_NVM_GRANT_ATTEMPTS
-#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
-#endif
-#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
-#define E1000_EECD_SIZE_EX_SHIFT     11
-#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
-#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
-#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
-#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
-#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
-#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
-#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
-#define E1000_EECD_SECVAL_SHIFT      22
-#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
-
-#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
-#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
-#define E1000_NVM_RW_REG_DATA   16  /* Offset to data in NVM read/write regs */
-#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
-#define E1000_NVM_RW_REG_START  1    /* Start operation */
-#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
-#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
-#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
-#define E1000_FLASH_UPDATES  2000
-
-/* NVM Word Offsets */
-#define NVM_COMPAT                 0x0003
-#define NVM_ID_LED_SETTINGS        0x0004
-#define NVM_VERSION                0x0005
-#define NVM_SERDES_AMPLITUDE       0x0006 /* SERDES output amplitude */
-#define NVM_PHY_CLASS_WORD         0x0007
-#define NVM_INIT_CONTROL1_REG      0x000A
-#define NVM_INIT_CONTROL2_REG      0x000F
-#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
-#define NVM_INIT_CONTROL3_PORT_B   0x0014
-#define NVM_INIT_3GIO_3            0x001A
-#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
-#define NVM_INIT_CONTROL3_PORT_A   0x0024
-#define NVM_CFG                    0x0012
-#define NVM_FLASH_VERSION          0x0032
-#define NVM_ALT_MAC_ADDR_PTR       0x0037
-#define NVM_CHECKSUM_REG           0x003F
-#define NVM_COMPATIBILITY_REG_3    0x0003
-#define NVM_COMPATIBILITY_BIT_MASK 0x8000
-
-#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
-#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
-#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
-#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
-
-#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
-
-/* Mask bits for fields in Word 0x24 of the NVM */
-#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
-#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
-
-/* Mask bits for fields in Word 0x0f of the NVM */
-#define NVM_WORD0F_PAUSE_MASK       0x3000
-#define NVM_WORD0F_PAUSE            0x1000
-#define NVM_WORD0F_ASM_DIR          0x2000
-#define NVM_WORD0F_ANE              0x0800
-#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
-#define NVM_WORD0F_LPLU             0x0001
-
-/* Mask bits for fields in Word 0x1a of the NVM */
-#define NVM_WORD1A_ASPM_MASK  0x000C
-
-/* Mask bits for fields in Word 0x03 of the EEPROM */
-#define NVM_COMPAT_LOM    0x0800
-
-/* length of string needed to store PBA number */
-#define E1000_PBANUM_LENGTH             11
-
-/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
-#define NVM_SUM                    0xBABA
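[Editor's sketch of the checksum rule stated above, assuming <stdint.h>: the first NVM_CHECKSUM_REG + 1 words must sum, modulo 2^16, to NVM_SUM. The nvm_read callback stands in for whatever word-read routine the caller has and is an assumption here.]

static inline int e1000_nvm_sum_ok(uint16_t (*nvm_read)(uint16_t offset))
{
	uint16_t sum = 0;
	uint16_t i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum = (uint16_t)(sum + nvm_read(i));
	return sum == NVM_SUM;
}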
-
-#define NVM_MAC_ADDR_OFFSET        0
-#define NVM_PBA_OFFSET_0           8
-#define NVM_PBA_OFFSET_1           9
-#define NVM_PBA_PTR_GUARD          0xFAFA
-#define NVM_RESERVED_WORD          0xFFFF
-#define NVM_PHY_CLASS_A            0x8000
-#define NVM_SERDES_AMPLITUDE_MASK  0x000F
-#define NVM_SIZE_MASK              0x1C00
-#define NVM_SIZE_SHIFT             10
-#define NVM_WORD_SIZE_BASE_SHIFT   6
-#define NVM_SWDPIO_EXT_SHIFT       4
-
-/* NVM Commands - Microwire */
-#define NVM_READ_OPCODE_MICROWIRE  0x6  /* NVM read opcode */
-#define NVM_WRITE_OPCODE_MICROWIRE 0x5  /* NVM write opcode */
-#define NVM_ERASE_OPCODE_MICROWIRE 0x7  /* NVM erase opcode */
-#define NVM_EWEN_OPCODE_MICROWIRE  0x13 /* NVM erase/write enable */
-#define NVM_EWDS_OPCODE_MICROWIRE  0x10 /* NVM erase/write disable */
-
-/* NVM Commands - SPI */
-#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
-#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
-#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
-#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
-#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
-#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
-#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
-#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
-
-/* SPI NVM Status Register */
-#define NVM_STATUS_RDY_SPI         0x01
-#define NVM_STATUS_WEN_SPI         0x02
-#define NVM_STATUS_BP0_SPI         0x04
-#define NVM_STATUS_BP1_SPI         0x08
-#define NVM_STATUS_WPEN_SPI        0x80
-
-/* Word definitions for ID LED Settings */
-#define ID_LED_RESERVED_0000 0x0000
-#define ID_LED_RESERVED_FFFF 0xFFFF
-#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
-                              (ID_LED_OFF1_OFF2 <<  8) | \
-                              (ID_LED_DEF1_DEF2 <<  4) | \
-                              (ID_LED_DEF1_DEF2))
-#define ID_LED_DEF1_DEF2     0x1
-#define ID_LED_DEF1_ON2      0x2
-#define ID_LED_DEF1_OFF2     0x3
-#define ID_LED_ON1_DEF2      0x4
-#define ID_LED_ON1_ON2       0x5
-#define ID_LED_ON1_OFF2      0x6
-#define ID_LED_OFF1_DEF2     0x7
-#define ID_LED_OFF1_ON2      0x8
-#define ID_LED_OFF1_OFF2     0x9
-
-#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
-#define IGP_ACTIVITY_LED_ENABLE 0x0300
-#define IGP_LED3_MODE           0x07000000
-
-/* PCI/PCI-X/PCI-EX Config space */
-#define PCIX_COMMAND_REGISTER        0xE6
-#define PCIX_STATUS_REGISTER_LO      0xE8
-#define PCIX_STATUS_REGISTER_HI      0xEA
-#define PCI_HEADER_TYPE_REGISTER     0x0E
-#define PCIE_LINK_STATUS             0x12
-#define PCIE_DEVICE_CONTROL2         0x28
-
-#define PCIX_COMMAND_MMRBC_MASK      0x000C
-#define PCIX_COMMAND_MMRBC_SHIFT     0x2
-#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
-#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
-#define PCIX_STATUS_HI_MMRBC_4K      0x3
-#define PCIX_STATUS_HI_MMRBC_2K      0x2
-#define PCIX_STATUS_LO_FUNC_MASK     0x7
-#define PCI_HEADER_TYPE_MULTIFUNC    0x80
-#define PCIE_LINK_WIDTH_MASK         0x3F0
-#define PCIE_LINK_WIDTH_SHIFT        4
-#define PCIE_LINK_SPEED_MASK         0x0F
-#define PCIE_LINK_SPEED_2500         0x01
-#define PCIE_LINK_SPEED_5000         0x02
-#define PCIE_DEVICE_CONTROL2_16ms    0x0005
-
-#ifndef ETH_ADDR_LEN
-#define ETH_ADDR_LEN                 6
-#endif
-
-#define PHY_REVISION_MASK      0xFFFFFFF0
-#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
-#define MAX_PHY_MULTI_PAGE_REG 0xF
-
-/* Bit definitions for valid PHY IDs. */
-/*
- * I = Integrated
- * E = External
- */
-#define M88E1000_E_PHY_ID    0x01410C50
-#define M88E1000_I_PHY_ID    0x01410C30
-#define M88E1011_I_PHY_ID    0x01410C20
-#define IGP01E1000_I_PHY_ID  0x02A80380
-#define M88E1011_I_REV_4     0x04
-#define M88E1111_I_PHY_ID    0x01410CC0
-#define M88E1112_E_PHY_ID    0x01410C90
-#define I347AT4_E_PHY_ID     0x01410DC0
-#define M88E1340M_E_PHY_ID   0x01410DF0
-#define GG82563_E_PHY_ID     0x01410CA0
-#define IGP03E1000_E_PHY_ID  0x02A80390
-#define IFE_E_PHY_ID         0x02A80330
-#define IFE_PLUS_E_PHY_ID    0x02A80320
-#define IFE_C_E_PHY_ID       0x02A80310
-#define I82580_I_PHY_ID      0x015403A0
-#define I350_I_PHY_ID        0x015403B0
-#define IGP04E1000_E_PHY_ID  0x02A80391
-#define M88_VENDOR           0x0141
-
-/* M88E1000 Specific Registers */
-#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
-#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
-#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
-#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
-#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
-
-#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
-#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
-#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
-#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
-#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
-
-/* M88E1000 PHY Specific Control Register */
-#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
-#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
-/* 1=CLK125 low, 0=CLK125 toggling */
-#define M88E1000_PSCR_CLK125_DISABLE    0x0010
-#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000 /* MDI Crossover Mode bits 6:5 */
-                                               /* Manual MDI configuration */
-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
-/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
-#define M88E1000_PSCR_AUTO_X_1000T     0x0040
-/* Auto crossover enabled all speeds */
-#define M88E1000_PSCR_AUTO_X_MODE      0x0060
-/*
- * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold),
- * 0=Normal 10BASE-T Rx Threshold
- */
-#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
-/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
-#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
-#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Tx */
-
-/* M88E1000 PHY Specific Status Register */
-#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
-#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
-#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
-#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
-/*
- * 0 = <50M
- * 1 = 50-80M
- * 2 = 80-110M
- * 3 = 110-140M
- * 4 = >140M
- */
-#define M88E1000_PSSR_CABLE_LENGTH       0x0380
-#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
-#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
-#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
-#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
-#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
-#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
-#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
-#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
-
-#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
-
-/* M88E1000 Extended PHY Specific Control Register */
-#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
-/*
- * 1 = Lost lock detect enabled.
- * Will assert lost lock and bring
- * link down if idle not seen
- * within 1ms in 1000BASE-T
- */
-#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
- * are the master
- */
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
- * are the slave
- */
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
-#define M88E1000_EPSCR_TX_CLK_2_5       0x0060 /* 2.5 MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_25        0x0070 /* 25  MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_0         0x0000 /* NO  TX_CLK */
-
-/* M88E1111 Specific Registers */
-#define M88E1111_PHY_PAGE_SELECT1       0x16  /* for registers 0-28 */
-#define M88E1111_PHY_PAGE_SELECT2       0x1D  /* for registers 30-31 */
-
-/* M88E1111 page select register mask */
-#define M88E1111_PHY_PAGE_SELECT_MASK1  0xFF
-#define M88E1111_PHY_PAGE_SELECT_MASK2  0x3F
-
-/* Intel I347AT4 Registers */
-
-#define I347AT4_PCDL            0x10 /* PHY Cable Diagnostics Length */
-#define I347AT4_PCDC            0x15 /* PHY Cable Diagnostics Control */
-#define I347AT4_PAGE_SELECT     0x16
-
-/* I347AT4 Extended PHY Specific Control Register */
-
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
- * are the master
- */
-#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
-#define I347AT4_PSCR_DOWNSHIFT_MASK   0x7000
-#define I347AT4_PSCR_DOWNSHIFT_1X     0x0000
-#define I347AT4_PSCR_DOWNSHIFT_2X     0x1000
-#define I347AT4_PSCR_DOWNSHIFT_3X     0x2000
-#define I347AT4_PSCR_DOWNSHIFT_4X     0x3000
-#define I347AT4_PSCR_DOWNSHIFT_5X     0x4000
-#define I347AT4_PSCR_DOWNSHIFT_6X     0x5000
-#define I347AT4_PSCR_DOWNSHIFT_7X     0x6000
-#define I347AT4_PSCR_DOWNSHIFT_8X     0x7000
-
-/* I347AT4 PHY Cable Diagnostics Control */
-#define I347AT4_PCDC_CABLE_LENGTH_UNIT  0x0400 /* 0=cm 1=meters */
-
-/* M88E1112 only registers */
-#define M88E1112_VCT_DSP_DISTANCE       0x001A
-
-/* M88EC018 Rev 2 specific DownShift settings */
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
-
-/*
- * Bits...
- * 15-5: page
- * 4-0: register offset
- */
-#define GG82563_PAGE_SHIFT        5
-#define GG82563_REG(page, reg)    \
-        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
-#define GG82563_MIN_ALT_REG       30
-
-/* GG82563 Specific Registers */
-#define GG82563_PHY_SPEC_CTRL           \
-        GG82563_REG(0, 16) /* PHY Specific Control */
-#define GG82563_PHY_SPEC_STATUS         \
-        GG82563_REG(0, 17) /* PHY Specific Status */
-#define GG82563_PHY_INT_ENABLE          \
-        GG82563_REG(0, 18) /* Interrupt Enable */
-#define GG82563_PHY_SPEC_STATUS_2       \
-        GG82563_REG(0, 19) /* PHY Specific Status 2 */
-#define GG82563_PHY_RX_ERR_CNTR         \
-        GG82563_REG(0, 21) /* Receive Error Counter */
-#define GG82563_PHY_PAGE_SELECT         \
-        GG82563_REG(0, 22) /* Page Select */
-#define GG82563_PHY_SPEC_CTRL_2         \
-        GG82563_REG(0, 26) /* PHY Specific Control 2 */
-#define GG82563_PHY_PAGE_SELECT_ALT     \
-        GG82563_REG(0, 29) /* Alternate Page Select */
-#define GG82563_PHY_TEST_CLK_CTRL       \
-        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
-
-#define GG82563_PHY_MAC_SPEC_CTRL       \
-        GG82563_REG(2, 21) /* MAC Specific Control Register */
-#define GG82563_PHY_MAC_SPEC_CTRL_2     \
-        GG82563_REG(2, 26) /* MAC Specific Control 2 */
-
-#define GG82563_PHY_DSP_DISTANCE    \
-        GG82563_REG(5, 26) /* DSP Distance */
-
-/* Page 193 - Port Control Registers */
-#define GG82563_PHY_KMRN_MODE_CTRL   \
-        GG82563_REG(193, 16) /* Kumeran Mode Control */
-#define GG82563_PHY_PORT_RESET          \
-        GG82563_REG(193, 17) /* Port Reset */
-#define GG82563_PHY_REVISION_ID         \
-        GG82563_REG(193, 18) /* Revision ID */
-#define GG82563_PHY_DEVICE_ID           \
-        GG82563_REG(193, 19) /* Device ID */
-#define GG82563_PHY_PWR_MGMT_CTRL       \
-        GG82563_REG(193, 20) /* Power Management Control */
-#define GG82563_PHY_RATE_ADAPT_CTRL     \
-        GG82563_REG(193, 25) /* Rate Adaptation Control */
-
-/* Page 194 - KMRN Registers */
-#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
-        GG82563_REG(194, 16) /* FIFO's Control/Status */
-#define GG82563_PHY_KMRN_CTRL           \
-        GG82563_REG(194, 17) /* Control */
-#define GG82563_PHY_INBAND_CTRL         \
-        GG82563_REG(194, 18) /* Inband Control */
-#define GG82563_PHY_KMRN_DIAGNOSTIC     \
-        GG82563_REG(194, 19) /* Diagnostic */
-#define GG82563_PHY_ACK_TIMEOUTS        \
-        GG82563_REG(194, 20) /* Acknowledge Timeouts */
-#define GG82563_PHY_ADV_ABILITY         \
-        GG82563_REG(194, 21) /* Advertised Ability */
-#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
-        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
-#define GG82563_PHY_ADV_NEXT_PAGE       \
-        GG82563_REG(194, 24) /* Advertised Next Page */
-#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
-        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
-#define GG82563_PHY_KMRN_MISC           \
-        GG82563_REG(194, 26) /* Misc. */
-
-/* MDI Control */
-#define E1000_MDIC_DATA_MASK 0x0000FFFF
-#define E1000_MDIC_REG_MASK  0x001F0000
-#define E1000_MDIC_REG_SHIFT 16
-#define E1000_MDIC_PHY_MASK  0x03E00000
-#define E1000_MDIC_PHY_SHIFT 21
-#define E1000_MDIC_OP_WRITE  0x04000000
-#define E1000_MDIC_OP_READ   0x08000000
-#define E1000_MDIC_READY     0x10000000
-#define E1000_MDIC_INT_EN    0x20000000
-#define E1000_MDIC_ERROR     0x40000000
-#define E1000_MDIC_DEST      0x80000000
-
-/* SerDes Control */
-#define E1000_GEN_CTL_READY             0x80000000
-#define E1000_GEN_CTL_ADDRESS_SHIFT     8
-#define E1000_GEN_POLL_TIMEOUT          640
-
-/* LinkSec register fields */
-#define E1000_LSECTXCAP_SUM_MASK        0x00FF0000
-#define E1000_LSECTXCAP_SUM_SHIFT       16
-#define E1000_LSECRXCAP_SUM_MASK        0x00FF0000
-#define E1000_LSECRXCAP_SUM_SHIFT       16
-
-#define E1000_LSECTXCTRL_EN_MASK        0x00000003
-#define E1000_LSECTXCTRL_DISABLE        0x0
-#define E1000_LSECTXCTRL_AUTH           0x1
-#define E1000_LSECTXCTRL_AUTH_ENCRYPT   0x2
-#define E1000_LSECTXCTRL_AISCI          0x00000020
-#define E1000_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
-#define E1000_LSECTXCTRL_RSV_MASK       0x000000D8
-
-#define E1000_LSECRXCTRL_EN_MASK        0x0000000C
-#define E1000_LSECRXCTRL_EN_SHIFT       2
-#define E1000_LSECRXCTRL_DISABLE        0x0
-#define E1000_LSECRXCTRL_CHECK          0x1
-#define E1000_LSECRXCTRL_STRICT         0x2
-#define E1000_LSECRXCTRL_DROP           0x3
-#define E1000_LSECRXCTRL_PLSH           0x00000040
-#define E1000_LSECRXCTRL_RP             0x00000080
-#define E1000_LSECRXCTRL_RSV_MASK       0xFFFFFF33
-
-/* Tx Rate-Scheduler Config fields */
-#define E1000_RTTBCNRC_RS_ENA          0x80000000
-#define E1000_RTTBCNRC_RF_DEC_MASK     0x00003FFF
-#define E1000_RTTBCNRC_RF_INT_SHIFT     14
-#define E1000_RTTBCNRC_RF_INT_MASK     \
-       (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
-
-/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coalescing
-                                                    * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coalescing Rx
-                                                    * Threshold */
-#define E1000_DMACR_DMACTHR_SHIFT       16
-#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe
-                                                    * transactions */
-#define E1000_DMACR_DMAC_LX_SHIFT       28
-#define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
-
-#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
-                                                    * Threshold */
-
-#define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
-
-#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate
-                                                    * Threshold */
-#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx packet rate in
-                                                    * current window */
-
-#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Traffic
-                                                    * Current Cnt */
-
-#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* Flow ctrl Rx Threshold
-                                                    * High val */
-#define E1000_FCRTC_RTH_COAL_SHIFT      4
-#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
-                                                      on DMA coal */
-
-/* Proxy Filter Control */
-#define E1000_PROXYFC_D0               0x00000001  /* Enable offload in D0 */
-#define E1000_PROXYFC_EX               0x00000004  /* Directed exact proxy */
-#define E1000_PROXYFC_MC               0x00000008  /* Directed Multicast
-                                                    * Proxy */
-#define E1000_PROXYFC_BC               0x00000010  /* Broadcast Proxy Enable */
-#define E1000_PROXYFC_ARP_DIRECTED     0x00000020  /* Directed ARP Proxy
-                                                    * Enable */
-#define E1000_PROXYFC_IPV4             0x00000040  /* Directed IPv4 Enable */
-#define E1000_PROXYFC_IPV6             0x00000080  /* Directed IPv6 Enable */
-#define E1000_PROXYFC_NS               0x00000200  /* IPv4 Neighborhood
-                                                    * Solicitation */
-#define E1000_PROXYFC_ARP              0x00000800  /* ARP Request Proxy
-                                                    * Enable */
-/* Proxy Status */
-#define E1000_PROXYS_CLEAR             0xFFFFFFFF  /* Clear */
-
-/* Firmware Status */
-#define E1000_FWSTS_FWRI               0x80000000 /* Firmware Reset
-                                                   * Indication */
-
-
-#endif /* _E1000_DEFINES_H_ */
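
As a reference for the PHY status masks above, a minimal standalone sketch (not part of the original sources) that decodes a raw M88E1000_PHY_SPEC_STATUS word; it assumes the e1000_defines.h shown here is on the include path:

#include <stdio.h>
#include <stdint.h>
#include "e1000_defines.h"

/* Decode link, speed, duplex and cable-length index from a raw
 * M88E1000_PHY_SPEC_STATUS (PHY register 0x11) value. */
static void m88e1000_print_status(uint16_t pssr)
{
        const char *speed = "10";

        if ((pssr & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
                speed = "1000";
        else if ((pssr & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
                speed = "100";

        printf("link=%d speed=%sMb/s duplex=%s cable_len_index=%d\n",
               !!(pssr & M88E1000_PSSR_LINK), speed,
               (pssr & M88E1000_PSSR_DPLX) ? "full" : "half",
               (pssr & M88E1000_PSSR_CABLE_LENGTH) >>
                        M88E1000_PSSR_CABLE_LENGTH_SHIFT);
}
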
diff --git a/lib/librte_pmd_igb/igb/e1000_hw.h b/lib/librte_pmd_igb/igb/e1000_hw.h
deleted file mode 100644 (file)
index bed673b..0000000
+++ /dev/null
@@ -1,767 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
-
-#include "e1000_osdep.h"
-#include "e1000_regs.h"
-#include "e1000_defines.h"
-
-struct e1000_hw;
-
-#define E1000_DEV_ID_82576                    0x10C9
-#define E1000_DEV_ID_82576_FIBER              0x10E6
-#define E1000_DEV_ID_82576_SERDES             0x10E7
-#define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
-#define E1000_DEV_ID_82576_QUAD_COPPER_ET2    0x1526
-#define E1000_DEV_ID_82576_NS                 0x150A
-#define E1000_DEV_ID_82576_NS_SERDES          0x1518
-#define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
-#define E1000_DEV_ID_82576_VF                 0x10CA
-#define E1000_DEV_ID_I350_VF                  0x1520
-#define E1000_DEV_ID_82575EB_COPPER           0x10A7
-#define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
-#define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
-#define E1000_DEV_ID_82580_COPPER             0x150E
-#define E1000_DEV_ID_82580_FIBER              0x150F
-#define E1000_DEV_ID_82580_SERDES             0x1510
-#define E1000_DEV_ID_82580_SGMII              0x1511
-#define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
-#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
-#define E1000_DEV_ID_I350_COPPER              0x1521
-#define E1000_DEV_ID_I350_FIBER               0x1522
-#define E1000_DEV_ID_I350_SERDES              0x1523
-#define E1000_DEV_ID_I350_SGMII               0x1524
-#define E1000_DEV_ID_I350_DA4                 0x1546
-#define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
-#define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
-#define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
-#define E1000_DEV_ID_DH89XXCC_SFP             0x0440
-#define E1000_REVISION_0 0
-#define E1000_REVISION_1 1
-#define E1000_REVISION_2 2
-#define E1000_REVISION_3 3
-#define E1000_REVISION_4 4
-
-#define E1000_FUNC_0     0
-#define E1000_FUNC_1     1
-#define E1000_FUNC_2     2
-#define E1000_FUNC_3     3
-
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9
-
-enum e1000_mac_type {
-       e1000_undefined = 0,
-       e1000_82575,
-       e1000_82576,
-       e1000_82580,
-       e1000_i350,
-       e1000_vfadapt,
-       e1000_vfadapt_i350,
-       e1000_num_macs  /* List is 1-based, so subtract 1 for TRUE count. */
-};
-
-enum e1000_media_type {
-       e1000_media_type_unknown = 0,
-       e1000_media_type_copper = 1,
-       e1000_media_type_fiber = 2,
-       e1000_media_type_internal_serdes = 3,
-       e1000_num_media_types
-};
-
-enum e1000_nvm_type {
-       e1000_nvm_unknown = 0,
-       e1000_nvm_none,
-       e1000_nvm_eeprom_spi,
-       e1000_nvm_eeprom_microwire,
-       e1000_nvm_flash_hw,
-       e1000_nvm_flash_sw
-};
-
-enum e1000_nvm_override {
-       e1000_nvm_override_none = 0,
-       e1000_nvm_override_spi_small,
-       e1000_nvm_override_spi_large,
-       e1000_nvm_override_microwire_small,
-       e1000_nvm_override_microwire_large
-};
-
-enum e1000_phy_type {
-       e1000_phy_unknown = 0,
-       e1000_phy_none,
-       e1000_phy_m88,
-       e1000_phy_igp,
-       e1000_phy_igp_2,
-       e1000_phy_gg82563,
-       e1000_phy_igp_3,
-       e1000_phy_ife,
-       e1000_phy_82580,
-       e1000_phy_vf,
-};
-
-enum e1000_bus_type {
-       e1000_bus_type_unknown = 0,
-       e1000_bus_type_pci,
-       e1000_bus_type_pcix,
-       e1000_bus_type_pci_express,
-       e1000_bus_type_reserved
-};
-
-enum e1000_bus_speed {
-       e1000_bus_speed_unknown = 0,
-       e1000_bus_speed_33,
-       e1000_bus_speed_66,
-       e1000_bus_speed_100,
-       e1000_bus_speed_120,
-       e1000_bus_speed_133,
-       e1000_bus_speed_2500,
-       e1000_bus_speed_5000,
-       e1000_bus_speed_reserved
-};
-
-enum e1000_bus_width {
-       e1000_bus_width_unknown = 0,
-       e1000_bus_width_pcie_x1,
-       e1000_bus_width_pcie_x2,
-       e1000_bus_width_pcie_x4 = 4,
-       e1000_bus_width_pcie_x8 = 8,
-       e1000_bus_width_32,
-       e1000_bus_width_64,
-       e1000_bus_width_reserved
-};
-
-enum e1000_1000t_rx_status {
-       e1000_1000t_rx_status_not_ok = 0,
-       e1000_1000t_rx_status_ok,
-       e1000_1000t_rx_status_undefined = 0xFF
-};
-
-enum e1000_rev_polarity {
-       e1000_rev_polarity_normal = 0,
-       e1000_rev_polarity_reversed,
-       e1000_rev_polarity_undefined = 0xFF
-};
-
-enum e1000_fc_mode {
-       e1000_fc_none = 0,
-       e1000_fc_rx_pause,
-       e1000_fc_tx_pause,
-       e1000_fc_full,
-       e1000_fc_default = 0xFF
-};
-
-enum e1000_ms_type {
-       e1000_ms_hw_default = 0,
-       e1000_ms_force_master,
-       e1000_ms_force_slave,
-       e1000_ms_auto
-};
-
-enum e1000_smart_speed {
-       e1000_smart_speed_default = 0,
-       e1000_smart_speed_on,
-       e1000_smart_speed_off
-};
-
-enum e1000_serdes_link_state {
-       e1000_serdes_link_down = 0,
-       e1000_serdes_link_autoneg_progress,
-       e1000_serdes_link_autoneg_complete,
-       e1000_serdes_link_forced_up
-};
-
-#define __le16 u16
-#define __le32 u32
-#define __le64 u64
-/* Receive Descriptor */
-struct e1000_rx_desc {
-       __le64 buffer_addr; /* Address of the descriptor's data buffer */
-       __le16 length;      /* Length of data DMAed into data buffer */
-       __le16 csum;        /* Packet checksum */
-       u8  status;         /* Descriptor status */
-       u8  errors;         /* Descriptor Errors */
-       __le16 special;
-};
-
-/* Receive Descriptor - Extended */
-union e1000_rx_desc_extended {
-       struct {
-               __le64 buffer_addr;
-               __le64 reserved;
-       } read;
-       struct {
-               struct {
-                       __le32 mrq;           /* Multiple Rx Queues */
-                       union {
-                               __le32 rss;         /* RSS Hash */
-                               struct {
-                                       __le16 ip_id;  /* IP id */
-                                       __le16 csum;   /* Packet Checksum */
-                               } csum_ip;
-                       } hi_dword;
-               } lower;
-               struct {
-                       __le32 status_error;  /* ext status/error */
-                       __le16 length;
-                       __le16 vlan;          /* VLAN tag */
-               } upper;
-       } wb;  /* writeback */
-};
-
-#define MAX_PS_BUFFERS 4
-/* Receive Descriptor - Packet Split */
-union e1000_rx_desc_packet_split {
-       struct {
-               /* one buffer for protocol header(s), three data buffers */
-               __le64 buffer_addr[MAX_PS_BUFFERS];
-       } read;
-       struct {
-               struct {
-                       __le32 mrq;           /* Multiple Rx Queues */
-                       union {
-                               __le32 rss;           /* RSS Hash */
-                               struct {
-                                       __le16 ip_id;    /* IP id */
-                                       __le16 csum;     /* Packet Checksum */
-                               } csum_ip;
-                       } hi_dword;
-               } lower;
-               struct {
-                       __le32 status_error;  /* ext status/error */
-                       __le16 length0;       /* length of buffer 0 */
-                       __le16 vlan;          /* VLAN tag */
-               } middle;
-               struct {
-                       __le16 header_status;
-                       __le16 length[3];     /* length of buffers 1-3 */
-               } upper;
-               __le64 reserved;
-       } wb; /* writeback */
-};
-
-/* Transmit Descriptor */
-struct e1000_tx_desc {
-       __le64 buffer_addr;   /* Address of the descriptor's data buffer */
-       union {
-               __le32 data;
-               struct {
-                       __le16 length;    /* Data buffer length */
-                       u8 cso;           /* Checksum offset */
-                       u8 cmd;           /* Descriptor control */
-               } flags;
-       } lower;
-       union {
-               __le32 data;
-               struct {
-                       u8 status;        /* Descriptor status */
-                       u8 css;           /* Checksum start */
-                       __le16 special;
-               } fields;
-       } upper;
-};
-
-/* Offload Context Descriptor */
-struct e1000_context_desc {
-       union {
-               __le32 ip_config;
-               struct {
-                       u8 ipcss;         /* IP checksum start */
-                       u8 ipcso;         /* IP checksum offset */
-                       __le16 ipcse;     /* IP checksum end */
-               } ip_fields;
-       } lower_setup;
-       union {
-               __le32 tcp_config;
-               struct {
-                       u8 tucss;         /* TCP checksum start */
-                       u8 tucso;         /* TCP checksum offset */
-                       __le16 tucse;     /* TCP checksum end */
-               } tcp_fields;
-       } upper_setup;
-       __le32 cmd_and_length;
-       union {
-               __le32 data;
-               struct {
-                       u8 status;        /* Descriptor status */
-                       u8 hdr_len;       /* Header length */
-                       __le16 mss;       /* Maximum segment size */
-               } fields;
-       } tcp_seg_setup;
-};
-
-/* Offload data descriptor */
-struct e1000_data_desc {
-       __le64 buffer_addr;   /* Address of the descriptor's buffer address */
-       union {
-               __le32 data;
-               struct {
-                       __le16 length;    /* Data buffer length */
-                       u8 typ_len_ext;
-                       u8 cmd;
-               } flags;
-       } lower;
-       union {
-               __le32 data;
-               struct {
-                       u8 status;        /* Descriptor status */
-                       u8 popts;         /* Packet Options */
-                       __le16 special;
-               } fields;
-       } upper;
-};
-
-/* Statistics counters collected by the MAC */
-struct e1000_hw_stats {
-       u64 crcerrs;
-       u64 algnerrc;
-       u64 symerrs;
-       u64 rxerrc;
-       u64 mpc;
-       u64 scc;
-       u64 ecol;
-       u64 mcc;
-       u64 latecol;
-       u64 colc;
-       u64 dc;
-       u64 tncrs;
-       u64 sec;
-       u64 cexterr;
-       u64 rlec;
-       u64 xonrxc;
-       u64 xontxc;
-       u64 xoffrxc;
-       u64 xofftxc;
-       u64 fcruc;
-       u64 prc64;
-       u64 prc127;
-       u64 prc255;
-       u64 prc511;
-       u64 prc1023;
-       u64 prc1522;
-       u64 gprc;
-       u64 bprc;
-       u64 mprc;
-       u64 gptc;
-       u64 gorc;
-       u64 gotc;
-       u64 rnbc;
-       u64 ruc;
-       u64 rfc;
-       u64 roc;
-       u64 rjc;
-       u64 mgprc;
-       u64 mgpdc;
-       u64 mgptc;
-       u64 tor;
-       u64 tot;
-       u64 tpr;
-       u64 tpt;
-       u64 ptc64;
-       u64 ptc127;
-       u64 ptc255;
-       u64 ptc511;
-       u64 ptc1023;
-       u64 ptc1522;
-       u64 mptc;
-       u64 bptc;
-       u64 tsctc;
-       u64 tsctfc;
-       u64 iac;
-       u64 icrxptc;
-       u64 icrxatc;
-       u64 ictxptc;
-       u64 ictxatc;
-       u64 ictxqec;
-       u64 ictxqmtc;
-       u64 icrxdmtc;
-       u64 icrxoc;
-       u64 cbtmpc;
-       u64 htdpmc;
-       u64 cbrdpc;
-       u64 cbrmpc;
-       u64 rpthc;
-       u64 hgptc;
-       u64 htcbdpc;
-       u64 hgorc;
-       u64 hgotc;
-       u64 lenerrs;
-       u64 scvpc;
-       u64 hrmpc;
-       u64 doosync;
-};
-
-struct e1000_vf_stats {
-       u64 base_gprc;
-       u64 base_gptc;
-       u64 base_gorc;
-       u64 base_gotc;
-       u64 base_mprc;
-       u64 base_gotlbc;
-       u64 base_gptlbc;
-       u64 base_gorlbc;
-       u64 base_gprlbc;
-
-       u32 last_gprc;
-       u32 last_gptc;
-       u32 last_gorc;
-       u32 last_gotc;
-       u32 last_mprc;
-       u32 last_gotlbc;
-       u32 last_gptlbc;
-       u32 last_gorlbc;
-       u32 last_gprlbc;
-
-       u64 gprc;
-       u64 gptc;
-       u64 gorc;
-       u64 gotc;
-       u64 mprc;
-       u64 gotlbc;
-       u64 gptlbc;
-       u64 gorlbc;
-       u64 gprlbc;
-};
-
-struct e1000_phy_stats {
-       u32 idle_errors;
-       u32 receive_errors;
-};
-
-struct e1000_host_mng_dhcp_cookie {
-       u32 signature;
-       u8  status;
-       u8  reserved0;
-       u16 vlan_id;
-       u32 reserved1;
-       u16 reserved2;
-       u8  reserved3;
-       u8  checksum;
-};
-
-/* Host Interface "Rev 1" */
-struct e1000_host_command_header {
-       u8 command_id;
-       u8 command_length;
-       u8 command_options;
-       u8 checksum;
-};
-
-#define E1000_HI_MAX_DATA_LENGTH     252
-struct e1000_host_command_info {
-       struct e1000_host_command_header command_header;
-       u8 command_data[E1000_HI_MAX_DATA_LENGTH];
-};
-
-/* Host Interface "Rev 2" */
-struct e1000_host_mng_command_header {
-       u8  command_id;
-       u8  checksum;
-       u16 reserved1;
-       u16 reserved2;
-       u16 command_length;
-};
-
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
-struct e1000_host_mng_command_info {
-       struct e1000_host_mng_command_header command_header;
-       u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
-};
-
-#include "e1000_mac.h"
-#include "e1000_phy.h"
-#include "e1000_nvm.h"
-#include "e1000_manage.h"
-#include "e1000_mbx.h"
-
-struct e1000_mac_operations {
-       /* Function pointers for the MAC. */
-       s32  (*init_params)(struct e1000_hw *);
-       s32  (*id_led_init)(struct e1000_hw *);
-       s32  (*blink_led)(struct e1000_hw *);
-       s32  (*check_for_link)(struct e1000_hw *);
-       bool (*check_mng_mode)(struct e1000_hw *hw);
-       s32  (*cleanup_led)(struct e1000_hw *);
-       void (*clear_hw_cntrs)(struct e1000_hw *);
-       void (*clear_vfta)(struct e1000_hw *);
-       s32  (*get_bus_info)(struct e1000_hw *);
-       void (*set_lan_id)(struct e1000_hw *);
-       s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
-       s32  (*led_on)(struct e1000_hw *);
-       s32  (*led_off)(struct e1000_hw *);
-       void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
-       s32  (*reset_hw)(struct e1000_hw *);
-       s32  (*init_hw)(struct e1000_hw *);
-       void (*shutdown_serdes)(struct e1000_hw *);
-       void (*power_up_serdes)(struct e1000_hw *);
-       s32  (*setup_link)(struct e1000_hw *);
-       s32  (*setup_physical_interface)(struct e1000_hw *);
-       s32  (*setup_led)(struct e1000_hw *);
-       void (*write_vfta)(struct e1000_hw *, u32, u32);
-       void (*config_collision_dist)(struct e1000_hw *);
-       void (*rar_set)(struct e1000_hw *, u8*, u32);
-       s32  (*read_mac_addr)(struct e1000_hw *);
-       s32  (*validate_mdi_setting)(struct e1000_hw *);
-       s32  (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
-       s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
-                      struct e1000_host_mng_command_header*);
-       s32  (*mng_enable_host_if)(struct e1000_hw *);
-       s32  (*wait_autoneg)(struct e1000_hw *);
-};
-
-struct e1000_phy_operations {
-       s32  (*init_params)(struct e1000_hw *);
-       s32  (*acquire)(struct e1000_hw *);
-       s32  (*check_polarity)(struct e1000_hw *);
-       s32  (*check_reset_block)(struct e1000_hw *);
-       s32  (*commit)(struct e1000_hw *);
-       s32  (*force_speed_duplex)(struct e1000_hw *);
-       s32  (*get_cfg_done)(struct e1000_hw *hw);
-       s32  (*get_cable_length)(struct e1000_hw *);
-       s32  (*get_info)(struct e1000_hw *);
-       s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
-       s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
-       void (*release)(struct e1000_hw *);
-       s32  (*reset)(struct e1000_hw *);
-       s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
-       s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
-       s32  (*write_reg)(struct e1000_hw *, u32, u16);
-       s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
-       void (*power_up)(struct e1000_hw *);
-       void (*power_down)(struct e1000_hw *);
-};
-
-struct e1000_nvm_operations {
-       s32  (*init_params)(struct e1000_hw *);
-       s32  (*acquire)(struct e1000_hw *);
-       s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
-       void (*release)(struct e1000_hw *);
-       void (*reload)(struct e1000_hw *);
-       s32  (*update)(struct e1000_hw *);
-       s32  (*valid_led_default)(struct e1000_hw *, u16 *);
-       s32  (*validate)(struct e1000_hw *);
-       s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
-};
-
-struct e1000_mac_info {
-       struct e1000_mac_operations ops;
-       u8 addr[ETH_ADDR_LEN];
-       u8 perm_addr[ETH_ADDR_LEN];
-
-       enum e1000_mac_type type;
-
-       u32 collision_delta;
-       u32 ledctl_default;
-       u32 ledctl_mode1;
-       u32 ledctl_mode2;
-       u32 mc_filter_type;
-       u32 tx_packet_delta;
-       u32 txcw;
-
-       u16 current_ifs_val;
-       u16 ifs_max_val;
-       u16 ifs_min_val;
-       u16 ifs_ratio;
-       u16 ifs_step_size;
-       u16 mta_reg_count;
-       u16 uta_reg_count;
-
-       /* Maximum size of the MTA register table in all supported adapters */
-       #define MAX_MTA_REG 128
-       u32 mta_shadow[MAX_MTA_REG];
-       u16 rar_entry_count;
-
-       u8  forced_speed_duplex;
-
-       bool adaptive_ifs;
-       bool has_fwsm;
-       bool arc_subsystem_valid;
-       bool asf_firmware_present;
-       bool autoneg;
-       bool autoneg_failed;
-       bool get_link_status;
-       bool in_ifs_mode;
-       enum e1000_serdes_link_state serdes_link_state;
-       bool serdes_has_link;
-       bool tx_pkt_filtering;
-};
-
-struct e1000_phy_info {
-       struct e1000_phy_operations ops;
-       enum e1000_phy_type type;
-
-       enum e1000_1000t_rx_status local_rx;
-       enum e1000_1000t_rx_status remote_rx;
-       enum e1000_ms_type ms_type;
-       enum e1000_ms_type original_ms_type;
-       enum e1000_rev_polarity cable_polarity;
-       enum e1000_smart_speed smart_speed;
-
-       u32 addr;
-       u32 id;
-       u32 reset_delay_us; /* in usec */
-       u32 revision;
-
-       enum e1000_media_type media_type;
-
-       u16 autoneg_advertised;
-       u16 autoneg_mask;
-       u16 cable_length;
-       u16 max_cable_length;
-       u16 min_cable_length;
-
-       u8 mdix;
-
-       bool disable_polarity_correction;
-       bool is_mdix;
-       bool polarity_correction;
-       bool reset_disable;
-       bool speed_downgraded;
-       bool autoneg_wait_to_complete;
-};
-
-struct e1000_nvm_info {
-       struct e1000_nvm_operations ops;
-       enum e1000_nvm_type type;
-       enum e1000_nvm_override override;
-
-       u32 flash_bank_size;
-       u32 flash_base_addr;
-
-       u16 word_size;
-       u16 delay_usec;
-       u16 address_bits;
-       u16 opcode_bits;
-       u16 page_size;
-};
-
-struct e1000_bus_info {
-       enum e1000_bus_type type;
-       enum e1000_bus_speed speed;
-       enum e1000_bus_width width;
-
-       u16 func;
-       u16 pci_cmd_word;
-};
-
-struct e1000_fc_info {
-       u32 high_water;          /* Flow control high-water mark */
-       u32 low_water;           /* Flow control low-water mark */
-       u16 pause_time;          /* Flow control pause timer */
-       u16 refresh_time;        /* Flow control refresh timer */
-       bool send_xon;           /* Flow control send XON */
-       bool strict_ieee;        /* Strict IEEE mode */
-       enum e1000_fc_mode current_mode; /* FC mode in effect */
-       enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
-};
-
-struct e1000_mbx_operations {
-       s32 (*init_params)(struct e1000_hw *hw);
-       s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
-       s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
-       s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
-       s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
-       s32 (*check_for_msg)(struct e1000_hw *, u16);
-       s32 (*check_for_ack)(struct e1000_hw *, u16);
-       s32 (*check_for_rst)(struct e1000_hw *, u16);
-};
-
-struct e1000_mbx_stats {
-       u32 msgs_tx;
-       u32 msgs_rx;
-
-       u32 acks;
-       u32 reqs;
-       u32 rsts;
-};
-
-struct e1000_mbx_info {
-       struct e1000_mbx_operations ops;
-       struct e1000_mbx_stats stats;
-       u32 timeout;
-       u32 usec_delay;
-       u16 size;
-};
-
-struct e1000_dev_spec_82575 {
-       bool sgmii_active;
-       bool global_device_reset;
-       bool eee_disable;
-};
-
-struct e1000_dev_spec_vf {
-       u32 vf_number;
-       u32 v2p_mailbox;
-};
-
-struct e1000_hw {
-       void *back;
-
-       u8 *hw_addr;
-       u8 *flash_address;
-       unsigned long io_base;
-
-       struct e1000_mac_info  mac;
-       struct e1000_fc_info   fc;
-       struct e1000_phy_info  phy;
-       struct e1000_nvm_info  nvm;
-       struct e1000_bus_info  bus;
-       struct e1000_mbx_info mbx;
-       struct e1000_host_mng_dhcp_cookie mng_cookie;
-
-       union {
-               struct e1000_dev_spec_82575 _82575;
-               struct e1000_dev_spec_vf vf;
-       } dev_spec;
-
-       u16 device_id;
-       u16 subsystem_vendor_id;
-       u16 subsystem_device_id;
-       u16 vendor_id;
-
-       u8  revision_id;
-};
-
-#include "e1000_82575.h"
-
-/* These functions must be implemented by drivers */
-s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32  e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
-void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
-
-#endif
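
The structures above are consumed almost entirely through their ops tables: e1000_api.c wraps the function pointers and the 82575/VF-specific code fills them in. A minimal sketch of that dispatch pattern (the helper name igb_bringup_sketch is hypothetical and not part of the driver):

#include "e1000_hw.h"

/* Hypothetical bring-up helper: every step is a call through the
 * mac.ops table declared above, so the same sequence applies to the
 * 82575/82576/82580/I350 and VF variants once their ops are filled in. */
static s32 igb_bringup_sketch(struct e1000_hw *hw)
{
        s32 ret;

        ret = hw->mac.ops.init_params(hw);   /* family-specific setup */
        if (ret != E1000_SUCCESS)
                return ret;

        ret = hw->mac.ops.reset_hw(hw);      /* bring the MAC to a known state */
        if (ret != E1000_SUCCESS)
                return ret;

        ret = hw->mac.ops.init_hw(hw);       /* program defaults and link setup */
        if (ret != E1000_SUCCESS)
                return ret;

        /* Program the permanent MAC address into RAR[0]. */
        hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

        return E1000_SUCCESS;
}
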
diff --git a/lib/librte_pmd_igb/igb/e1000_mac.c b/lib/librte_pmd_igb/igb/e1000_mac.c
deleted file mode 100644 (file)
index 1fff576..0000000
+++ /dev/null
@@ -1,2170 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_api.h"
-
-static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
-static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
-static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
-static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
-static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-
-/**
- *  e1000_init_mac_ops_generic - Initialize MAC function pointers
- *  @hw: pointer to the HW structure
- *
- *  Sets up the function pointers to no-op functions
- **/
-void e1000_init_mac_ops_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       DEBUGFUNC("e1000_init_mac_ops_generic");
-
-       /* General Setup */
-       mac->ops.init_params = e1000_null_ops_generic;
-       mac->ops.init_hw = e1000_null_ops_generic;
-       mac->ops.reset_hw = e1000_null_ops_generic;
-       mac->ops.setup_physical_interface = e1000_null_ops_generic;
-       mac->ops.get_bus_info = e1000_null_ops_generic;
-       mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
-       mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
-       mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
-       mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
-       /* LED */
-       mac->ops.cleanup_led = e1000_null_ops_generic;
-       mac->ops.setup_led = e1000_null_ops_generic;
-       mac->ops.blink_led = e1000_null_ops_generic;
-       mac->ops.led_on = e1000_null_ops_generic;
-       mac->ops.led_off = e1000_null_ops_generic;
-       /* LINK */
-       mac->ops.setup_link = e1000_null_ops_generic;
-       mac->ops.get_link_up_info = e1000_null_link_info;
-       mac->ops.check_for_link = e1000_null_ops_generic;
-       mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
-       /* Management */
-       mac->ops.check_mng_mode = e1000_null_mng_mode;
-       mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
-       mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
-       mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
-       /* VLAN, MC, etc. */
-       mac->ops.update_mc_addr_list = e1000_null_update_mc;
-       mac->ops.clear_vfta = e1000_null_mac_generic;
-       mac->ops.write_vfta = e1000_null_write_vfta;
-       mac->ops.rar_set = e1000_rar_set_generic;
-       mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
-}
-
-/**
- *  e1000_null_ops_generic - No-op function, returns 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_ops_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_null_ops_generic");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_mac_generic - No-op function, return void
- *  @hw: pointer to the HW structure
- **/
-void e1000_null_mac_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_null_mac_generic");
-       return;
-}
-
-/**
- *  e1000_null_link_info - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d)
-{
-       DEBUGFUNC("e1000_null_link_info");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_mng_mode - No-op function, return FALSE
- *  @hw: pointer to the HW structure
- **/
-bool e1000_null_mng_mode(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_null_mng_mode");
-       return FALSE;
-}
-
-/**
- *  e1000_null_update_mc - No-op function, return void
- *  @hw: pointer to the HW structure
- **/
-void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a)
-{
-       DEBUGFUNC("e1000_null_update_mc");
-       return;
-}
-
-/**
- *  e1000_null_write_vfta - No-op function, return void
- *  @hw: pointer to the HW structure
- **/
-void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b)
-{
-       DEBUGFUNC("e1000_null_write_vfta");
-       return;
-}
-
-/**
- *  e1000_null_rar_set - No-op function, return void
- *  @hw: pointer to the HW structure
- **/
-void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a)
-{
-       DEBUGFUNC("e1000_null_rar_set");
-       return;
-}
-
-/**
- *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
- *  @hw: pointer to the HW structure
- *
- *  Determines and stores the system bus information for a particular
- *  network interface.  The following bus information is determined and stored:
- *  bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
- **/
-s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       struct e1000_bus_info *bus = &hw->bus;
-       u32 status = E1000_READ_REG(hw, E1000_STATUS);
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_get_bus_info_pci_generic");
-
-       /* PCI or PCI-X? */
-       bus->type = (status & E1000_STATUS_PCIX_MODE)
-                       ? e1000_bus_type_pcix
-                       : e1000_bus_type_pci;
-
-       /* Bus speed */
-       if (bus->type == e1000_bus_type_pci) {
-               bus->speed = (status & E1000_STATUS_PCI66)
-                            ? e1000_bus_speed_66
-                            : e1000_bus_speed_33;
-       } else {
-               switch (status & E1000_STATUS_PCIX_SPEED) {
-               case E1000_STATUS_PCIX_SPEED_66:
-                       bus->speed = e1000_bus_speed_66;
-                       break;
-               case E1000_STATUS_PCIX_SPEED_100:
-                       bus->speed = e1000_bus_speed_100;
-                       break;
-               case E1000_STATUS_PCIX_SPEED_133:
-                       bus->speed = e1000_bus_speed_133;
-                       break;
-               default:
-                       bus->speed = e1000_bus_speed_reserved;
-                       break;
-               }
-       }
-
-       /* Bus width */
-       bus->width = (status & E1000_STATUS_BUS64)
-                    ? e1000_bus_width_64
-                    : e1000_bus_width_32;
-
-       /* Which PCI(-X) function? */
-       mac->ops.set_lan_id(hw);
-
-       return ret_val;
-}
-
-/**
- *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
- *  @hw: pointer to the HW structure
- *
- *  Determines and stores the system bus information for a particular
- *  network interface.  The following bus information is determined and stored:
- *  bus speed, bus width, type (PCIe), and PCIe function.
- **/
-s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       struct e1000_bus_info *bus = &hw->bus;
-       s32 ret_val;
-       u16 pcie_link_status;
-
-       DEBUGFUNC("e1000_get_bus_info_pcie_generic");
-
-       bus->type = e1000_bus_type_pci_express;
-
-       ret_val = e1000_read_pcie_cap_reg(hw,
-                                         PCIE_LINK_STATUS,
-                                         &pcie_link_status);
-       if (ret_val) {
-               bus->width = e1000_bus_width_unknown;
-               bus->speed = e1000_bus_speed_unknown;
-       } else {
-               switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
-               case PCIE_LINK_SPEED_2500:
-                       bus->speed = e1000_bus_speed_2500;
-                       break;
-               case PCIE_LINK_SPEED_5000:
-                       bus->speed = e1000_bus_speed_5000;
-                       break;
-               default:
-                       bus->speed = e1000_bus_speed_unknown;
-                       break;
-               }
-
-               bus->width = (enum e1000_bus_width)((pcie_link_status &
-                                               PCIE_LINK_WIDTH_MASK) >>
-                                              PCIE_LINK_WIDTH_SHIFT);
-       }
-
-       mac->ops.set_lan_id(hw);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
- *
- *  @hw: pointer to the HW structure
- *
- *  Determines the LAN function id by reading memory-mapped registers
- *  and swaps the port value if requested.
- **/
-static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
-{
-       struct e1000_bus_info *bus = &hw->bus;
-       u32 reg;
-
-       /*
-        * The status register reports the correct function number
-        * for the device regardless of function swap state.
-        */
-       reg = E1000_READ_REG(hw, E1000_STATUS);
-       bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
-}
-
-/**
- *  e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
- *  @hw: pointer to the HW structure
- *
- *  Determines the LAN function id by reading PCI config space.
- **/
-void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
-{
-       struct e1000_bus_info *bus = &hw->bus;
-       u16 pci_header_type;
-       u32 status;
-
-       e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
-       if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
-               status = E1000_READ_REG(hw, E1000_STATUS);
-               bus->func = (status & E1000_STATUS_FUNC_MASK)
-                           >> E1000_STATUS_FUNC_SHIFT;
-       } else {
-               bus->func = 0;
-       }
-}
-
-/**
- *  e1000_set_lan_id_single_port - Set LAN id for a single port device
- *  @hw: pointer to the HW structure
- *
- *  Sets the LAN function id to zero for a single port device.
- **/
-void e1000_set_lan_id_single_port(struct e1000_hw *hw)
-{
-       struct e1000_bus_info *bus = &hw->bus;
-
-       bus->func = 0;
-}
-
-/**
- *  e1000_clear_vfta_generic - Clear VLAN filter table
- *  @hw: pointer to the HW structure
- *
- *  Clears the register array which contains the VLAN filter table by
- *  setting all the values to 0.
- **/
-void e1000_clear_vfta_generic(struct e1000_hw *hw)
-{
-       u32 offset;
-
-       DEBUGFUNC("e1000_clear_vfta_generic");
-
-       for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
-               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
-               E1000_WRITE_FLUSH(hw);
-       }
-}
-
-/**
- *  e1000_write_vfta_generic - Write value to VLAN filter table
- *  @hw: pointer to the HW structure
- *  @offset: register offset in VLAN filter table
- *  @value: register value written to VLAN filter table
- *
- *  Writes value at the given offset in the register array which stores
- *  the VLAN filter table.
- **/
-void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
-{
-       DEBUGFUNC("e1000_write_vfta_generic");
-
-       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
-       E1000_WRITE_FLUSH(hw);
-}
-
-/**
- *  e1000_init_rx_addrs_generic - Initialize receive address's
- *  @hw: pointer to the HW structure
- *  @rar_count: receive address registers
- *
- *  Setup the receive address registers by setting the base receive address
- *  register to the devices MAC address and clearing all the other receive
- *  address registers to 0.
- **/
-void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
-{
-       u32 i;
-       u8 mac_addr[ETH_ADDR_LEN] = {0};
-
-       DEBUGFUNC("e1000_init_rx_addrs_generic");
-
-       /* Setup the receive address */
-       DEBUGOUT("Programming MAC Address into RAR[0]\n");
-
-       hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
-
-       /* Zero out the other (rar_entry_count - 1) receive addresses */
-       DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
-       for (i = 1; i < rar_count; i++)
-               hw->mac.ops.rar_set(hw, mac_addr, i);
-}
-
-/**
- *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
- *  @hw: pointer to the HW structure
- *
- *  Checks the nvm for an alternate MAC address.  An alternate MAC address
- *  can be set up by pre-boot software and must be treated like a permanent
- *  address and must override the actual permanent MAC address. If an
- *  alternate MAC address is found it is programmed into RAR0, replacing
- *  the permanent address that was installed into RAR0 by the Si on reset.
- *  This function will return SUCCESS unless it encounters an error while
- *  reading the EEPROM.
- **/
-s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
-{
-       u32 i;
-       s32 ret_val = E1000_SUCCESS;
-       u16 offset, nvm_alt_mac_addr_offset, nvm_data;
-       u8 alt_mac_addr[ETH_ADDR_LEN];
-
-       DEBUGFUNC("e1000_check_alt_mac_addr_generic");
-
-       ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
-       if (ret_val)
-               goto out;
-
-       if (!(nvm_data & NVM_COMPAT_LOM))
-               goto out;
-
-       ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
-                                &nvm_alt_mac_addr_offset);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if (nvm_alt_mac_addr_offset == 0xFFFF) {
-               /* There is no Alternate MAC Address */
-               goto out;
-       }
-
-       if (hw->bus.func == E1000_FUNC_1)
-               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
-       if (hw->bus.func == E1000_FUNC_2)
-               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
-
-       if (hw->bus.func == E1000_FUNC_3)
-               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
-       for (i = 0; i < ETH_ADDR_LEN; i += 2) {
-               offset = nvm_alt_mac_addr_offset + (i >> 1);
-               ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Read Error\n");
-                       goto out;
-               }
-
-               alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
-               alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
-       }
-
-       /* if multicast bit is set, the alternate address will not be used */
-       if (alt_mac_addr[0] & 0x01) {
-               DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
-               goto out;
-       }
-
-       /*
-        * We have a valid alternate MAC address, and we want to treat it the
-        * same as the normal permanent MAC address stored by the HW into the
-        * RAR. Do this by mapping this address into RAR0.
-        */
-       hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_rar_set_generic - Set receive address register
- *  @hw: pointer to the HW structure
- *  @addr: pointer to the receive address
- *  @index: receive address array register
- *
- *  Sets the receive address array register at index to the address passed
- *  in by addr.
- **/
-void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
-{
-       u32 rar_low, rar_high;
-
-       DEBUGFUNC("e1000_rar_set_generic");
-
-       /*
-        * HW expects these in little endian so we reverse the byte order
-        * from network order (big endian) to little endian
-        */
-       rar_low = ((u32) addr[0] |
-                  ((u32) addr[1] << 8) |
-                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
-
-       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
-
-       /* If MAC address zero, no need to set the AV bit */
-       if (rar_low || rar_high)
-               rar_high |= E1000_RAH_AV;
-
-       /*
-        * Some bridges will combine consecutive 32-bit writes into
-        * a single burst write, which will malfunction on some parts.
-        * The flushes avoid this.
-        */
-       E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
-       E1000_WRITE_FLUSH(hw);
-       E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
-       E1000_WRITE_FLUSH(hw);
-}
-
-/**
- *  e1000_update_mc_addr_list_generic - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
- *
- *  Updates entire Multicast Table Array.
- *  The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
-                                       u8 *mc_addr_list, u32 mc_addr_count)
-{
-       u32 hash_value, hash_bit, hash_reg;
-       int i;
-
-       DEBUGFUNC("e1000_update_mc_addr_list_generic");
-
-       /* clear mta_shadow */
-       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
-
-       /* update mta_shadow from mc_addr_list */
-       for (i = 0; (u32) i < mc_addr_count; i++) {
-               hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
-
-               hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
-               hash_bit = hash_value & 0x1F;
-
-               hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
-               mc_addr_list += (ETH_ADDR_LEN);
-       }
-
-       /* replace the entire MTA table */
-       for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
-               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
-       E1000_WRITE_FLUSH(hw);
-}
-
-/**
- *  e1000_hash_mc_addr_generic - Generate a multicast hash value
- *  @hw: pointer to the HW structure
- *  @mc_addr: pointer to a multicast address
- *
- *  Generates a multicast address hash value which is used to determine
- *  the multicast filter table array address and new table value.
- **/
-u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
-{
-       u32 hash_value, hash_mask;
-       u8 bit_shift = 0;
-
-       DEBUGFUNC("e1000_hash_mc_addr_generic");
-
-       /* Register count multiplied by bits per register */
-       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
-
-       /*
-        * For a mc_filter_type of 0, bit_shift is the number of left-shifts
-        * where 0xFF would still fall within the hash mask.
-        */
-       while (hash_mask >> bit_shift != 0xFF)
-               bit_shift++;
-
-       /*
-        * The portion of the address that is used for the hash table
-        * is determined by the mc_filter_type setting.
-        * The algorithm is such that there is a total of 8 bits of shifting.
-        * The bit_shift for a mc_filter_type of 0 represents the number of
-        * left-shifts where the MSB of mc_addr[5] would still fall within
-        * the hash_mask.  Case 0 does this exactly.  Since there are a total
-        * of 8 bits of shifting, then mc_addr[4] will shift right the
-        * remaining number of bits. Thus 8 - bit_shift.  The rest of the
-        * cases are a variation of this algorithm...essentially raising the
-        * number of bits to shift mc_addr[5] left, while still keeping the
-        * 8-bit shifting total.
-        *
-        * For example, given the following Destination MAC Address and an
-        * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
-        * we can see that the bit_shift for case 0 is 4.  These are the hash
-        * values resulting from each mc_filter_type...
-        * [0] [1] [2] [3] [4] [5]
-        * 01  AA  00  12  34  56
-        * LSB                 MSB
-        *
-        * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
-        * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
-        * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
-        * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
-        */
-       switch (hw->mac.mc_filter_type) {
-       default:
-       case 0:
-               break;
-       case 1:
-               bit_shift += 1;
-               break;
-       case 2:
-               bit_shift += 2;
-               break;
-       case 3:
-               bit_shift += 4;
-               break;
-       }
-
-       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
-                                 (((u16) mc_addr[5]) << bit_shift)));
-
-       return hash_value;
-}
-
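
The worked example in the comment above can be reproduced outside the driver
with a short standalone sketch; the 128-register MTA count and the sample
address are taken from that comment, and the helper below merely repeats the
shift/mask arithmetic of e1000_hash_mc_addr_generic() for each filter type:

	#include <stdint.h>
	#include <stdio.h>

	/* same shift/mask arithmetic as e1000_hash_mc_addr_generic() */
	static uint32_t hash_mc_addr(const uint8_t *mc_addr, int mta_reg_count,
				     int mc_filter_type)
	{
		uint32_t hash_mask = (mta_reg_count * 32) - 1;
		uint8_t bit_shift = 0;

		while (hash_mask >> bit_shift != 0xFF)
			bit_shift++;

		switch (mc_filter_type) {
		default:
		case 0: break;
		case 1: bit_shift += 1; break;
		case 2: bit_shift += 2; break;
		case 3: bit_shift += 4; break;
		}

		return hash_mask & (((uint32_t)mc_addr[4] >> (8 - bit_shift)) |
				    ((uint32_t)mc_addr[5] << bit_shift));
	}

	int main(void)
	{
		const uint8_t mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
		int type;

		for (type = 0; type < 4; type++)
			printf("mc_filter_type %d -> hash 0x%03X\n", type,
			       hash_mc_addr(mc_addr, 128, type));
		return 0;
	}
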
-/**
- *  e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
- *  @hw: pointer to the HW structure
- *
- *  In certain situations, a system BIOS may report that the PCIx maximum
- *  memory read byte count (MMRBC) value is higher than the actual
- *  value. We check the PCIx command register with the current PCIx status
- *  register.
- **/
-void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
-{
-       u16 cmd_mmrbc;
-       u16 pcix_cmd;
-       u16 pcix_stat_hi_word;
-       u16 stat_mmrbc;
-
-       DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
-
-       /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
-       if (hw->bus.type != e1000_bus_type_pcix)
-               return;
-
-       e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
-       e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
-       cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
-                    PCIX_COMMAND_MMRBC_SHIFT;
-       stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
-                     PCIX_STATUS_HI_MMRBC_SHIFT;
-       if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
-               stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
-       if (cmd_mmrbc > stat_mmrbc) {
-               pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
-               pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
-               e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
-       }
-}
-
-/**
- *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
- *  @hw: pointer to the HW structure
- *
- *  Clears the base hardware counters by reading the counter registers.
- **/
-void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
-
-       E1000_READ_REG(hw, E1000_CRCERRS);
-       E1000_READ_REG(hw, E1000_SYMERRS);
-       E1000_READ_REG(hw, E1000_MPC);
-       E1000_READ_REG(hw, E1000_SCC);
-       E1000_READ_REG(hw, E1000_ECOL);
-       E1000_READ_REG(hw, E1000_MCC);
-       E1000_READ_REG(hw, E1000_LATECOL);
-       E1000_READ_REG(hw, E1000_COLC);
-       E1000_READ_REG(hw, E1000_DC);
-       E1000_READ_REG(hw, E1000_SEC);
-       E1000_READ_REG(hw, E1000_RLEC);
-       E1000_READ_REG(hw, E1000_XONRXC);
-       E1000_READ_REG(hw, E1000_XONTXC);
-       E1000_READ_REG(hw, E1000_XOFFRXC);
-       E1000_READ_REG(hw, E1000_XOFFTXC);
-       E1000_READ_REG(hw, E1000_FCRUC);
-       E1000_READ_REG(hw, E1000_GPRC);
-       E1000_READ_REG(hw, E1000_BPRC);
-       E1000_READ_REG(hw, E1000_MPRC);
-       E1000_READ_REG(hw, E1000_GPTC);
-       E1000_READ_REG(hw, E1000_GORCL);
-       E1000_READ_REG(hw, E1000_GORCH);
-       E1000_READ_REG(hw, E1000_GOTCL);
-       E1000_READ_REG(hw, E1000_GOTCH);
-       E1000_READ_REG(hw, E1000_RNBC);
-       E1000_READ_REG(hw, E1000_RUC);
-       E1000_READ_REG(hw, E1000_RFC);
-       E1000_READ_REG(hw, E1000_ROC);
-       E1000_READ_REG(hw, E1000_RJC);
-       E1000_READ_REG(hw, E1000_TORL);
-       E1000_READ_REG(hw, E1000_TORH);
-       E1000_READ_REG(hw, E1000_TOTL);
-       E1000_READ_REG(hw, E1000_TOTH);
-       E1000_READ_REG(hw, E1000_TPR);
-       E1000_READ_REG(hw, E1000_TPT);
-       E1000_READ_REG(hw, E1000_MPTC);
-       E1000_READ_REG(hw, E1000_BPTC);
-}
-
-/**
- *  e1000_check_for_copper_link_generic - Check for link (Copper)
- *  @hw: pointer to the HW structure
- *
- *  Checks to see if the link status of the hardware has changed.  If a
- *  change in link status has been detected, then we read the PHY registers
- *  to get the current speed/duplex if link exists.
- **/
-s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val;
-       bool link;
-
-       DEBUGFUNC("e1000_check_for_copper_link");
-
-       /*
-        * We only want to go out to the PHY registers to see if Auto-Neg
-        * has completed and/or if our link status has changed.  The
-        * get_link_status flag is set upon receiving a Link Status
-        * Change or Rx Sequence Error interrupt.
-        */
-       if (!mac->get_link_status) {
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       /*
-        * First we want to see if the MII Status Register reports
-        * link.  If so, then we want to get the current speed/duplex
-        * of the PHY.
-        */
-       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               goto out;
-
-       if (!link)
-               goto out; /* No link detected */
-
-       mac->get_link_status = FALSE;
-
-       /*
-        * Check if there was DownShift, must be checked
-        * immediately after link-up
-        */
-       e1000_check_downshift_generic(hw);
-
-       /*
-        * If we are forcing speed/duplex, then we simply return since
-        * we have already determined whether we have link or not.
-        */
-       if (!mac->autoneg) {
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       /*
-        * Auto-Neg is enabled.  Auto Speed Detection takes care
-        * of MAC speed/duplex configuration.  So we only need to
-        * configure Collision Distance in the MAC.
-        */
-       mac->ops.config_collision_dist(hw);
-
-       /*
-        * Configure Flow Control now that Auto-Neg has completed.
-        * First, we need to restore the desired flow control
-        * settings because we may have had to re-autoneg with a
-        * different link partner.
-        */
-       ret_val = e1000_config_fc_after_link_up_generic(hw);
-       if (ret_val)
-               DEBUGOUT("Error configuring flow control\n");
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_fiber_link_generic - Check for link (Fiber)
- *  @hw: pointer to the HW structure
- *
- *  Checks for link up on the hardware.  If link is not up and we have
- *  a signal, then we need to force link up.
- **/
-s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 rxcw;
-       u32 ctrl;
-       u32 status;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_check_for_fiber_link_generic");
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       status = E1000_READ_REG(hw, E1000_STATUS);
-       rxcw = E1000_READ_REG(hw, E1000_RXCW);
-
-       /*
-        * If we don't have link (auto-negotiation failed or link partner
-        * cannot auto-negotiate), the cable is plugged in (we have signal),
-        * and our link partner is not trying to auto-negotiate with us (we
-        * are receiving idles or data), we need to force link up. We also
-        * need to give auto-negotiation time to complete, in case the cable
-        * was just plugged in. The autoneg_failed flag does this.
-        */
-       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
-       if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
-           (!(rxcw & E1000_RXCW_C))) {
-               if (mac->autoneg_failed == 0) {
-                       mac->autoneg_failed = 1;
-                       goto out;
-               }
-               DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
-
-               /* Disable auto-negotiation in the TXCW register */
-               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
-
-               /* Force link-up and also force full-duplex. */
-               ctrl = E1000_READ_REG(hw, E1000_CTRL);
-               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
-               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
-               /* Configure Flow Control after forcing link up. */
-               ret_val = e1000_config_fc_after_link_up_generic(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error configuring flow control\n");
-                       goto out;
-               }
-       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-               /*
-                * If we are forcing link and we are receiving /C/ ordered
-                * sets, re-enable auto-negotiation in the TXCW register
-                * and disable forced link in the Device Control register
-                * in an attempt to auto-negotiate with our link partner.
-                */
-               DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
-               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
-               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
-
-               mac->serdes_has_link = TRUE;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_serdes_link_generic - Check for link (Serdes)
- *  @hw: pointer to the HW structure
- *
- *  Checks for link up on the hardware.  If link is not up and we have
- *  a signal, then we need to force link up.
- **/
-s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 rxcw;
-       u32 ctrl;
-       u32 status;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_check_for_serdes_link_generic");
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       status = E1000_READ_REG(hw, E1000_STATUS);
-       rxcw = E1000_READ_REG(hw, E1000_RXCW);
-
-       /*
-        * If we don't have link (auto-negotiation failed or link partner
-        * cannot auto-negotiate), and our link partner is not trying to
-        * auto-negotiate with us (we are receiving idles or data),
-        * we need to force link up. We also need to give auto-negotiation
-        * time to complete.
-        */
-       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
-       if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
-               if (mac->autoneg_failed == 0) {
-                       mac->autoneg_failed = 1;
-                       goto out;
-               }
-               DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
-
-               /* Disable auto-negotiation in the TXCW register */
-               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
-
-               /* Force link-up and also force full-duplex. */
-               ctrl = E1000_READ_REG(hw, E1000_CTRL);
-               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
-               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
-               /* Configure Flow Control after forcing link up. */
-               ret_val = e1000_config_fc_after_link_up_generic(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error configuring flow control\n");
-                       goto out;
-               }
-       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-               /*
-                * If we are forcing link and we are receiving /C/ ordered
-                * sets, re-enable auto-negotiation in the TXCW register
-                * and disable forced link in the Device Control register
-                * in an attempt to auto-negotiate with our link partner.
-                */
-               DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
-               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
-               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
-
-               mac->serdes_has_link = TRUE;
-       } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
-               /*
-                * If we force link for non-auto-negotiation switch, check
-                * link status based on MAC synchronization for internal
-                * serdes media type.
-                */
-               /* SYNCH bit and IV bit are sticky. */
-               usec_delay(10);
-               rxcw = E1000_READ_REG(hw, E1000_RXCW);
-               if (rxcw & E1000_RXCW_SYNCH) {
-                       if (!(rxcw & E1000_RXCW_IV)) {
-                               mac->serdes_has_link = TRUE;
-                               DEBUGOUT("SERDES: Link up - forced.\n");
-                       }
-               } else {
-                       mac->serdes_has_link = FALSE;
-                       DEBUGOUT("SERDES: Link down - force failed.\n");
-               }
-       }
-
-       if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
-               status = E1000_READ_REG(hw, E1000_STATUS);
-               if (status & E1000_STATUS_LU) {
-                       /* SYNCH bit and IV bit are sticky, so reread rxcw. */
-                       usec_delay(10);
-                       rxcw = E1000_READ_REG(hw, E1000_RXCW);
-                       if (rxcw & E1000_RXCW_SYNCH) {
-                               if (!(rxcw & E1000_RXCW_IV)) {
-                                       mac->serdes_has_link = TRUE;
-                                       DEBUGOUT("SERDES: Link up - autoneg "
-                                          "completed successfully.\n");
-                               } else {
-                                       mac->serdes_has_link = FALSE;
-                                       DEBUGOUT("SERDES: Link down - invalid "
-                                          "codewords detected in autoneg.\n");
-                               }
-                       } else {
-                               mac->serdes_has_link = FALSE;
-                               DEBUGOUT("SERDES: Link down - no sync.\n");
-                       }
-               } else {
-                       mac->serdes_has_link = FALSE;
-                       DEBUGOUT("SERDES: Link down - autoneg failed\n");
-               }
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_setup_link_generic - Setup flow control and link settings
- *  @hw: pointer to the HW structure
- *
- *  Determines which flow control settings to use, then configures flow
- *  control.  Calls the appropriate media-specific link configuration
- *  function.  Assuming the adapter has a valid link partner, a valid link
- *  should be established.  Assumes the hardware has previously been reset
- *  and the transmitter and receiver are not enabled.
- **/
-s32 e1000_setup_link_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_setup_link_generic");
-
-       /*
-        * In the case of the phy reset being blocked, we already have a link.
-        * We do not need to set it up again.
-        */
-       if (e1000_check_reset_block(hw))
-               goto out;
-
-       /*
-        * If requested flow control is set to default, set flow control
-        * based on the EEPROM flow control settings.
-        */
-       if (hw->fc.requested_mode == e1000_fc_default) {
-               ret_val = e1000_set_default_fc_generic(hw);
-               if (ret_val)
-                       goto out;
-       }
-
-       /*
-        * Save off the requested flow control mode for use later.  Depending
-        * on the link partner's capabilities, we may or may not use this mode.
-        */
-       hw->fc.current_mode = hw->fc.requested_mode;
-
-       DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
-               hw->fc.current_mode);
-
-       /* Call the necessary media_type subroutine to configure the link. */
-       ret_val = hw->mac.ops.setup_physical_interface(hw);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Initialize the flow control address, type, and PAUSE timer
-        * registers to their default values.  This is done even if flow
-        * control is disabled, because it does not hurt anything to
-        * initialize these registers.
-        */
-       DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
-       E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
-       E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
-       E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
-
-       E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
-
-       ret_val = e1000_set_fc_watermarks_generic(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
- *  @hw: pointer to the HW structure
- *
- *  Configures collision distance and flow control for fiber and serdes
- *  links.  Upon successful setup, poll for link.
- **/
-s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 ctrl;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
-       /* Take the link out of reset */
-       ctrl &= ~E1000_CTRL_LRST;
-
-       mac->ops.config_collision_dist(hw);
-
-       ret_val = e1000_commit_fc_settings_generic(hw);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Since auto-negotiation is enabled, take the link out of reset (the
-        * link will be in reset, because we previously reset the chip). This
-        * will restart auto-negotiation.  If auto-negotiation is successful
-        * then the link-up status bit will be set and the flow control enable
-        * bits (RFCE and TFCE) will be set according to their negotiated value.
-        */
-       DEBUGOUT("Auto-negotiation enabled\n");
-
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-       E1000_WRITE_FLUSH(hw);
-       msec_delay(1);
-
-       /*
-        * For these adapters, the SW definable pin 1 is set when the optics
-        * detect a signal.  If we have a signal, then poll for a "Link-Up"
-        * indication.
-        */
-       if (hw->phy.media_type == e1000_media_type_internal_serdes ||
-           (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
-               ret_val = e1000_poll_fiber_serdes_link_generic(hw);
-       } else {
-               DEBUGOUT("No signal detected\n");
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_config_collision_dist_generic - Configure collision distance
- *  @hw: pointer to the HW structure
- *
- *  Configures the collision distance to the default value and is used
- *  during link setup.
- **/
-void e1000_config_collision_dist_generic(struct e1000_hw *hw)
-{
-       u32 tctl;
-
-       DEBUGFUNC("e1000_config_collision_dist_generic");
-
-       tctl = E1000_READ_REG(hw, E1000_TCTL);
-
-       tctl &= ~E1000_TCTL_COLD;
-       tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
-
-       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-       E1000_WRITE_FLUSH(hw);
-}
-
-/**
- *  e1000_poll_fiber_serdes_link_generic - Poll for link up
- *  @hw: pointer to the HW structure
- *
- *  Polls for link up by reading the status register, if link fails to come
- *  up with auto-negotiation, then the link is forced if a signal is detected.
- **/
-static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 i, status;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
-
-       /*
-        * If we have a signal (the cable is plugged in, or assumed TRUE for
-        * serdes media) then poll for a "Link-Up" indication in the Device
-        * Status Register.  Time-out if a link isn't seen in 500 milliseconds
-        * (Auto-negotiation should complete in less than 500
-        * milliseconds even if the other end is doing it in SW).
-        */
-       for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
-               msec_delay(10);
-               status = E1000_READ_REG(hw, E1000_STATUS);
-               if (status & E1000_STATUS_LU)
-                       break;
-       }
-       if (i == FIBER_LINK_UP_LIMIT) {
-               DEBUGOUT("Never got a valid link from auto-neg!!!\n");
-               mac->autoneg_failed = 1;
-               /*
-                * AutoNeg failed to achieve a link, so we'll call
-                * mac->check_for_link. This routine will force the
-                * link up if we detect a signal. This will allow us to
-                * communicate with non-autonegotiating link partners.
-                */
-               ret_val = mac->ops.check_for_link(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error while checking for link\n");
-                       goto out;
-               }
-               mac->autoneg_failed = 0;
-       } else {
-               mac->autoneg_failed = 0;
-               DEBUGOUT("Valid Link Found\n");
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_commit_fc_settings_generic - Configure flow control
- *  @hw: pointer to the HW structure
- *
- *  Write the flow control settings to the Transmit Config Word Register (TXCW)
- *  based on the flow control settings in e1000_mac_info.
- **/
-static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 txcw;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_commit_fc_settings_generic");
-
-       /*
-        * Check for a software override of the flow control settings, and
-        * setup the device accordingly.  If auto-negotiation is enabled, then
-        * software will have to set the "PAUSE" bits to the correct value in
-        * the Transmit Config Word Register (TXCW) and re-start auto-
-        * negotiation.  However, if auto-negotiation is disabled, then
-        * software will have to manually configure the two flow control enable
-        * bits in the CTRL register.
-        *
-        * The possible values of the "fc" parameter are:
-        *      0:  Flow control is completely disabled
-        *      1:  Rx flow control is enabled (we can receive pause frames,
-        *          but not send pause frames).
-        *      2:  Tx flow control is enabled (we can send pause frames but we
-        *          do not support receiving pause frames).
-        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
-        */
-       switch (hw->fc.current_mode) {
-       case e1000_fc_none:
-               /* Flow control completely disabled by a software over-ride. */
-               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
-               break;
-       case e1000_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is disabled
-                * by a software over-ride. Since there really isn't a way to
-                * advertise that we are capable of Rx Pause ONLY, we will
-                * advertise that we support both symmetric and asymmetric Rx
-                * PAUSE.  Later, we will disable the adapter's ability to send
-                * PAUSE frames.
-                */
-               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
-               break;
-       case e1000_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is disabled,
-                * by a software over-ride.
-                */
-               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
-               break;
-       case e1000_fc_full:
-               /*
-                * Flow control (both Rx and Tx) is enabled by a software
-                * over-ride.
-                */
-               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
-               break;
-       default:
-               DEBUGOUT("Flow control param set incorrectly\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-               break;
-       }
-
-       E1000_WRITE_REG(hw, E1000_TXCW, txcw);
-       mac->txcw = txcw;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
- *  @hw: pointer to the HW structure
- *
- *  Sets the flow control high/low threshold (watermark) registers.  If
- *  flow control XON frame transmission is enabled, then set XON frame
- *  transmission as well.
- **/
-s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
-{
-       u32 fcrtl = 0, fcrth = 0;
-
-       DEBUGFUNC("e1000_set_fc_watermarks_generic");
-
-       /*
-        * Set the flow control receive threshold registers.  Normally,
-        * these registers will be set to a default threshold that may be
-        * adjusted later by the driver's runtime code.  However, if the
-        * ability to transmit pause frames is not enabled, then these
-        * registers will be set to 0.
-        */
-       if (hw->fc.current_mode & e1000_fc_tx_pause) {
-               /*
-                * We need to set up the Receive Threshold high and low water
-                * marks as well as (optionally) enabling the transmission of
-                * XON frames.
-                */
-               fcrtl = hw->fc.low_water;
-               if (hw->fc.send_xon)
-                       fcrtl |= E1000_FCRTL_XONE;
-
-               fcrth = hw->fc.high_water;
-       }
-       E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
-       E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_default_fc_generic - Set flow control default values
- *  @hw: pointer to the HW structure
- *
- *  Read the EEPROM for the default values for flow control and store the
- *  values.
- **/
-static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 nvm_data;
-
-       DEBUGFUNC("e1000_set_default_fc_generic");
-
-       /*
-        * Read and store word 0x0F of the EEPROM. This word contains bits
-        * that determine the hardware's default PAUSE (flow control) mode,
-        * a bit that determines whether the HW defaults to enabling or
-        * disabling auto-negotiation, and the direction of the
-        * SW defined pins. If there is no SW over-ride of the flow
-        * control setting, then the variable hw->fc will
-        * be initialized based on a value in the EEPROM.
-        */
-       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
-
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
-               hw->fc.requested_mode = e1000_fc_none;
-       else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
-                NVM_WORD0F_ASM_DIR)
-               hw->fc.requested_mode = e1000_fc_tx_pause;
-       else
-               hw->fc.requested_mode = e1000_fc_full;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
- *  @hw: pointer to the HW structure
- *
- *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
- *  device control register to reflect the adapter settings.  TFCE and RFCE
- *  need to be explicitly set by software when a copper PHY is used because
- *  autonegotiation is managed by the PHY rather than the MAC.  Software must
- *  also configure these bits when link is forced on a fiber connection.
- **/
-s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
-{
-       u32 ctrl;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_force_mac_fc_generic");
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
-       /*
-        * Because we didn't get link via the internal auto-negotiation
-        * mechanism (we either forced link or we got link via PHY
-        * auto-neg), we have to manually enable/disable transmit and
-        * receive flow control.
-        *
-        * The "Case" statement below enables/disables flow control
-        * according to the "hw->fc.current_mode" parameter.
-        *
-        * The possible values of the "fc" parameter are:
-        *      0:  Flow control is completely disabled
-        *      1:  Rx flow control is enabled (we can receive pause
-        *          frames but not send pause frames).
-        *      2:  Tx flow control is enabled (we can send pause frames
-        *          but we do not receive pause frames).
-        *      3:  Both Rx and Tx flow control (symmetric) is enabled.
-        *  other:  No other values should be possible at this point.
-        */
-       DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
-
-       switch (hw->fc.current_mode) {
-       case e1000_fc_none:
-               ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
-               break;
-       case e1000_fc_rx_pause:
-               ctrl &= (~E1000_CTRL_TFCE);
-               ctrl |= E1000_CTRL_RFCE;
-               break;
-       case e1000_fc_tx_pause:
-               ctrl &= (~E1000_CTRL_RFCE);
-               ctrl |= E1000_CTRL_TFCE;
-               break;
-       case e1000_fc_full:
-               ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
-               break;
-       default:
-               DEBUGOUT("Flow control param set incorrectly\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_config_fc_after_link_up_generic - Configures flow control after link
- *  @hw: pointer to the HW structure
- *
- *  Checks the status of auto-negotiation after link up to ensure that the
- *  speed and duplex were not forced.  If the link needed to be forced, then
- *  flow control needs to be forced also.  If auto-negotiation is enabled
- *  and did not fail, then we configure flow control based on our link
- *  partner.
- **/
-s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val = E1000_SUCCESS;
-       u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
-       u16 speed, duplex;
-
-       DEBUGFUNC("e1000_config_fc_after_link_up_generic");
-
-       /*
-        * Check for the case where we have fiber media and auto-neg failed
-        * so we had to force link.  In this case, we need to force the
-        * configuration of the MAC to match the "fc" parameter.
-        */
-       if (mac->autoneg_failed) {
-               if (hw->phy.media_type == e1000_media_type_fiber ||
-                   hw->phy.media_type == e1000_media_type_internal_serdes)
-                       ret_val = e1000_force_mac_fc_generic(hw);
-       } else {
-               if (hw->phy.media_type == e1000_media_type_copper)
-                       ret_val = e1000_force_mac_fc_generic(hw);
-       }
-
-       if (ret_val) {
-               DEBUGOUT("Error forcing flow control settings\n");
-               goto out;
-       }
-
-       /*
-        * Check for the case where we have copper media and auto-neg is
-        * enabled.  In this case, we need to check and see if Auto-Neg
-        * has completed, and if so, how the PHY and link partner has
-        * flow control configured.
-        */
-       if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
-               /*
-                * Read the MII Status Register and check to see if AutoNeg
-                * has completed.  We read this twice because this reg has
-                * some "sticky" (latched) bits.
-                */
-               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
-               if (ret_val)
-                       goto out;
-               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
-               if (ret_val)
-                       goto out;
-
-               if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-                       DEBUGOUT("Copper PHY and Auto Neg "
-                                "has not completed.\n");
-                       goto out;
-               }
-
-               /*
-                * The AutoNeg process has completed, so we now need to
-                * read both the Auto Negotiation Advertisement
-                * Register (Address 4) and the Auto_Negotiation Base
-                * Page Ability Register (Address 5) to determine how
-                * flow control was negotiated.
-                */
-               ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
-                                            &mii_nway_adv_reg);
-               if (ret_val)
-                       goto out;
-               ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
-                                            &mii_nway_lp_ability_reg);
-               if (ret_val)
-                       goto out;
-
-               /*
-                * Two bits in the Auto Negotiation Advertisement Register
-                * (Address 4) and two bits in the Auto Negotiation Base
-                * Page Ability Register (Address 5) determine flow control
-                * for both the PHY and the link partner.  The following
-                * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
-                * 1999, describes these PAUSE resolution bits and how flow
-                * control is determined based upon these settings.
-                * NOTE:  DC = Don't Care
-                *
-                *   LOCAL DEVICE  |   LINK PARTNER
-                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
-                *-------|---------|-------|---------|--------------------
-                *   0   |    0    |  DC   |   DC    | e1000_fc_none
-                *   0   |    1    |   0   |   DC    | e1000_fc_none
-                *   0   |    1    |   1   |    0    | e1000_fc_none
-                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
-                *   1   |    0    |   0   |   DC    | e1000_fc_none
-                *   1   |   DC    |   1   |   DC    | e1000_fc_full
-                *   1   |    1    |   0   |    0    | e1000_fc_none
-                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
-                *
-                * Are both PAUSE bits set to 1?  If so, this implies
-                * Symmetric Flow Control is enabled at both ends.  The
-                * ASM_DIR bits are irrelevant per the spec.
-                *
-                * For Symmetric Flow Control:
-                *
-                *   LOCAL DEVICE  |   LINK PARTNER
-                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-                *-------|---------|-------|---------|--------------------
-                *   1   |   DC    |   1   |   DC    | e1000_fc_full
-                *
-                */
-               if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                   (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise Rx
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == e1000_fc_full) {
-                               hw->fc.current_mode = e1000_fc_full;
-                               DEBUGOUT("Flow Control = FULL.\r\n");
-                       } else {
-                               hw->fc.current_mode = e1000_fc_rx_pause;
-                               DEBUGOUT("Flow Control = "
-                                        "Rx PAUSE frames only.\r\n");
-                       }
-               }
-               /*
-                * For receiving PAUSE frames ONLY.
-                *
-                *   LOCAL DEVICE  |   LINK PARTNER
-                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-                *-------|---------|-------|---------|--------------------
-                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
-                */
-               else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                         (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
-                       hw->fc.current_mode = e1000_fc_tx_pause;
-                       DEBUGOUT("Flow Control = Tx PAUSE frames only.\r\n");
-               }
-               /*
-                * For transmitting PAUSE frames ONLY.
-                *
-                *   LOCAL DEVICE  |   LINK PARTNER
-                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-                *-------|---------|-------|---------|--------------------
-                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
-                */
-               else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                        (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                        !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                        (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
-                       hw->fc.current_mode = e1000_fc_rx_pause;
-                       DEBUGOUT("Flow Control = Rx PAUSE frames only.\r\n");
-               } else {
-                       /*
-                        * Per the IEEE spec, at this point flow control
-                        * should be disabled.
-                        */
-                       hw->fc.current_mode = e1000_fc_none;
-                       DEBUGOUT("Flow Control = NONE.\r\n");
-               }
-
-               /*
-                * Now we need to do one last check...  If we auto-
-                * negotiated to HALF DUPLEX, flow control should not be
-                * enabled per IEEE 802.3 spec.
-                */
-               ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
-               if (ret_val) {
-                       DEBUGOUT("Error getting link speed and duplex\n");
-                       goto out;
-               }
-
-               if (duplex == HALF_DUPLEX)
-                       hw->fc.current_mode = e1000_fc_none;
-
-               /*
-                * Now we call a subroutine to actually force the MAC
-                * controller to use the correct flow control settings.
-                */
-               ret_val = e1000_force_mac_fc_generic(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error forcing flow control settings\n");
-                       goto out;
-               }
-       }
-
-out:
-       return ret_val;
-}
-
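
The PAUSE/ASM_DIR resolution tables in the comments above can also be read as
one small pure function; the sketch below uses local names rather than driver
symbols and simply transcribes the table plus the Rx-only override, which may
help when reasoning about or unit-testing the negotiated outcome:

	#include <stdbool.h>
	#include <stdio.h>

	enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	/*
	 * Resolve flow control from the PAUSE/ASM_DIR bits advertised by the
	 * local device and the link partner, per the IEEE 802.3 table quoted
	 * above.  'requested' stands in for hw->fc.requested_mode and drives
	 * the Rx-only override when both sides advertise symmetric PAUSE.
	 */
	static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
				       bool lp_pause, bool lp_asm,
				       enum fc_mode requested)
	{
		if (loc_pause && lp_pause)
			return (requested == FC_FULL) ? FC_FULL : FC_RX_PAUSE;
		if (!loc_pause && loc_asm && lp_pause && lp_asm)
			return FC_TX_PAUSE;
		if (loc_pause && loc_asm && !lp_pause && lp_asm)
			return FC_RX_PAUSE;
		return FC_NONE;
	}

	int main(void)
	{
		/* symmetric PAUSE on both ends, full flow control requested */
		printf("%d\n", resolve_fc(true, false, true, false, FC_FULL));
		/* local side: ASM_DIR only; partner: PAUSE and ASM_DIR */
		printf("%d\n", resolve_fc(false, true, true, true, FC_FULL));
		return 0;
	}
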
-/**
- *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
- *  @hw: pointer to the HW structure
- *  @speed: stores the current speed
- *  @duplex: stores the current duplex
- *
- *  Read the status register for the current speed/duplex and store the current
- *  speed and duplex for copper connections.
- **/
-s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
-                                              u16 *duplex)
-{
-       u32 status;
-
-       DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
-
-       status = E1000_READ_REG(hw, E1000_STATUS);
-       if (status & E1000_STATUS_SPEED_1000) {
-               *speed = SPEED_1000;
-               DEBUGOUT("1000 Mbs, ");
-       } else if (status & E1000_STATUS_SPEED_100) {
-               *speed = SPEED_100;
-               DEBUGOUT("100 Mbs, ");
-       } else {
-               *speed = SPEED_10;
-               DEBUGOUT("10 Mbs, ");
-       }
-
-       if (status & E1000_STATUS_FD) {
-               *duplex = FULL_DUPLEX;
-               DEBUGOUT("Full Duplex\n");
-       } else {
-               *duplex = HALF_DUPLEX;
-               DEBUGOUT("Half Duplex\n");
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
- *  @hw: pointer to the HW structure
- *  @speed: stores the current speed
- *  @duplex: stores the current duplex
- *
- *  Sets the speed and duplex to gigabit full duplex (the only possible option)
- *  for fiber/serdes links.
- **/
-s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
-                                                    u16 *speed, u16 *duplex)
-{
-       DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
-
-       *speed = SPEED_1000;
-       *duplex = FULL_DUPLEX;
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
- *  @hw: pointer to the HW structure
- *
- *  Acquire the HW semaphore to access the PHY or NVM
- **/
-s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
-{
-       u32 swsm;
-       s32 ret_val = E1000_SUCCESS;
-       s32 timeout = hw->nvm.word_size + 1;
-       s32 i = 0;
-
-       DEBUGFUNC("e1000_get_hw_semaphore_generic");
-
-       /* Get the SW semaphore */
-       while (i < timeout) {
-               swsm = E1000_READ_REG(hw, E1000_SWSM);
-               if (!(swsm & E1000_SWSM_SMBI))
-                       break;
-
-               usec_delay(50);
-               i++;
-       }
-
-       if (i == timeout) {
-               DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-       /* Get the FW semaphore. */
-       for (i = 0; i < timeout; i++) {
-               swsm = E1000_READ_REG(hw, E1000_SWSM);
-               E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
-               /* Semaphore acquired if bit latched */
-               if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
-                       break;
-
-               usec_delay(50);
-       }
-
-       if (i == timeout) {
-               /* Release semaphores */
-               e1000_put_hw_semaphore_generic(hw);
-               DEBUGOUT("Driver can't access the NVM\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_put_hw_semaphore_generic - Release hardware semaphore
- *  @hw: pointer to the HW structure
- *
- *  Release hardware semaphore used to access the PHY or NVM
- **/
-void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
-{
-       u32 swsm;
-
-       DEBUGFUNC("e1000_put_hw_semaphore_generic");
-
-       swsm = E1000_READ_REG(hw, E1000_SWSM);
-
-       swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
-
-       E1000_WRITE_REG(hw, E1000_SWSM, swsm);
-}
-
-/**
- *  e1000_get_auto_rd_done_generic - Check for auto read completion
- *  @hw: pointer to the HW structure
- *
- *  Check EEPROM for Auto Read done bit.
- **/
-s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
-{
-       s32 i = 0;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_get_auto_rd_done_generic");
-
-       while (i < AUTO_READ_DONE_TIMEOUT) {
-               if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
-                       break;
-               msec_delay(1);
-               i++;
-       }
-
-       if (i == AUTO_READ_DONE_TIMEOUT) {
-               DEBUGOUT("Auto read by HW from NVM has not completed.\n");
-               ret_val = -E1000_ERR_RESET;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_valid_led_default_generic - Verify a valid default LED config
- *  @hw: pointer to the HW structure
- *  @data: pointer to storage for the default LED configuration read from the NVM (EEPROM)
- *
- *  Read the EEPROM for the current default LED configuration.  If the
- *  LED configuration is not valid, set to a valid LED configuration.
- **/
-s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
-{
-       s32 ret_val;
-
-       DEBUGFUNC("e1000_valid_led_default_generic");
-
-       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
-               *data = ID_LED_DEFAULT;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_id_led_init_generic - Initialize the LED identification modes
- *  @hw: pointer to the HW structure
- *
- *  Reads the default ID LED settings from the EEPROM and derives the LEDCTL
- *  mode1/mode2 values used later to switch the identification LEDs on and off.
- **/
-s32 e1000_id_led_init_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val;
-       const u32 ledctl_mask = 0x000000FF;
-       const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
-       const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
-       u16 data, i, temp;
-       const u16 led_mask = 0x0F;
-
-       DEBUGFUNC("e1000_id_led_init_generic");
-
-       ret_val = hw->nvm.ops.valid_led_default(hw, &data);
-       if (ret_val)
-               goto out;
-
-       mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
-       mac->ledctl_mode1 = mac->ledctl_default;
-       mac->ledctl_mode2 = mac->ledctl_default;
-
-       for (i = 0; i < 4; i++) {
-               temp = (data >> (i << 2)) & led_mask;
-               switch (temp) {
-               case ID_LED_ON1_DEF2:
-               case ID_LED_ON1_ON2:
-               case ID_LED_ON1_OFF2:
-                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
-                       mac->ledctl_mode1 |= ledctl_on << (i << 3);
-                       break;
-               case ID_LED_OFF1_DEF2:
-               case ID_LED_OFF1_ON2:
-               case ID_LED_OFF1_OFF2:
-                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
-                       mac->ledctl_mode1 |= ledctl_off << (i << 3);
-                       break;
-               default:
-                       /* Do nothing */
-                       break;
-               }
-               switch (temp) {
-               case ID_LED_DEF1_ON2:
-               case ID_LED_ON1_ON2:
-               case ID_LED_OFF1_ON2:
-                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
-                       mac->ledctl_mode2 |= ledctl_on << (i << 3);
-                       break;
-               case ID_LED_DEF1_OFF2:
-               case ID_LED_ON1_OFF2:
-               case ID_LED_OFF1_OFF2:
-                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
-                       mac->ledctl_mode2 |= ledctl_off << (i << 3);
-                       break;
-               default:
-                       /* Do nothing */
-                       break;
-               }
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_setup_led_generic - Configures SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This prepares the SW controllable LED for use and saves the current state
- *  of the LED so it can be later restored.
- **/
-s32 e1000_setup_led_generic(struct e1000_hw *hw)
-{
-       u32 ledctl;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_setup_led_generic");
-
-       if (hw->mac.ops.setup_led != e1000_setup_led_generic) {
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       if (hw->phy.media_type == e1000_media_type_fiber) {
-               ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
-               hw->mac.ledctl_default = ledctl;
-               /* Turn off LED0 */
-               ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
-                           E1000_LEDCTL_LED0_BLINK |
-                           E1000_LEDCTL_LED0_MODE_MASK);
-               ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
-                          E1000_LEDCTL_LED0_MODE_SHIFT);
-               E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
-       } else if (hw->phy.media_type == e1000_media_type_copper) {
-               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_cleanup_led_generic - Set LED config to default operation
- *  @hw: pointer to the HW structure
- *
- *  Remove the current LED configuration and set the LED configuration
- *  to the default value, saved from the EEPROM.
- **/
-s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_cleanup_led_generic");
-
-       E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_blink_led_generic - Blink LED
- *  @hw: pointer to the HW structure
- *
- *  Blink the LEDs which are set to be on.
- **/
-s32 e1000_blink_led_generic(struct e1000_hw *hw)
-{
-       u32 ledctl_blink = 0;
-       u32 i;
-
-       DEBUGFUNC("e1000_blink_led_generic");
-
-       if (hw->phy.media_type == e1000_media_type_fiber) {
-               /* always blink LED0 for PCI-E fiber */
-               ledctl_blink = E1000_LEDCTL_LED0_BLINK |
-                    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
-       } else {
-               /*
-                * set the blink bit for each LED that's "on" (0x0E)
-                * in ledctl_mode2
-                */
-               ledctl_blink = hw->mac.ledctl_mode2;
-               for (i = 0; i < 4; i++)
-                       if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
-                           E1000_LEDCTL_MODE_LED_ON)
-                               ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
-                                                (i * 8));
-       }
-
-       E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_led_on_generic - Turn LED on
- *  @hw: pointer to the HW structure
- *
- *  Turn LED on.
- **/
-s32 e1000_led_on_generic(struct e1000_hw *hw)
-{
-       u32 ctrl;
-
-       DEBUGFUNC("e1000_led_on_generic");
-
-       switch (hw->phy.media_type) {
-       case e1000_media_type_fiber:
-               ctrl = E1000_READ_REG(hw, E1000_CTRL);
-               ctrl &= ~E1000_CTRL_SWDPIN0;
-               ctrl |= E1000_CTRL_SWDPIO0;
-               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-               break;
-       case e1000_media_type_copper:
-               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
-               break;
-       default:
-               break;
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_led_off_generic - Turn LED off
- *  @hw: pointer to the HW structure
- *
- *  Turn LED off.
- **/
-s32 e1000_led_off_generic(struct e1000_hw *hw)
-{
-       u32 ctrl;
-
-       DEBUGFUNC("e1000_led_off_generic");
-
-       switch (hw->phy.media_type) {
-       case e1000_media_type_fiber:
-               ctrl = E1000_READ_REG(hw, E1000_CTRL);
-               ctrl |= E1000_CTRL_SWDPIN0;
-               ctrl |= E1000_CTRL_SWDPIO0;
-               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-               break;
-       case e1000_media_type_copper:
-               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
-               break;
-       default:
-               break;
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
- *  @hw: pointer to the HW structure
- *  @no_snoop: bitmap of snoop events
- *
- *  Set the PCI-Express register to not snoop for the events enabled in 'no_snoop'.
- **/
-void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
-{
-       u32 gcr;
-
-       DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
-
-       if (hw->bus.type != e1000_bus_type_pci_express)
-               goto out;
-
-       if (no_snoop) {
-               gcr = E1000_READ_REG(hw, E1000_GCR);
-               gcr &= ~(PCIE_NO_SNOOP_ALL);
-               gcr |= no_snoop;
-               E1000_WRITE_REG(hw, E1000_GCR, gcr);
-       }
-out:
-       return;
-}
-
-/**
- *  e1000_disable_pcie_master_generic - Disables PCI-express master access
- *  @hw: pointer to the HW structure
- *
- *  Returns E1000_SUCCESS if successful, else returns -10
- *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
- *  the master requests to be disabled.
- *
- *  Disables PCI-Express master access and verifies there are no pending
- *  requests.
- **/
-s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
-{
-       u32 ctrl;
-       s32 timeout = MASTER_DISABLE_TIMEOUT;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_disable_pcie_master_generic");
-
-       if (hw->bus.type != e1000_bus_type_pci_express)
-               goto out;
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
-       while (timeout) {
-               if (!(E1000_READ_REG(hw, E1000_STATUS) &
-                     E1000_STATUS_GIO_MASTER_ENABLE))
-                       break;
-               usec_delay(100);
-               timeout--;
-       }
-
-       if (!timeout) {
-               DEBUGOUT("Master requests are pending.\n");
-               ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
- *  @hw: pointer to the HW structure
- *
- *  Reset the Adaptive Interframe Spacing throttle to default values.
- **/
-void e1000_reset_adaptive_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-
-       DEBUGFUNC("e1000_reset_adaptive_generic");
-
-       if (!mac->adaptive_ifs) {
-               DEBUGOUT("Not in Adaptive IFS mode!\n");
-               goto out;
-       }
-
-       mac->current_ifs_val = 0;
-       mac->ifs_min_val = IFS_MIN;
-       mac->ifs_max_val = IFS_MAX;
-       mac->ifs_step_size = IFS_STEP;
-       mac->ifs_ratio = IFS_RATIO;
-
-       mac->in_ifs_mode = FALSE;
-       E1000_WRITE_REG(hw, E1000_AIT, 0);
-out:
-       return;
-}
-
-/**
- *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
- *  @hw: pointer to the HW structure
- *
- *  Update the Adaptive Interframe Spacing Throttle value based on the
- *  time between transmitted packets and time between collisions.
- **/
-void e1000_update_adaptive_generic(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-
-       DEBUGFUNC("e1000_update_adaptive_generic");
-
-       if (!mac->adaptive_ifs) {
-               DEBUGOUT("Not in Adaptive IFS mode!\n");
-               goto out;
-       }
-
-       if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
-               if (mac->tx_packet_delta > MIN_NUM_XMITS) {
-                       mac->in_ifs_mode = TRUE;
-                       if (mac->current_ifs_val < mac->ifs_max_val) {
-                               if (!mac->current_ifs_val)
-                                       mac->current_ifs_val = mac->ifs_min_val;
-                               else
-                                       mac->current_ifs_val +=
-                                               mac->ifs_step_size;
-                               E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
-                       }
-               }
-       } else {
-               if (mac->in_ifs_mode &&
-                   (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
-                       mac->current_ifs_val = 0;
-                       mac->in_ifs_mode = FALSE;
-                       E1000_WRITE_REG(hw, E1000_AIT, 0);
-               }
-       }
-out:
-       return;
-}
-
-/**
- *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
- *  @hw: pointer to the HW structure
- *
- *  Verify that when auto-negotiation is not used, MDI/MDIx is set
- *  correctly; in that case only forced MDI mode is supported.
- **/
-static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_validate_mdi_setting_generic");
-
-       if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
-               DEBUGOUT("Invalid MDI setting detected\n");
-               hw->phy.mdix = 1;
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_8bit_ctrl_reg_generic - Write an 8bit CTRL register
- *  @hw: pointer to the HW structure
- *  @reg: 32bit register offset such as E1000_SCTL
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Writes an address/data control type register.  There are several of these
- *  and they all have the format address << 8 | data and bit 31 is polled for
- *  completion.
- **/
-s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
-                                      u32 offset, u8 data)
-{
-       u32 i, regvalue = 0;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
-
-       /* Set up the address and data */
-       regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
-       E1000_WRITE_REG(hw, reg, regvalue);
-
-	/* Poll the ready bit to see if the write completed */
-       for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
-               usec_delay(5);
-               regvalue = E1000_READ_REG(hw, reg);
-               if (regvalue & E1000_GEN_CTL_READY)
-                       break;
-       }
-       if (!(regvalue & E1000_GEN_CTL_READY)) {
-               DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
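
For orientation, the generic MAC helpers deleted above are normally invoked from a driver's reset path rather than called individually. The following is a minimal sketch of such a sequence, assuming the shared-code headers are available; the example_reset_mac() wrapper is hypothetical and not part of either the old or the new PMD.

#include "e1000_api.h"

static s32 example_reset_mac(struct e1000_hw *hw)
{
	s32 ret_val;

	/* Quiesce outstanding PCIe master requests before resetting the MAC. */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCIe master disable did not complete.\n");

	/* ... the actual device reset would be issued here ... */

	/* Restore the Adaptive IFS throttle to its default values. */
	e1000_reset_adaptive_generic(hw);

	return E1000_SUCCESS;
}
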
diff --git a/lib/librte_pmd_igb/igb/e1000_mac.h b/lib/librte_pmd_igb/igb/e1000_mac.h
deleted file mode 100644 (file)
index a5a98d0..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_MAC_H_
-#define _E1000_MAC_H_
-
-/*
- * Functions that should not be called directly from drivers but can be used
- * by other files in this 'shared code'
- */
-void e1000_init_mac_ops_generic(struct e1000_hw *hw);
-void e1000_null_mac_generic(struct e1000_hw *hw);
-s32  e1000_null_ops_generic(struct e1000_hw *hw);
-s32  e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
-bool e1000_null_mng_mode(struct e1000_hw *hw);
-void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
-void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
-void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
-s32  e1000_blink_led_generic(struct e1000_hw *hw);
-s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
-s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
-s32  e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
-s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
-s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
-s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
-s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
-s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
-s32  e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
-s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
-void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
-s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
-s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
-                                               u16 *duplex);
-s32  e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
-                                                     u16 *speed, u16 *duplex);
-s32  e1000_id_led_init_generic(struct e1000_hw *hw);
-s32  e1000_led_on_generic(struct e1000_hw *hw);
-s32  e1000_led_off_generic(struct e1000_hw *hw);
-void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
-                                       u8 *mc_addr_list, u32 mc_addr_count);
-s32  e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
-s32  e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
-s32  e1000_setup_led_generic(struct e1000_hw *hw);
-s32  e1000_setup_link_generic(struct e1000_hw *hw);
-s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
-                                       u32 offset, u8 data);
-
-u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
-
-void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
-void e1000_clear_vfta_generic(struct e1000_hw *hw);
-void e1000_config_collision_dist_generic(struct e1000_hw *hw);
-void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
-void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
-void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
-void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-s32  e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-void e1000_reset_adaptive_generic(struct e1000_hw *hw);
-void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
-void e1000_update_adaptive_generic(struct e1000_hw *hw);
-void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-
-#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_manage.c b/lib/librte_pmd_igb/igb/e1000_manage.c
deleted file mode 100644 (file)
index bb0a10b..0000000
+++ /dev/null
@@ -1,472 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_api.h"
-
-/**
- *  e1000_calculate_checksum - Calculate checksum for buffer
- *  @buffer: pointer to EEPROM
- *  @length: size of EEPROM to calculate a checksum for
- *
- *  Calculates the checksum of a buffer over the specified length.  The
- *  calculated checksum is returned.
- **/
-u8 e1000_calculate_checksum(u8 *buffer, u32 length)
-{
-       u32 i;
-       u8 sum = 0;
-
-       DEBUGFUNC("e1000_calculate_checksum");
-
-       if (!buffer)
-               return 0;
-
-       for (i = 0; i < length; i++)
-               sum += buffer[i];
-
-       return (u8) (0 - sum);
-}
-
-/**
- *  e1000_mng_enable_host_if_generic - Checks host interface is enabled
- *  @hw: pointer to the HW structure
- *
- *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- *  This function checks whether the HOST IF is enabled for command operation
- *  and also checks whether the previous command has completed.  It busy waits
- *  if the previous command is not completed.
- **/
-s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
-{
-       u32 hicr;
-       s32 ret_val = E1000_SUCCESS;
-       u8 i;
-
-       DEBUGFUNC("e1000_mng_enable_host_if_generic");
-
-       if (!(hw->mac.arc_subsystem_valid)) {
-               DEBUGOUT("ARC subsystem not valid.\n");
-               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
-       }
-
-       /* Check that the host interface is enabled. */
-       hicr = E1000_READ_REG(hw, E1000_HICR);
-       if ((hicr & E1000_HICR_EN) == 0) {
-               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
-       }
-       /* check the previous command is completed */
-       for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
-               hicr = E1000_READ_REG(hw, E1000_HICR);
-               if (!(hicr & E1000_HICR_C))
-                       break;
-               msec_delay_irq(1);
-       }
-
-       if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
-		DEBUGOUT("Previous command timeout failed.\n");
-               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_check_mng_mode_generic - Generic check management mode
- *  @hw: pointer to the HW structure
- *
- *  Reads the firmware semaphore register and returns TRUE (>0) if
- *  manageability is enabled, else FALSE (0).
- **/
-bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
-{
-       u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
-
-       DEBUGFUNC("e1000_check_mng_mode_generic");
-
-
-       return (fwsm & E1000_FWSM_MODE_MASK) ==
-               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
-}
-
-/**
- *  e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
- *  @hw: pointer to the HW structure
- *
- *  Enables packet filtering on transmit packets if manageability is enabled
- *  and host interface is enabled.
- **/
-bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
-{
-       struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
-       u32 *buffer = (u32 *)&hw->mng_cookie;
-       u32 offset;
-       s32 ret_val, hdr_csum, csum;
-       u8 i, len;
-
-       DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
-
-       hw->mac.tx_pkt_filtering = TRUE;
-
-       /* No manageability, no filtering */
-       if (!hw->mac.ops.check_mng_mode(hw)) {
-               hw->mac.tx_pkt_filtering = FALSE;
-               goto out;
-       }
-
-       /*
-        * If we can't read from the host interface for whatever
-        * reason, disable filtering.
-        */
-       ret_val = hw->mac.ops.mng_enable_host_if(hw);
-       if (ret_val != E1000_SUCCESS) {
-               hw->mac.tx_pkt_filtering = FALSE;
-               goto out;
-       }
-
-       /* Read in the header.  Length and offset are in dwords. */
-       len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
-       offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
-       for (i = 0; i < len; i++)
-               *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
-                                                          offset + i);
-       hdr_csum = hdr->checksum;
-       hdr->checksum = 0;
-       csum = e1000_calculate_checksum((u8 *)hdr,
-                                       E1000_MNG_DHCP_COOKIE_LENGTH);
-       /*
-        * If either the checksums or signature don't match, then
-        * the cookie area isn't considered valid, in which case we
-        * take the safe route of assuming Tx filtering is enabled.
-        */
-       if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
-               hw->mac.tx_pkt_filtering = TRUE;
-               goto out;
-       }
-
-       /* Cookie area is valid, make the final check for filtering. */
-       if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
-               hw->mac.tx_pkt_filtering = FALSE;
-               goto out;
-       }
-
-out:
-       return hw->mac.tx_pkt_filtering;
-}
-
-/**
- *  e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface
- *  @length: size of the buffer
- *
- *  Writes the DHCP information to the host interface.
- **/
-s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
-                                      u16 length)
-{
-       struct e1000_host_mng_command_header hdr;
-       s32 ret_val;
-       u32 hicr;
-
-       DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
-
-       hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
-       hdr.command_length = length;
-       hdr.reserved1 = 0;
-       hdr.reserved2 = 0;
-       hdr.checksum = 0;
-
-       /* Enable the host interface */
-       ret_val = hw->mac.ops.mng_enable_host_if(hw);
-       if (ret_val)
-               goto out;
-
-       /* Populate the host interface with the contents of "buffer". */
-       ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
-                                         sizeof(hdr), &(hdr.checksum));
-       if (ret_val)
-               goto out;
-
-       /* Write the manageability command header */
-       ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
-       if (ret_val)
-               goto out;
-
-       /* Tell the ARC a new command is pending. */
-       hicr = E1000_READ_REG(hw, E1000_HICR);
-       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_mng_write_cmd_header_generic - Writes manageability command header
- *  @hw: pointer to the HW structure
- *  @hdr: pointer to the host interface command header
- *
- *  Writes the command header after performing the checksum calculation.
- **/
-s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
-                                    struct e1000_host_mng_command_header *hdr)
-{
-       u16 i, length = sizeof(struct e1000_host_mng_command_header);
-
-       DEBUGFUNC("e1000_mng_write_cmd_header_generic");
-
-       /* Write the whole command header structure with new checksum. */
-
-       hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
-
-       length >>= 2;
-       /* Write the relevant command block into the ram area. */
-       for (i = 0; i < length; i++) {
-               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
-                                           *((u32 *) hdr + i));
-               E1000_WRITE_FLUSH(hw);
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_mng_host_if_write_generic - Write to the manageability host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface buffer
- *  @length: size of the buffer
- *  @offset: location in the buffer to write to
- *  @sum: sum of the data (not checksum)
- *
- *  This function writes the buffer content at the given offset on the host
- *  interface.  It also handles alignment so the writes are done in the most
- *  efficient way, and it accumulates the sum of the buffer in the *sum parameter.
- **/
-s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
-                                    u16 length, u16 offset, u8 *sum)
-{
-       u8 *tmp;
-       u8 *bufptr = buffer;
-       u32 data = 0;
-       s32 ret_val = E1000_SUCCESS;
-       u16 remaining, i, j, prev_bytes;
-
-       DEBUGFUNC("e1000_mng_host_if_write_generic");
-
-       /* sum = only sum of the data and it is not checksum */
-
-       if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
-               ret_val = -E1000_ERR_PARAM;
-               goto out;
-       }
-
-       tmp = (u8 *)&data;
-       prev_bytes = offset & 0x3;
-       offset >>= 2;
-
-       if (prev_bytes) {
-               data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
-               for (j = prev_bytes; j < sizeof(u32); j++) {
-                       *(tmp + j) = *bufptr++;
-                       *sum += *(tmp + j);
-               }
-               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
-               length -= j - prev_bytes;
-               offset++;
-       }
-
-       remaining = length & 0x3;
-       length -= remaining;
-
-       /* Calculate length in DWORDs */
-       length >>= 2;
-
-       /*
-        * The device driver writes the relevant command block into the
-        * ram area.
-        */
-       for (i = 0; i < length; i++) {
-               for (j = 0; j < sizeof(u32); j++) {
-                       *(tmp + j) = *bufptr++;
-                       *sum += *(tmp + j);
-               }
-
-               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
-                                           data);
-       }
-       if (remaining) {
-               for (j = 0; j < sizeof(u32); j++) {
-                       if (j < remaining)
-                               *(tmp + j) = *bufptr++;
-                       else
-                               *(tmp + j) = 0;
-
-                       *sum += *(tmp + j);
-               }
-               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_enable_mng_pass_thru - Check if management passthrough is needed
- *  @hw: pointer to the HW structure
- *
- *  Verifies whether the hardware needs to leave the interface enabled so that
- *  frames can be directed to and from the management interface.
- **/
-bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
-{
-       u32 manc;
-       u32 fwsm, factps;
-       bool ret_val = FALSE;
-
-       DEBUGFUNC("e1000_enable_mng_pass_thru");
-
-       if (!hw->mac.asf_firmware_present)
-               goto out;
-
-       manc = E1000_READ_REG(hw, E1000_MANC);
-
-       if (!(manc & E1000_MANC_RCV_TCO_EN))
-               goto out;
-
-       if (hw->mac.has_fwsm) {
-               fwsm = E1000_READ_REG(hw, E1000_FWSM);
-               factps = E1000_READ_REG(hw, E1000_FACTPS);
-
-               if (!(factps & E1000_FACTPS_MNGCG) &&
-                   ((fwsm & E1000_FWSM_MODE_MASK) ==
-                    (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
-                       ret_val = TRUE;
-                       goto out;
-               }
-       } else if ((manc & E1000_MANC_SMBUS_EN) &&
-                   !(manc & E1000_MANC_ASF_EN)) {
-                       ret_val = TRUE;
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_host_interface_command - Writes buffer to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: contains a command to write
- *  @length: the byte length of the buffer, must be a multiple of 4 bytes
- *
- *  Writes a buffer to the Host Interface.  Upon success, returns E1000_SUCCESS
- *  else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
-       u32 hicr, i;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_host_interface_command");
-
-       if (!(hw->mac.arc_subsystem_valid)) {
-               DEBUGOUT("Hardware doesn't support host interface command.\n");
-               goto out;
-       }
-
-       if (!hw->mac.asf_firmware_present) {
-               DEBUGOUT("Firmware is not present.\n");
-               goto out;
-       }
-
-       if (length == 0 || length & 0x3 ||
-           length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
-               DEBUGOUT("Buffer length failure.\n");
-               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
-       }
-
-       /* Check that the host interface is enabled. */
-       hicr = E1000_READ_REG(hw, E1000_HICR);
-       if ((hicr & E1000_HICR_EN) == 0) {
-               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
-       }
-
-       /* Calculate length in DWORDs */
-       length >>= 2;
-
-       /*
-        * The device driver writes the relevant command block
-        * into the ram area.
-        */
-       for (i = 0; i < length; i++)
-               E1000_WRITE_REG_ARRAY_DWORD(hw,
-                                           E1000_HOST_IF,
-                                           i,
-                                           *((u32 *)buffer + i));
-
-       /* Setting this bit tells the ARC that a new command is pending. */
-       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
-       for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-               hicr = E1000_READ_REG(hw, E1000_HICR);
-               if (!(hicr & E1000_HICR_C))
-                       break;
-               msec_delay(1);
-       }
-
-       /* Check command successful completion. */
-       if (i == E1000_HI_COMMAND_TIMEOUT ||
-           (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
-               DEBUGOUT("Command has failed with no status valid.\n");
-               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
-       }
-
-       for (i = 0; i < length; i++)
-               *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
-                                                                 E1000_HOST_IF,
-                                                                 i);
-
-out:
-       return ret_val;
-}
-
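As a point of reference, the checksum helper removed above returns (0 - sum), so a buffer checksummed this way recomputes to the stored value once the checksum field is zeroed, exactly as e1000_enable_tx_pkt_filtering_generic() does for the DHCP cookie. A minimal sketch of that check, with a hypothetical validate_cookie() wrapper:

static bool validate_cookie(u8 *cookie, u32 length, u8 stored_csum)
{
	/* Recompute over the buffer with the checksum field already zeroed
	 * and compare against the value that was stored alongside it. */
	return e1000_calculate_checksum(cookie, length) == stored_csum;
}
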
diff --git a/lib/librte_pmd_igb/igb/e1000_manage.h b/lib/librte_pmd_igb/igb/e1000_manage.h
deleted file mode 100644 (file)
index 9a8d756..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_MANAGE_H_
-#define _E1000_MANAGE_H_
-
-bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
-s32  e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
-s32  e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
-                                     u16 length, u16 offset, u8 *sum);
-s32  e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
-                                    struct e1000_host_mng_command_header *hdr);
-s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
-                                       u8 *buffer, u16 length);
-bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
-u8 e1000_calculate_checksum(u8 *buffer, u32 length);
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
-
-enum e1000_mng_mode {
-       e1000_mng_mode_none = 0,
-       e1000_mng_mode_asf,
-       e1000_mng_mode_pt,
-       e1000_mng_mode_ipmi,
-       e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG    0x20000000
-
-#define E1000_FWSM_MODE_MASK  0xE
-#define E1000_FWSM_MODE_SHIFT 1
-
-#define E1000_MNG_IAMT_MODE                  0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
-
-#define E1000_VFTA_ENTRY_SHIFT               5
-#define E1000_VFTA_ENTRY_MASK                0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
-
-#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
-#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
-#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
-
-#define E1000_HICR_EN              0x01  /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C               0x02
-#define E1000_HICR_SV              0x04  /* Status Validity */
-#define E1000_HICR_FW_RESET_ENABLE 0x40
-#define E1000_HICR_FW_RESET        0x80
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE  0x544D4149
-
-#endif
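
The VFTA defines above locate a VLAN ID within the 128 x 32-bit VLAN filter table. A minimal sketch of that mapping, using a hypothetical helper name:

static void example_vfta_location(u16 vlan, u32 *reg_index, u32 *bit_index)
{
	/* 128 dword registers of 32 bits each cover all 4096 VLAN IDs. */
	*reg_index = (vlan >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	*bit_index = vlan & E1000_VFTA_ENTRY_BIT_SHIFT_MASK;
}
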
diff --git a/lib/librte_pmd_igb/igb/e1000_mbx.c b/lib/librte_pmd_igb/igb/e1000_mbx.c
deleted file mode 100644 (file)
index 67dbc64..0000000
+++ /dev/null
@@ -1,764 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_mbx.h"
-
-/**
- *  e1000_null_mbx_check_for_flag - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id)
-{
-       DEBUGFUNC("e1000_null_mbx_check_flag");
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_mbx_transact - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size,
-                            u16 mbx_id)
-{
-       DEBUGFUNC("e1000_null_mbx_rw_msg");
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_mbx - Reads a message from the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to read
- *
- *  returns SUCCESS if it successfully read a message from the buffer
- **/
-s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_read_mbx");
-
-       /* limit read to size of mailbox */
-       if (size > mbx->size)
-               size = mbx->size;
-
-       if (mbx->ops.read)
-               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-
-       return ret_val;
-}
-
-/**
- *  e1000_write_mbx - Write a message to the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully copied message into the buffer
- **/
-s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_write_mbx");
-
-       if (size > mbx->size)
-               ret_val = -E1000_ERR_MBX;
-
-       else if (mbx->ops.write)
-               ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_msg - checks to see if someone sent us mail
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_msg");
-
-       if (mbx->ops.check_for_msg)
-               ret_val = mbx->ops.check_for_msg(hw, mbx_id);
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_ack - checks to see if someone sent us ACK
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_ack");
-
-       if (mbx->ops.check_for_ack)
-               ret_val = mbx->ops.check_for_ack(hw, mbx_id);
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_rst - checks to see if other side has reset
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_rst");
-
-       if (mbx->ops.check_for_rst)
-               ret_val = mbx->ops.check_for_rst(hw, mbx_id);
-
-       return ret_val;
-}
-
-/**
- *  e1000_poll_for_msg - Wait for message notification
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully received a message notification
- **/
-static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       int countdown = mbx->timeout;
-
-       DEBUGFUNC("e1000_poll_for_msg");
-
-       if (!countdown || !mbx->ops.check_for_msg)
-               goto out;
-
-       while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
-               countdown--;
-               if (!countdown)
-                       break;
-               usec_delay(mbx->usec_delay);
-       }
-
-       /* if we failed, all future posted messages fail until reset */
-       if (!countdown)
-               mbx->timeout = 0;
-out:
-       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
-}
-
-/**
- *  e1000_poll_for_ack - Wait for message acknowledgement
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully received a message acknowledgement
- **/
-static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       int countdown = mbx->timeout;
-
-       DEBUGFUNC("e1000_poll_for_ack");
-
-       if (!countdown || !mbx->ops.check_for_ack)
-               goto out;
-
-       while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
-               countdown--;
-               if (!countdown)
-                       break;
-               usec_delay(mbx->usec_delay);
-       }
-
-       /* if we failed, all future posted messages fail until reset */
-       if (!countdown)
-               mbx->timeout = 0;
-out:
-       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
-}
-
-/**
- *  e1000_read_posted_mbx - Wait for message notification and receive message
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully received a message notification and
- *  copied it into the receive buffer.
- **/
-s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_read_posted_mbx");
-
-       if (!mbx->ops.read)
-               goto out;
-
-       ret_val = e1000_poll_for_msg(hw, mbx_id);
-
-	/* if a notification was received, read the message, otherwise we timed out */
-       if (!ret_val)
-               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully copied message into the buffer and
- *  received an ack to that message within delay * timeout period
- **/
-s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_write_posted_mbx");
-
-       /* exit if either we can't write or there isn't a defined timeout */
-       if (!mbx->ops.write || !mbx->timeout)
-               goto out;
-
-       /* send msg */
-       ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
-       /* if msg sent wait until we receive an ack */
-       if (!ret_val)
-               ret_val = e1000_poll_for_ack(hw, mbx_id);
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_init_mbx_ops_generic - Initialize mbx function pointers
- *  @hw: pointer to the HW structure
- *
- *  Sets the function pointers to no-op functions
- **/
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       mbx->ops.init_params = e1000_null_ops_generic;
-       mbx->ops.read = e1000_null_mbx_transact;
-       mbx->ops.write = e1000_null_mbx_transact;
-       mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
-       mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
-       mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
-       mbx->ops.read_posted = e1000_read_posted_mbx;
-       mbx->ops.write_posted = e1000_write_posted_mbx;
-}
-
-/**
- *  e1000_read_v2p_mailbox - read v2p mailbox
- *  @hw: pointer to the HW structure
- *
- *  This function is used to read the v2p mailbox without losing the read to
- *  clear status bits.
- **/
-static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
-{
-       u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0));
-
-       v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
-       hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
-
-       return v2p_mailbox;
-}
-
-/**
- *  e1000_check_for_bit_vf - Determine if a status bit was set
- *  @hw: pointer to the HW structure
- *  @mask: bitmask for bits to be tested and cleared
- *
- *  This function is used to check for the read to clear bits within
- *  the V2P mailbox.
- **/
-static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
-{
-       u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
-       s32 ret_val = -E1000_ERR_MBX;
-
-       if (v2p_mailbox & mask)
-               ret_val = E1000_SUCCESS;
-
-       hw->dev_spec.vf.v2p_mailbox &= ~mask;
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_msg_vf - checks to see if the PF has sent mail
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
- **/
-static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 mbx_id)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_msg_vf");
-
-       if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
-               ret_val = E1000_SUCCESS;
-               hw->mbx.stats.reqs++;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_ack_vf - checks to see if the PF has ACK'd
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
- **/
-static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 mbx_id)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_ack_vf");
-
-       if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
-               ret_val = E1000_SUCCESS;
-               hw->mbx.stats.acks++;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_rst_vf - checks to see if the PF has reset
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the PF has set a reset indication bit or else ERR_MBX
- **/
-static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, u16 mbx_id)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_rst_vf");
-
-       if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
-                                        E1000_V2PMAILBOX_RSTI))) {
-               ret_val = E1000_SUCCESS;
-               hw->mbx.stats.rsts++;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_obtain_mbx_lock_vf - obtain mailbox lock
- *  @hw: pointer to the HW structure
- *
- *  return SUCCESS if we obtained the mailbox lock
- **/
-static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_obtain_mbx_lock_vf");
-
-       /* Take ownership of the buffer */
-       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
-
-       /* reserve mailbox for vf use */
-       if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
-               ret_val = E1000_SUCCESS;
-
-       return ret_val;
-}
-
-/**
- *  e1000_write_mbx_vf - Write a message to the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully copied message into the buffer
- **/
-static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
-                              u16 mbx_id)
-{
-       s32 ret_val;
-       u16 i;
-
-
-       DEBUGFUNC("e1000_write_mbx_vf");
-
-       /* lock the mailbox to prevent pf/vf race condition */
-       ret_val = e1000_obtain_mbx_lock_vf(hw);
-       if (ret_val)
-               goto out_no_write;
-
-       /* flush msg and acks as we are overwriting the message buffer */
-       e1000_check_for_msg_vf(hw, 0);
-       e1000_check_for_ack_vf(hw, 0);
-
-       /* copy the caller specified message to the mailbox memory buffer */
-       for (i = 0; i < size; i++)
-               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]);
-
-       /* update stats */
-       hw->mbx.stats.msgs_tx++;
-
-       /* Drop VFU and interrupt the PF to tell it a message has been sent */
-       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
-
-out_no_write:
-       return ret_val;
-}
-
-/**
- *  e1000_read_mbx_vf - Reads a message from the inbox intended for vf
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to read
- *
- *  returns SUCCESS if it successfully read a message from the buffer
- **/
-static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
-                             u16 mbx_id)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 i;
-
-       DEBUGFUNC("e1000_read_mbx_vf");
-
-       /* lock the mailbox to prevent pf/vf race condition */
-       ret_val = e1000_obtain_mbx_lock_vf(hw);
-       if (ret_val)
-               goto out_no_read;
-
-       /* copy the message from the mailbox memory buffer */
-       for (i = 0; i < size; i++)
-               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i);
-
-       /* Acknowledge receipt and release mailbox, then we're done */
-       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
-
-       /* update stats */
-       hw->mbx.stats.msgs_rx++;
-
-out_no_read:
-       return ret_val;
-}
-
-/**
- *  e1000_init_mbx_params_vf - set initial values for vf mailbox
- *  @hw: pointer to the HW structure
- *
- *  Initializes the hw->mbx struct to correct values for vf mailbox
- */
-s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-
-       /* start mailbox as timed out and let the reset_hw call set the timeout
-        * value to begin communications */
-       mbx->timeout = 0;
-       mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
-
-       mbx->size = E1000_VFMAILBOX_SIZE;
-
-       mbx->ops.read = e1000_read_mbx_vf;
-       mbx->ops.write = e1000_write_mbx_vf;
-       mbx->ops.read_posted = e1000_read_posted_mbx;
-       mbx->ops.write_posted = e1000_write_posted_mbx;
-       mbx->ops.check_for_msg = e1000_check_for_msg_vf;
-       mbx->ops.check_for_ack = e1000_check_for_ack_vf;
-       mbx->ops.check_for_rst = e1000_check_for_rst_vf;
-
-       mbx->stats.msgs_tx = 0;
-       mbx->stats.msgs_rx = 0;
-       mbx->stats.reqs = 0;
-       mbx->stats.acks = 0;
-       mbx->stats.rsts = 0;
-
-       return E1000_SUCCESS;
-}
-
-static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
-{
-       u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
-       s32 ret_val = -E1000_ERR_MBX;
-
-       if (mbvficr & mask) {
-               ret_val = E1000_SUCCESS;
-               E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_msg_pf - checks to see if the VF has sent mail
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_msg_pf");
-
-       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
-               ret_val = E1000_SUCCESS;
-               hw->mbx.stats.reqs++;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_ack_pf - checks to see if the VF has ACKed
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
- **/
-static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_ack_pf");
-
-       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
-               ret_val = E1000_SUCCESS;
-               hw->mbx.stats.acks++;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_for_rst_pf - checks to see if the VF has reset
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if the VF has requested a reset or else ERR_MBX
- **/
-static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
-{
-       u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
-       s32 ret_val = -E1000_ERR_MBX;
-
-       DEBUGFUNC("e1000_check_for_rst_pf");
-
-       if (vflre & (1 << vf_number)) {
-               ret_val = E1000_SUCCESS;
-               E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
-               hw->mbx.stats.rsts++;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_obtain_mbx_lock_pf - obtain mailbox lock
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  return SUCCESS if we obtained the mailbox lock
- **/
-static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
-{
-       s32 ret_val = -E1000_ERR_MBX;
-       u32 p2v_mailbox;
-
-       DEBUGFUNC("e1000_obtain_mbx_lock_pf");
-
-       /* Take ownership of the buffer */
-       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
-
-       /* reserve mailbox for vf use */
-       p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
-       if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
-               ret_val = E1000_SUCCESS;
-
-       return ret_val;
-}
-
-/**
- *  e1000_write_mbx_pf - Places a message in the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if it successfully copied message into the buffer
- **/
-static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
-                              u16 vf_number)
-{
-       s32 ret_val;
-       u16 i;
-
-       DEBUGFUNC("e1000_write_mbx_pf");
-
-       /* lock the mailbox to prevent pf/vf race condition */
-       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
-       if (ret_val)
-               goto out_no_write;
-
-       /* flush msg and acks as we are overwriting the message buffer */
-       e1000_check_for_msg_pf(hw, vf_number);
-       e1000_check_for_ack_pf(hw, vf_number);
-
-       /* copy the caller specified message to the mailbox memory buffer */
-       for (i = 0; i < size; i++)
-               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
-
-       /* Interrupt VF to tell it a message has been sent and release buffer*/
-       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
-
-       /* update stats */
-       hw->mbx.stats.msgs_tx++;
-
-out_no_write:
-       return ret_val;
-
-}
-
-/**
- *  e1000_read_mbx_pf - Read a message from the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @vf_number: the VF index
- *
- *  This function copies a message from the mailbox buffer to the caller's
- *  memory buffer.  The presumption is that the caller knows that there was
- *  a message due to a VF request so no polling for message is needed.
- **/
-static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
-                             u16 vf_number)
-{
-       s32 ret_val;
-       u16 i;
-
-       DEBUGFUNC("e1000_read_mbx_pf");
-
-       /* lock the mailbox to prevent pf/vf race condition */
-       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
-       if (ret_val)
-               goto out_no_read;
-
-       /* copy the message to the mailbox memory buffer */
-       for (i = 0; i < size; i++)
-               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
-
-       /* Acknowledge the message and release buffer */
-       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
-
-       /* update stats */
-       hw->mbx.stats.msgs_rx++;
-
-out_no_read:
-       return ret_val;
-}
-
-/**
- *  e1000_init_mbx_params_pf - set initial values for pf mailbox
- *  @hw: pointer to the HW structure
- *
- *  Initializes the hw->mbx struct to correct values for pf mailbox
- */
-s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-
-       switch (hw->mac.type) {
-       case e1000_82576:
-       case e1000_i350:
-               mbx->timeout = 0;
-               mbx->usec_delay = 0;
-
-               mbx->size = E1000_VFMAILBOX_SIZE;
-
-               mbx->ops.read = e1000_read_mbx_pf;
-               mbx->ops.write = e1000_write_mbx_pf;
-               mbx->ops.read_posted = e1000_read_posted_mbx;
-               mbx->ops.write_posted = e1000_write_posted_mbx;
-               mbx->ops.check_for_msg = e1000_check_for_msg_pf;
-               mbx->ops.check_for_ack = e1000_check_for_ack_pf;
-               mbx->ops.check_for_rst = e1000_check_for_rst_pf;
-
-               mbx->stats.msgs_tx = 0;
-               mbx->stats.msgs_rx = 0;
-               mbx->stats.reqs = 0;
-               mbx->stats.acks = 0;
-               mbx->stats.rsts = 0;
-       default:
-               return E1000_SUCCESS;
-       }
-}
-
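Taken together, the mailbox routines deleted above give a VF driver a simple post-and-wait exchange with the PF. The following is a minimal sketch of such an exchange, assuming e1000_init_mbx_params_vf() has already been called; the wrapper name and message layout are illustrative only.

static s32 example_vf_request_reset(struct e1000_hw *hw)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = E1000_VF_RESET;

	/* Post the request and wait for the PF to acknowledge it. */
	ret_val = e1000_write_posted_mbx(hw, msgbuf, 1, 0);
	if (ret_val)
		return ret_val;

	/* Wait for the PF's reply and copy it out of the mailbox memory. */
	return e1000_read_posted_mbx(hw, msgbuf, 2, 0);
}
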
diff --git a/lib/librte_pmd_igb/igb/e1000_mbx.h b/lib/librte_pmd_igb/igb/e1000_mbx.h
deleted file mode 100644 (file)
index 6e9d538..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_MBX_H_
-#define _E1000_MBX_H_
-
-#include "e1000_api.h"
-
-/* Define mailbox register bits */
-#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
-#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
-#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
-#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
-#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-
-#define E1000_P2VMAILBOX_STS   0x00000001 /* Initiate message send to VF */
-#define E1000_P2VMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
-#define E1000_P2VMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
-#define E1000_P2VMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
-#define E1000_P2VMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
-
-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
-#define E1000_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
-#define E1000_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
-
-#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
-
-/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
- * PF.  The reverse is TRUE if it is E1000_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
- */
-#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
-                                               * this are the ACK */
-#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
-                                               * this are the NACK */
-#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
-                                                 clear to send requests */
-#define E1000_VT_MSGINFO_SHIFT    16
-/* bits 23:16 are used for extra info for certain messages */
-#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
-
-#define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
-#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_MULTICAST_OVERFLOW   (0x80 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
-#define E1000_VF_SET_VLAN_ADD             (0x01 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
-#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
-#define E1000_VF_SET_PROMISC_UNICAST      (0x01 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
-
-#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
-
-#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define E1000_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
-
-s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_check_for_msg(struct e1000_hw *, u16);
-s32 e1000_check_for_ack(struct e1000_hw *, u16);
-s32 e1000_check_for_rst(struct e1000_hw *, u16);
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
-s32 e1000_init_mbx_params_vf(struct e1000_hw *);
-s32 e1000_init_mbx_params_pf(struct e1000_hw *);
-
-#endif /* _E1000_MBX_H_ */
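
As a rough sketch of how the mailbox encoding removed above is meant to be used: the first mailbox word carries the message type in its low byte, extra per-message info in bits 23:16, and the ACK/NACK/CTS status in the top bits. The snippet below composes a hypothetical VF "set VLAN (add)" request and decodes it again; the constants are local copies of the defines above, and everything else (main, the printf) is purely illustrative and not part of the patch itself.

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the mailbox defines above, for a standalone sketch. */
    #define VT_MSGTYPE_ACK    0x80000000u
    #define VT_MSGTYPE_NACK   0x40000000u
    #define VT_MSGINFO_SHIFT  16
    #define VT_MSGINFO_MASK   (0xFFu << VT_MSGINFO_SHIFT)
    #define VF_SET_VLAN       0x04u
    #define VF_SET_VLAN_ADD   (0x01u << VT_MSGINFO_SHIFT)

    int main(void)
    {
            /* First word of a hypothetical VF "set VLAN (add)" request. */
            uint32_t msg = VF_SET_VLAN | VF_SET_VLAN_ADD;

            /* The PF replies by or'ing the ACK (or NACK) bit into the word. */
            uint32_t reply = msg | VT_MSGTYPE_ACK;

            printf("type=0x%02x info=0x%02x acked=%d\n",
                   msg & 0xFFu,                                 /* message type */
                   (msg & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT, /* bits 23:16 */
                   (reply & VT_MSGTYPE_ACK) != 0);
            return 0;
    }
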
diff --git a/lib/librte_pmd_igb/igb/e1000_nvm.c b/lib/librte_pmd_igb/igb/e1000_nvm.c
deleted file mode 100644 (file)
index 1c44270..0000000
+++ /dev/null
@@ -1,1071 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_api.h"
-
-static void e1000_stop_nvm(struct e1000_hw *hw);
-static void e1000_reload_nvm_generic(struct e1000_hw *hw);
-
-/**
- *  e1000_init_nvm_ops_generic - Initialize NVM function pointers
- *  @hw: pointer to the HW structure
- *
- *  Sets up the function pointers to no-op functions
- **/
-void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       DEBUGFUNC("e1000_init_nvm_ops_generic");
-
-       /* Initialize function pointers */
-       nvm->ops.init_params = e1000_null_ops_generic;
-       nvm->ops.acquire = e1000_null_ops_generic;
-       nvm->ops.read = e1000_null_read_nvm;
-       nvm->ops.release = e1000_null_nvm_generic;
-       nvm->ops.reload = e1000_reload_nvm_generic;
-       nvm->ops.update = e1000_null_ops_generic;
-       nvm->ops.valid_led_default = e1000_null_led_default;
-       nvm->ops.validate = e1000_null_ops_generic;
-       nvm->ops.write = e1000_null_write_nvm;
-}
-
-/**
- *  e1000_null_read_nvm - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
-{
-       DEBUGFUNC("e1000_null_read_nvm");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_nvm_generic - No-op function, return void
- *  @hw: pointer to the HW structure
- **/
-void e1000_null_nvm_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_null_nvm_generic");
-       return;
-}
-
-/**
- *  e1000_null_led_default - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data)
-{
-       DEBUGFUNC("e1000_null_led_default");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_write_nvm - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
-{
-       DEBUGFUNC("e1000_null_write_nvm");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_raise_eec_clk - Raise EEPROM clock
- *  @hw: pointer to the HW structure
- *  @eecd: pointer to the EEPROM
- *
- *  Enable/Raise the EEPROM clock bit.
- **/
-static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
-{
-       *eecd = *eecd | E1000_EECD_SK;
-       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
-       E1000_WRITE_FLUSH(hw);
-       usec_delay(hw->nvm.delay_usec);
-}
-
-/**
- *  e1000_lower_eec_clk - Lower EEPROM clock
- *  @hw: pointer to the HW structure
- *  @eecd: pointer to the EEPROM
- *
- *  Clear/Lower the EEPROM clock bit.
- **/
-static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
-{
-       *eecd = *eecd & ~E1000_EECD_SK;
-       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
-       E1000_WRITE_FLUSH(hw);
-       usec_delay(hw->nvm.delay_usec);
-}
-
-/**
- *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
- *  @hw: pointer to the HW structure
- *  @data: data to send to the EEPROM
- *  @count: number of bits to shift out
- *
- *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
- *  "data" parameter will be shifted out to the EEPROM one bit at a time.
- *  In order to do this, "data" must be broken down into bits.
- **/
-static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-       u32 mask;
-
-       DEBUGFUNC("e1000_shift_out_eec_bits");
-
-       mask = 0x01 << (count - 1);
-       if (nvm->type == e1000_nvm_eeprom_microwire)
-               eecd &= ~E1000_EECD_DO;
-       else
-       if (nvm->type == e1000_nvm_eeprom_spi)
-               eecd |= E1000_EECD_DO;
-
-       do {
-               eecd &= ~E1000_EECD_DI;
-
-               if (data & mask)
-                       eecd |= E1000_EECD_DI;
-
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               E1000_WRITE_FLUSH(hw);
-
-               usec_delay(nvm->delay_usec);
-
-               e1000_raise_eec_clk(hw, &eecd);
-               e1000_lower_eec_clk(hw, &eecd);
-
-               mask >>= 1;
-       } while (mask);
-
-       eecd &= ~E1000_EECD_DI;
-       E1000_WRITE_REG(hw, E1000_EECD, eecd);
-}
-
-/**
- *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
- *  @hw: pointer to the HW structure
- *  @count: number of bits to shift in
- *
- *  In order to read a register from the EEPROM, we need to shift 'count' bits
- *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
- *  the EEPROM (setting the SK bit), and then reading the value of the data out
- *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
- *  always be clear.
- **/
-static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
-{
-       u32 eecd;
-       u32 i;
-       u16 data;
-
-       DEBUGFUNC("e1000_shift_in_eec_bits");
-
-       eecd = E1000_READ_REG(hw, E1000_EECD);
-
-       eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
-       data = 0;
-
-       for (i = 0; i < count; i++) {
-               data <<= 1;
-               e1000_raise_eec_clk(hw, &eecd);
-
-               eecd = E1000_READ_REG(hw, E1000_EECD);
-
-               eecd &= ~E1000_EECD_DI;
-               if (eecd & E1000_EECD_DO)
-                       data |= 1;
-
-               e1000_lower_eec_clk(hw, &eecd);
-       }
-
-       return data;
-}
-
-/**
- *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
- *  @hw: pointer to the HW structure
- *  @ee_reg: EEPROM flag for polling
- *
- *  Polls the EEPROM status bit for either read or write completion based
- *  upon the value of 'ee_reg'.
- **/
-s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
-{
-       u32 attempts = 100000;
-       u32 i, reg = 0;
-       s32 ret_val = -E1000_ERR_NVM;
-
-       DEBUGFUNC("e1000_poll_eerd_eewr_done");
-
-       for (i = 0; i < attempts; i++) {
-               if (ee_reg == E1000_NVM_POLL_READ)
-                       reg = E1000_READ_REG(hw, E1000_EERD);
-               else
-                       reg = E1000_READ_REG(hw, E1000_EEWR);
-
-               if (reg & E1000_NVM_RW_REG_DONE) {
-                       ret_val = E1000_SUCCESS;
-                       break;
-               }
-
-               usec_delay(5);
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
- *  Return successful if access grant bit set, else clear the request for
- *  EEPROM access and return -E1000_ERR_NVM (-1).
- **/
-s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
-{
-       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-       s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_acquire_nvm_generic");
-
-       E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
-       eecd = E1000_READ_REG(hw, E1000_EECD);
-
-       while (timeout) {
-               if (eecd & E1000_EECD_GNT)
-                       break;
-               usec_delay(5);
-               eecd = E1000_READ_REG(hw, E1000_EECD);
-               timeout--;
-       }
-
-       if (!timeout) {
-               eecd &= ~E1000_EECD_REQ;
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               DEBUGOUT("Could not acquire NVM grant\n");
-               ret_val = -E1000_ERR_NVM;
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_standby_nvm - Return EEPROM to standby state
- *  @hw: pointer to the HW structure
- *
- *  Return the EEPROM to a standby state.
- **/
-static void e1000_standby_nvm(struct e1000_hw *hw)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-
-       DEBUGFUNC("e1000_standby_nvm");
-
-       if (nvm->type == e1000_nvm_eeprom_microwire) {
-               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               E1000_WRITE_FLUSH(hw);
-               usec_delay(nvm->delay_usec);
-
-               e1000_raise_eec_clk(hw, &eecd);
-
-               /* Select EEPROM */
-               eecd |= E1000_EECD_CS;
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               E1000_WRITE_FLUSH(hw);
-               usec_delay(nvm->delay_usec);
-
-               e1000_lower_eec_clk(hw, &eecd);
-       } else
-       if (nvm->type == e1000_nvm_eeprom_spi) {
-               /* Toggle CS to flush commands */
-               eecd |= E1000_EECD_CS;
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               E1000_WRITE_FLUSH(hw);
-               usec_delay(nvm->delay_usec);
-               eecd &= ~E1000_EECD_CS;
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               E1000_WRITE_FLUSH(hw);
-               usec_delay(nvm->delay_usec);
-       }
-}
-
-/**
- *  e1000_stop_nvm - Terminate EEPROM command
- *  @hw: pointer to the HW structure
- *
- *  Terminates the current command by inverting the EEPROM's chip select pin.
- **/
-static void e1000_stop_nvm(struct e1000_hw *hw)
-{
-       u32 eecd;
-
-       DEBUGFUNC("e1000_stop_nvm");
-
-       eecd = E1000_READ_REG(hw, E1000_EECD);
-       if (hw->nvm.type == e1000_nvm_eeprom_spi) {
-               /* Pull CS high */
-               eecd |= E1000_EECD_CS;
-               e1000_lower_eec_clk(hw, &eecd);
-       } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
-               /* CS on Microwire is active-high */
-               eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               e1000_raise_eec_clk(hw, &eecd);
-               e1000_lower_eec_clk(hw, &eecd);
-       }
-}
-
-/**
- *  e1000_release_nvm_generic - Release exclusive access to EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
- **/
-void e1000_release_nvm_generic(struct e1000_hw *hw)
-{
-       u32 eecd;
-
-       DEBUGFUNC("e1000_release_nvm_generic");
-
-       e1000_stop_nvm(hw);
-
-       eecd = E1000_READ_REG(hw, E1000_EECD);
-       eecd &= ~E1000_EECD_REQ;
-       E1000_WRITE_REG(hw, E1000_EECD, eecd);
-}
-
-/**
- *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
- *  @hw: pointer to the HW structure
- *
- *  Sets up the EEPROM for reading and writing.
- **/
-static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-       s32 ret_val = E1000_SUCCESS;
-       u8 spi_stat_reg;
-
-       DEBUGFUNC("e1000_ready_nvm_eeprom");
-
-       if (nvm->type == e1000_nvm_eeprom_microwire) {
-               /* Clear SK and DI */
-               eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               /* Set CS */
-               eecd |= E1000_EECD_CS;
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-       } else
-       if (nvm->type == e1000_nvm_eeprom_spi) {
-               u16 timeout = NVM_MAX_RETRY_SPI;
-
-               /* Clear SK and CS */
-               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-               E1000_WRITE_REG(hw, E1000_EECD, eecd);
-               usec_delay(1);
-
-               /*
-                * Read "Status Register" repeatedly until the LSB is cleared.
-                * The EEPROM will signal that the command has been completed
-                * by clearing bit 0 of the internal status register.  If it's
-                * not cleared within 'timeout', then error out.
-                */
-               while (timeout) {
-                       e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
-                                                hw->nvm.opcode_bits);
-                       spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
-                       if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
-                               break;
-
-                       usec_delay(5);
-                       e1000_standby_nvm(hw);
-                       timeout--;
-               }
-
-               if (!timeout) {
-                       DEBUGOUT("SPI NVM Status error\n");
-                       ret_val = -E1000_ERR_NVM;
-                       goto out;
-               }
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_nvm_spi - Reads EEPROM using SPI
- *  @hw: pointer to the HW structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of words to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM.
- **/
-s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 i = 0;
-       s32 ret_val;
-       u16 word_in;
-       u8 read_opcode = NVM_READ_OPCODE_SPI;
-
-       DEBUGFUNC("e1000_read_nvm_spi");
-
-       /*
-        * A check for invalid values:  offset too large, too many words,
-        * and not enough words.
-        */
-       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
-           (words == 0)) {
-               DEBUGOUT("nvm parameter(s) out of bounds\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-       ret_val = nvm->ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_ready_nvm_eeprom(hw);
-       if (ret_val)
-               goto release;
-
-       e1000_standby_nvm(hw);
-
-       if ((nvm->address_bits == 8) && (offset >= 128))
-               read_opcode |= NVM_A8_OPCODE_SPI;
-
-       /* Send the READ command (opcode + addr) */
-       e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
-       e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
-
-       /*
-        * Read the data.  SPI NVMs increment the address with each byte
-        * read and will roll over if reading beyond the end.  This allows
-        * us to read the whole NVM from any offset
-        */
-       for (i = 0; i < words; i++) {
-               word_in = e1000_shift_in_eec_bits(hw, 16);
-               data[i] = (word_in >> 8) | (word_in << 8);
-       }
-
-release:
-       nvm->ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_nvm_microwire - Reads EEPROM using microwire
- *  @hw: pointer to the HW structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of words to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM.
- **/
-s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
-                             u16 *data)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 i = 0;
-       s32 ret_val;
-       u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
-
-       DEBUGFUNC("e1000_read_nvm_microwire");
-
-       /*
-        * A check for invalid values:  offset too large, too many words,
-        * and not enough words.
-        */
-       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
-           (words == 0)) {
-               DEBUGOUT("nvm parameter(s) out of bounds\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-       ret_val = nvm->ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_ready_nvm_eeprom(hw);
-       if (ret_val)
-               goto release;
-
-       for (i = 0; i < words; i++) {
-               /* Send the READ command (opcode + addr) */
-               e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
-               e1000_shift_out_eec_bits(hw, (u16)(offset + i),
-                                       nvm->address_bits);
-
-               /*
-                * Read the data.  For microwire, each word requires the
-                * overhead of setup and tear-down.
-                */
-               data[i] = e1000_shift_in_eec_bits(hw, 16);
-               e1000_standby_nvm(hw);
-       }
-
-release:
-       nvm->ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
- *  @hw: pointer to the HW structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of words to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       u32 i, eerd = 0;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_read_nvm_eerd");
-
-       /*
-        * A check for invalid values:  offset too large, too many words,
-        * too many words for the offset, and not enough words.
-        */
-       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
-           (words == 0)) {
-               DEBUGOUT("nvm parameter(s) out of bounds\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-       for (i = 0; i < words; i++) {
-               eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
-                      E1000_NVM_RW_REG_START;
-
-               E1000_WRITE_REG(hw, E1000_EERD, eerd);
-               ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
-               if (ret_val)
-                       break;
-
-               data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
-                          E1000_NVM_RW_REG_DATA);
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_nvm_spi - Write to EEPROM using SPI
- *  @hw: pointer to the HW structure
- *  @offset: offset within the EEPROM to be written to
- *  @words: number of words to write
- *  @data: 16 bit word(s) to be written to the EEPROM
- *
- *  Writes data to EEPROM at offset using SPI interface.
- *
- *  If e1000_update_nvm_checksum is not called after this function, the
- *  EEPROM will most likely contain an invalid checksum.
- **/
-s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       s32 ret_val;
-       u16 widx = 0;
-
-       DEBUGFUNC("e1000_write_nvm_spi");
-
-       /*
-        * A check for invalid values:  offset too large, too many words,
-        * and not enough words.
-        */
-       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
-           (words == 0)) {
-               DEBUGOUT("nvm parameter(s) out of bounds\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-       ret_val = nvm->ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       while (widx < words) {
-               u8 write_opcode = NVM_WRITE_OPCODE_SPI;
-
-               ret_val = e1000_ready_nvm_eeprom(hw);
-               if (ret_val)
-                       goto release;
-
-               e1000_standby_nvm(hw);
-
-               /* Send the WRITE ENABLE command (8 bit opcode) */
-               e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
-                                        nvm->opcode_bits);
-
-               e1000_standby_nvm(hw);
-
-               /*
-                * Some SPI eeproms use the 8th address bit embedded in the
-                * opcode
-                */
-               if ((nvm->address_bits == 8) && (offset >= 128))
-                       write_opcode |= NVM_A8_OPCODE_SPI;
-
-               /* Send the Write command (8-bit opcode + addr) */
-               e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
-               e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
-                                        nvm->address_bits);
-
-               /* Loop to allow for up to whole page write of eeprom */
-               while (widx < words) {
-                       u16 word_out = data[widx];
-                       word_out = (word_out >> 8) | (word_out << 8);
-                       e1000_shift_out_eec_bits(hw, word_out, 16);
-                       widx++;
-
-                       if ((((offset + widx) * 2) % nvm->page_size) == 0) {
-                               e1000_standby_nvm(hw);
-                               break;
-                       }
-               }
-       }
-
-       msec_delay(10);
-release:
-       nvm->ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_nvm_microwire - Writes EEPROM using microwire
- *  @hw: pointer to the HW structure
- *  @offset: offset within the EEPROM to be written to
- *  @words: number of words to write
- *  @data: 16 bit word(s) to be written to the EEPROM
- *
- *  Writes data to EEPROM at offset using microwire interface.
- *
- *  If e1000_update_nvm_checksum is not called after this function, the
- *  EEPROM will most likely contain an invalid checksum.
- **/
-s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
-                              u16 *data)
-{
-       struct e1000_nvm_info *nvm = &hw->nvm;
-       s32  ret_val;
-       u32 eecd;
-       u16 words_written = 0;
-       u16 widx = 0;
-
-       DEBUGFUNC("e1000_write_nvm_microwire");
-
-       /*
-        * A check for invalid values:  offset too large, too many words,
-        * and not enough words.
-        */
-       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
-           (words == 0)) {
-               DEBUGOUT("nvm parameter(s) out of bounds\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-       ret_val = nvm->ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_ready_nvm_eeprom(hw);
-       if (ret_val)
-               goto release;
-
-       e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
-                                (u16)(nvm->opcode_bits + 2));
-
-       e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
-
-       e1000_standby_nvm(hw);
-
-       while (words_written < words) {
-               e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
-                                        nvm->opcode_bits);
-
-               e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
-                                        nvm->address_bits);
-
-               e1000_shift_out_eec_bits(hw, data[words_written], 16);
-
-               e1000_standby_nvm(hw);
-
-               for (widx = 0; widx < 200; widx++) {
-                       eecd = E1000_READ_REG(hw, E1000_EECD);
-                       if (eecd & E1000_EECD_DO)
-                               break;
-                       usec_delay(50);
-               }
-
-               if (widx == 200) {
-                       DEBUGOUT("NVM Write did not complete\n");
-                       ret_val = -E1000_ERR_NVM;
-                       goto release;
-               }
-
-               e1000_standby_nvm(hw);
-
-               words_written++;
-       }
-
-       e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
-                                (u16)(nvm->opcode_bits + 2));
-
-       e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
-
-release:
-       nvm->ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_pba_string_generic - Read device part number
- *  @hw: pointer to the HW structure
- *  @pba_num: pointer to device part number
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number from the EEPROM and stores
- *  the value in pba_num.
- **/
-s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
-                                  u32 pba_num_size)
-{
-       s32 ret_val;
-       u16 nvm_data;
-       u16 pba_ptr;
-       u16 offset;
-       u16 length;
-
-       DEBUGFUNC("e1000_read_pba_string_generic");
-
-       if (pba_num == NULL) {
-               DEBUGOUT("PBA string buffer was null\n");
-               ret_val = E1000_ERR_INVALID_ARGUMENT;
-               goto out;
-       }
-
-       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       /*
-        * if nvm_data is not the pointer guard, the PBA must be in legacy
-        * format, which means pba_ptr is actually our second data word for
-        * the PBA number and we can decode it into an ASCII string
-        */
-       if (nvm_data != NVM_PBA_PTR_GUARD) {
-               DEBUGOUT("NVM PBA number is not stored as string\n");
-
-               /* we will need 11 characters to store the PBA */
-               if (pba_num_size < 11) {
-                       DEBUGOUT("PBA string buffer too small\n");
-                       return E1000_ERR_NO_SPACE;
-               }
-
-               /* extract hex string from data and pba_ptr */
-               pba_num[0] = (nvm_data >> 12) & 0xF;
-               pba_num[1] = (nvm_data >> 8) & 0xF;
-               pba_num[2] = (nvm_data >> 4) & 0xF;
-               pba_num[3] = nvm_data & 0xF;
-               pba_num[4] = (pba_ptr >> 12) & 0xF;
-               pba_num[5] = (pba_ptr >> 8) & 0xF;
-               pba_num[6] = '-';
-               pba_num[7] = 0;
-               pba_num[8] = (pba_ptr >> 4) & 0xF;
-               pba_num[9] = pba_ptr & 0xF;
-
-               /* put a null character on the end of our string */
-               pba_num[10] = '\0';
-
-               /* switch all the data but the '-' to hex char */
-               for (offset = 0; offset < 10; offset++) {
-                       if (pba_num[offset] < 0xA)
-                               pba_num[offset] += '0';
-                       else if (pba_num[offset] < 0x10)
-                               pba_num[offset] += 'A' - 0xA;
-               }
-
-               goto out;
-       }
-
-       ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if (length == 0xFFFF || length == 0) {
-               DEBUGOUT("NVM PBA number section invalid length\n");
-               ret_val = E1000_ERR_NVM_PBA_SECTION;
-               goto out;
-       }
-       /* check if pba_num buffer is big enough */
-       if (pba_num_size < (((u32)length * 2) - 1)) {
-               DEBUGOUT("PBA string buffer too small\n");
-               ret_val = E1000_ERR_NO_SPACE;
-               goto out;
-       }
-
-       /* trim pba length from start of string */
-       pba_ptr++;
-       length--;
-
-       for (offset = 0; offset < length; offset++) {
-               ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Read Error\n");
-                       goto out;
-               }
-               pba_num[offset * 2] = (u8)(nvm_data >> 8);
-               pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
-       }
-       pba_num[offset * 2] = '\0';
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_pba_length_generic - Read device part number length
- *  @hw: pointer to the HW structure
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number length from the EEPROM and
- *  stores the value in pba_num_size.
- **/
-s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
-{
-       s32 ret_val;
-       u16 nvm_data;
-       u16 pba_ptr;
-       u16 length;
-
-       DEBUGFUNC("e1000_read_pba_length_generic");
-
-       if (pba_num_size == NULL) {
-               DEBUGOUT("PBA buffer size was null\n");
-               ret_val = E1000_ERR_INVALID_ARGUMENT;
-               goto out;
-       }
-
-       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       /* if data is not the pointer guard, the PBA must be in legacy format */
-       if (nvm_data != NVM_PBA_PTR_GUARD) {
-               *pba_num_size = 11;
-               goto out;
-       }
-
-       ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
-       if (ret_val) {
-               DEBUGOUT("NVM Read Error\n");
-               goto out;
-       }
-
-       if (length == 0xFFFF || length == 0) {
-               DEBUGOUT("NVM PBA number section invalid length\n");
-               ret_val = E1000_ERR_NVM_PBA_SECTION;
-               goto out;
-       }
-
-       /*
-        * Convert from length in u16 values to u8 chars, add 1 for NULL,
-        * and subtract 2 because length field is included in length.
-        */
-       *pba_num_size = ((u32)length * 2) - 1;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_mac_addr_generic - Read device MAC address
- *  @hw: pointer to the HW structure
- *
- *  Reads the device MAC address from the EEPROM and stores the value.
- *  Since devices with two ports use the same EEPROM, we increment the
- *  last bit in the MAC address for the second port.
- **/
-s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
-{
-       u32 rar_high;
-       u32 rar_low;
-       u16 i;
-
-       rar_high = E1000_READ_REG(hw, E1000_RAH(0));
-       rar_low = E1000_READ_REG(hw, E1000_RAL(0));
-
-       for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
-               hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
-
-       for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
-               hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
-
-       for (i = 0; i < ETH_ADDR_LEN; i++)
-               hw->mac.addr[i] = hw->mac.perm_addr[i];
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
- *  @hw: pointer to the HW structure
- *
- *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
- *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
- **/
-s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 checksum = 0;
-       u16 i, nvm_data;
-
-       DEBUGFUNC("e1000_validate_nvm_checksum_generic");
-
-       for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Read Error\n");
-                       goto out;
-               }
-               checksum += nvm_data;
-       }
-
-       if (checksum != (u16) NVM_SUM) {
-               DEBUGOUT("NVM Checksum Invalid\n");
-               ret_val = -E1000_ERR_NVM;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
- *  @hw: pointer to the HW structure
- *
- *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
- *  up to the checksum.  Then calculates the EEPROM checksum and writes the
- *  value to the EEPROM.
- **/
-s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
-{
-       s32 ret_val;
-       u16 checksum = 0;
-       u16 i, nvm_data;
-
-       DEBUGFUNC("e1000_update_nvm_checksum");
-
-       for (i = 0; i < NVM_CHECKSUM_REG; i++) {
-               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
-               if (ret_val) {
-                       DEBUGOUT("NVM Read Error while updating checksum.\n");
-                       goto out;
-               }
-               checksum += nvm_data;
-       }
-       checksum = (u16) NVM_SUM - checksum;
-       ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
-       if (ret_val)
-               DEBUGOUT("NVM Write Error while updating checksum.\n");
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_reload_nvm_generic - Reloads EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- *  extended control register.
- **/
-static void e1000_reload_nvm_generic(struct e1000_hw *hw)
-{
-       u32 ctrl_ext;
-
-       DEBUGFUNC("e1000_reload_nvm_generic");
-
-       usec_delay(10);
-       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       ctrl_ext |= E1000_CTRL_EXT_EE_RST;
-       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-       E1000_WRITE_FLUSH(hw);
-}
-
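
The checksum rule implemented by e1000_validate_nvm_checksum_generic() and e1000_update_nvm_checksum_generic() above (the 16-bit sum of the EEPROM words up to and including the checksum word must equal 0xBABA) can be exercised with a small self-contained sketch. The in-memory eeprom[] array and nvm_read_word() helper below are invented stand-ins for hw->nvm.ops.read(); the 0x3F checksum offset is assumed to match NVM_CHECKSUM_REG from e1000_defines.h.

    #include <stdint.h>
    #include <stdio.h>

    #define NVM_CHECKSUM_REG 0x003F    /* assumed to mirror the driver define */
    #define NVM_SUM          0xBABAu   /* expected sum, as in the code above */

    /* Stand-in for hw->nvm.ops.read(): one word from a fake in-memory EEPROM. */
    static uint16_t eeprom[NVM_CHECKSUM_REG + 1];

    static uint16_t nvm_read_word(uint16_t offset)
    {
            return eeprom[offset];
    }

    int main(void)
    {
            uint16_t checksum = 0;
            int i;

            eeprom[0] = 0x1234;        /* arbitrary sample contents */
            eeprom[1] = 0x00AB;

            /* Update path: sum words 0x00..0x3E, store NVM_SUM - sum at 0x3F. */
            for (i = 0; i < NVM_CHECKSUM_REG; i++)
                    checksum += nvm_read_word(i);
            eeprom[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - checksum);

            /* Validate path: summing words 0x00..0x3F must now give NVM_SUM. */
            checksum = 0;
            for (i = 0; i < NVM_CHECKSUM_REG + 1; i++)
                    checksum += nvm_read_word(i);
            printf("checksum %s\n", checksum == (uint16_t)NVM_SUM ? "ok" : "bad");
            return 0;
    }
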
diff --git a/lib/librte_pmd_igb/igb/e1000_nvm.h b/lib/librte_pmd_igb/igb/e1000_nvm.h
deleted file mode 100644 (file)
index 6bba641..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_NVM_H_
-#define _E1000_NVM_H_
-
-void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
-s32  e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
-void e1000_null_nvm_generic(struct e1000_hw *hw);
-s32  e1000_null_led_default(struct e1000_hw *hw, u16 *data);
-s32  e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
-s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
-
-s32  e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
-s32  e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
-                                   u32 pba_num_size);
-s32  e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
-s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32  e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
-                              u16 words, u16 *data);
-s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
-                         u16 *data);
-s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
-s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
-s32  e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
-                               u16 words, u16 *data);
-s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
-                         u16 *data);
-s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
-void e1000_release_nvm_generic(struct e1000_hw *hw);
-
-#define E1000_STM_OPCODE  0xDB00
-
-#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_osdep.c b/lib/librte_pmd_igb/igb/e1000_osdep.c
deleted file mode 100644 (file)
index 203dcc8..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_api.h"
-
-/*
- * NOTE: the following routines using the e1000 
- *     naming style are provided to the shared
- *     code but are OS specific
- */
-
-void
-e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
-       return;
-}
-
-void
-e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
-       *value = 0;
-       return;
-}
-
-/*
- * Read the PCI Express capabilities
- */
-int32_t
-e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
-       return E1000_NOT_IMPLEMENTED;
-}
-
-/*
- * Write the PCI Express capabilities
- */
-int32_t
-e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
-       return E1000_NOT_IMPLEMENTED;
-}
diff --git a/lib/librte_pmd_igb/igb/e1000_osdep.h b/lib/librte_pmd_igb/igb/e1000_osdep.h
deleted file mode 100644 (file)
index cf460d5..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_OSDEP_H_
-#define _E1000_OSDEP_H_
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_log.h>
-
-#include "../e1000_logs.h"
-
-/* Remove some compiler warnings for the files in this dir */
-#ifdef __INTEL_COMPILER
-#pragma warning(disable:2259) /* conversion may lose significant bits */
-#pragma warning(disable:869)  /* Parameter was never referenced */
-#pragma warning(disable:181)  /* Arg incompatible with format string */
-#else
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#pragma GCC diagnostic ignored "-Wformat"
-#pragma GCC diagnostic ignored "-Wuninitialized"
-#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7))
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-#endif
-
-#define DELAY(x) rte_delay_us(x)
-#define usec_delay(x) DELAY(x)
-#define msec_delay(x) DELAY(1000*(x))
-#define msec_delay_irq(x) DELAY(1000*(x))
-
-#define DEBUGFUNC(F)            DEBUGOUT(F);
-#define DEBUGOUT(S, args...)    PMD_DRV_LOG(DEBUG, S, ##args)
-#define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT6(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT7(S, args...)   DEBUGOUT(S, ##args)
-
-#define FALSE                  0
-#define TRUE                   1
-
-typedef uint64_t       u64;
-typedef uint32_t       u32;
-typedef uint16_t       u16;
-typedef uint8_t                u8;
-typedef int64_t                s64;
-typedef int32_t                s32;
-typedef int16_t                s16;
-typedef int8_t         s8;
-typedef int            bool;
-
-#define __le16         u16
-#define __le32         u32
-#define __le64         u64
-
-#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
-
-#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
-
-#define E1000_PCI_REG_WRITE(reg, value) do { \
-       E1000_PCI_REG((reg)) = (value); \
-} while (0)
-
-#define E1000_PCI_REG_ADDR(hw, reg) \
-       ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
-
-#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \
-       E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
-
-static inline uint32_t e1000_read_addr(volatile void* addr)
-{
-       return E1000_PCI_REG(addr);
-}
-
-/* Register READ/WRITE macros */
-
-#define E1000_READ_REG(hw, reg) \
-       e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg)))
-
-#define E1000_WRITE_REG(hw, reg, value) \
-       E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value))
-
-#define E1000_READ_REG_ARRAY(hw, reg, index) \
-       E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
-
-#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \
-       E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
-
-#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
-#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
-
-#endif /* _E1000_OSDEP_H_ */
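
For context on the osdep layer removed above: in this DPDK port, the shared code's E1000_READ_REG()/E1000_WRITE_REG() macros reduce to volatile 32-bit loads and stores at hw->hw_addr plus the register offset, with E1000_WRITE_FLUSH() implemented as a read of E1000_STATUS. Below is a minimal stand-alone sketch of that expansion; fake_hw, REG(), bar and the register offset are invented for illustration and only mimic the behaviour of the macros above.

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in: only hw_addr matters to the register macros above. */
    struct fake_hw {
            uint8_t *hw_addr;          /* the BAR0 mapping in the real driver */
    };

    /* Essence of E1000_PCI_REG_ADDR() + E1000_PCI_REG(): volatile MMIO access. */
    #define REG(base, off) (*(volatile uint32_t *)((char *)(base) + (off)))

    int main(void)
    {
            static uint8_t bar[0x100]; /* pretend MMIO window */
            struct fake_hw hw = { .hw_addr = bar };
            uint32_t reg_off = 0x0008; /* arbitrary register offset for the demo */

            /* What E1000_WRITE_REG()/E1000_READ_REG() amount to: a volatile
             * 32-bit store/load at hw->hw_addr + register offset.
             */
            REG(hw.hw_addr, reg_off) = 0x04000000;
            printf("reg = 0x%08x\n", (unsigned)REG(hw.hw_addr, reg_off));
            return 0;
    }
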
diff --git a/lib/librte_pmd_igb/igb/e1000_phy.c b/lib/librte_pmd_igb/igb/e1000_phy.c
deleted file mode 100644 (file)
index aede670..0000000
+++ /dev/null
@@ -1,2988 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "e1000_api.h"
-
-static s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
-static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
-/* Cable length tables */
-static const u16 e1000_m88_cable_length_table[] = {
-       0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
-                (sizeof(e1000_m88_cable_length_table) / \
-                 sizeof(e1000_m88_cable_length_table[0]))
-
-static const u16 e1000_igp_2_cable_length_table[] = {
-       0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
-       6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
-       26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
-       44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
-       66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
-       87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
-       100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
-       124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
-                (sizeof(e1000_igp_2_cable_length_table) / \
-                 sizeof(e1000_igp_2_cable_length_table[0]))
-
-/**
- *  e1000_init_phy_ops_generic - Initialize PHY function pointers
- *  @hw: pointer to the HW structure
- *
- *  Sets up the function pointers to no-op functions
- **/
-void e1000_init_phy_ops_generic(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       DEBUGFUNC("e1000_init_phy_ops_generic");
-
-       /* Initialize function pointers */
-       phy->ops.init_params = e1000_null_ops_generic;
-       phy->ops.acquire = e1000_null_ops_generic;
-       phy->ops.check_polarity = e1000_null_ops_generic;
-       phy->ops.check_reset_block = e1000_null_ops_generic;
-       phy->ops.commit = e1000_null_ops_generic;
-       phy->ops.force_speed_duplex = e1000_null_ops_generic;
-       phy->ops.get_cfg_done = e1000_null_ops_generic;
-       phy->ops.get_cable_length = e1000_null_ops_generic;
-       phy->ops.get_info = e1000_null_ops_generic;
-       phy->ops.read_reg = e1000_null_read_reg;
-       phy->ops.read_reg_locked = e1000_null_read_reg;
-       phy->ops.release = e1000_null_phy_generic;
-       phy->ops.reset = e1000_null_ops_generic;
-       phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
-       phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
-       phy->ops.write_reg = e1000_null_write_reg;
-       phy->ops.write_reg_locked = e1000_null_write_reg;
-       phy->ops.power_up = e1000_null_phy_generic;
-       phy->ops.power_down = e1000_null_phy_generic;
-}
-
-/**
- *  e1000_null_read_reg - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       DEBUGFUNC("e1000_null_read_reg");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_phy_generic - No-op function, return void
- *  @hw: pointer to the HW structure
- **/
-void e1000_null_phy_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_null_phy_generic");
-       return;
-}
-
-/**
- *  e1000_null_lplu_state - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active)
-{
-       DEBUGFUNC("e1000_null_lplu_state");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_null_write_reg - No-op function, return 0
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       DEBUGFUNC("e1000_null_write_reg");
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_check_reset_block_generic - Check if PHY reset is blocked
- *  @hw: pointer to the HW structure
- *
- *  Read the PHY management control register and check whether a PHY reset
- *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
- *  return E1000_BLK_PHY_RESET (12).
- **/
-s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
-{
-       u32 manc;
-
-       DEBUGFUNC("e1000_check_reset_block");
-
-       manc = E1000_READ_REG(hw, E1000_MANC);
-
-       return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
-              E1000_BLK_PHY_RESET : E1000_SUCCESS;
-}
-
-/**
- *  e1000_get_phy_id - Retrieve the PHY ID and revision
- *  @hw: pointer to the HW structure
- *
- *  Reads the PHY registers and stores the PHY ID and possibly the PHY
- *  revision in the hardware structure.
- **/
-s32 e1000_get_phy_id(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u16 phy_id;
-
-       DEBUGFUNC("e1000_get_phy_id");
-
-       if (!(phy->ops.read_reg))
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
-       if (ret_val)
-               goto out;
-
-       phy->id = (u32)(phy_id << 16);
-       usec_delay(20);
-       ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
-       if (ret_val)
-               goto out;
-
-       phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
-       phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_reset_dsp_generic - Reset PHY DSP
- *  @hw: pointer to the HW structure
- *
- *  Reset the digital signal processor.
- **/
-s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_phy_reset_dsp_generic");
-
-       if (!(hw->phy.ops.write_reg))
-               goto out;
-
-       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
-       if (ret_val)
-               goto out;
-
-       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_phy_reg_mdic - Read MDI control register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Reads the MDI control register in the PHY at offset and stores the
- *  information read to data.
- **/
-s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       u32 i, mdic = 0;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_read_phy_reg_mdic");
-
-       if (offset > MAX_PHY_REG_ADDRESS) {
-               DEBUGOUT1("PHY Address %d is out of range\n", offset);
-               return -E1000_ERR_PARAM;
-       }
-
-       /*
-        * Set up Op-code, Phy Address, and register offset in the MDI
-        * Control register.  The MAC will take care of interfacing with the
-        * PHY to retrieve the desired data.
-        */
-       mdic = ((offset << E1000_MDIC_REG_SHIFT) |
-               (phy->addr << E1000_MDIC_PHY_SHIFT) |
-               (E1000_MDIC_OP_READ));
-
-       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
-
-       /*
-        * Poll the ready bit to see if the MDI read completed
-        * Increasing the time out as testing showed failures with
-        * the lower time out
-        */
-       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-               usec_delay(50);
-               mdic = E1000_READ_REG(hw, E1000_MDIC);
-               if (mdic & E1000_MDIC_READY)
-                       break;
-       }
-       if (!(mdic & E1000_MDIC_READY)) {
-               DEBUGOUT("MDI Read did not complete\n");
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-       if (mdic & E1000_MDIC_ERROR) {
-               DEBUGOUT("MDI Error\n");
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-       *data = (u16) mdic;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_phy_reg_mdic - Write MDI control register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write to register at offset
- *
- *  Writes data to MDI control register in the PHY at offset.
- **/
-s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       u32 i, mdic = 0;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_write_phy_reg_mdic");
-
-       if (offset > MAX_PHY_REG_ADDRESS) {
-               DEBUGOUT1("PHY Address %d is out of range\n", offset);
-               return -E1000_ERR_PARAM;
-       }
-
-       /*
-        * Set up Op-code, Phy Address, and register offset in the MDI
-        * Control register.  The MAC will take care of interfacing with the
-        * PHY to retrieve the desired data.
-        */
-       mdic = (((u32)data) |
-               (offset << E1000_MDIC_REG_SHIFT) |
-               (phy->addr << E1000_MDIC_PHY_SHIFT) |
-               (E1000_MDIC_OP_WRITE));
-
-       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
-
-       /*
-        * Poll the ready bit to see if the MDI write completed
-        * Increasing the time out as testing showed failures with
-        * the lower time out
-        */
-       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-               usec_delay(50);
-               mdic = E1000_READ_REG(hw, E1000_MDIC);
-               if (mdic & E1000_MDIC_READY)
-                       break;
-       }
-       if (!(mdic & E1000_MDIC_READY)) {
-               DEBUGOUT("MDI Write did not complete\n");
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-       if (mdic & E1000_MDIC_ERROR) {
-               DEBUGOUT("MDI Error\n");
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_phy_reg_i2c - Read PHY register using i2c
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Reads the PHY register at offset using the i2c interface and stores the
- *  retrieved information in data.
- **/
-s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       u32 i, i2ccmd = 0;
-
-       DEBUGFUNC("e1000_read_phy_reg_i2c");
-
-       /*
-        * Set up Op-code, Phy Address, and register address in the I2CCMD
-        * register.  The MAC will take care of interfacing with the
-        * PHY to retrieve the desired data.
-        */
-       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-                 (E1000_I2CCMD_OPCODE_READ));
-
-       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-
-       /* Poll the ready bit to see if the I2C read completed */
-       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-               usec_delay(50);
-               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
-               if (i2ccmd & E1000_I2CCMD_READY)
-                       break;
-       }
-       if (!(i2ccmd & E1000_I2CCMD_READY)) {
-               DEBUGOUT("I2CCMD Read did not complete\n");
-               return -E1000_ERR_PHY;
-       }
-       if (i2ccmd & E1000_I2CCMD_ERROR) {
-               DEBUGOUT("I2CCMD Error bit set\n");
-               return -E1000_ERR_PHY;
-       }
-
-       /* Need to byte-swap the 16-bit value. */
-       *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_phy_reg_i2c - Write PHY register using i2c
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Writes the data to PHY register at the offset using the i2c interface.
- **/
-s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       u32 i, i2ccmd = 0;
-       u16 phy_data_swapped;
-
-       DEBUGFUNC("e1000_write_phy_reg_i2c");
-
-       /* Swap the data bytes for the I2C interface */
-       phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
-
-       /*
-        * Set up Op-code, Phy Address, and register address in the I2CCMD
-        * register.  The MAC will take care of interfacing with the
-        * PHY to write the desired data.
-        */
-       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-                 E1000_I2CCMD_OPCODE_WRITE |
-                 phy_data_swapped);
-
-       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-
-       /* Poll the ready bit to see if the I2C write completed */
-       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-               usec_delay(50);
-               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
-               if (i2ccmd & E1000_I2CCMD_READY)
-                       break;
-       }
-       if (!(i2ccmd & E1000_I2CCMD_READY)) {
-               DEBUGOUT("I2CCMD Write did not complete\n");
-               return -E1000_ERR_PHY;
-       }
-       if (i2ccmd & E1000_I2CCMD_ERROR) {
-               DEBUGOUT("I2CCMD Error bit set\n");
-               return -E1000_ERR_PHY;
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_phy_reg_m88 - Read m88 PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and stores the retrieved information in data.  Release any acquired
- *  semaphores before exiting.
- **/
-s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_read_phy_reg_m88");
-
-       if (!(hw->phy.ops.acquire))
-               goto out;
-
-       ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-                                         data);
-
-       hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_phy_reg_m88 - Write m88 PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Acquires semaphore, if necessary, then writes the data to PHY register
- *  at the offset.  Release any acquired semaphores before exiting.
- **/
-s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("e1000_write_phy_reg_m88");
-
-       if (!(hw->phy.ops.acquire))
-               goto out;
-
-       ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-                                          data);
-
-       hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  __e1000_read_phy_reg_igp - Read igp PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *  @locked: semaphore has already been acquired or not
- *
- *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and stores the retrieved information in data.  Release any acquired
- *  semaphores before exiting.
- **/
-static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
-                                    bool locked)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("__e1000_read_phy_reg_igp");
-
-       if (!locked) {
-               if (!(hw->phy.ops.acquire))
-                       goto out;
-
-               ret_val = hw->phy.ops.acquire(hw);
-               if (ret_val)
-                       goto out;
-       }
-
-       if (offset > MAX_PHY_MULTI_PAGE_REG) {
-               ret_val = e1000_write_phy_reg_mdic(hw,
-                                                  IGP01E1000_PHY_PAGE_SELECT,
-                                                  (u16)offset);
-               if (ret_val)
-                       goto release;
-       }
-
-       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-                                         data);
-
-release:
-       if (!locked)
-               hw->phy.ops.release(hw);
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_phy_reg_igp - Read igp PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Acquires semaphore then reads the PHY register at offset and stores the
- *  retrieved information in data.
- *  Release the acquired semaphore before exiting.
- **/
-s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       return __e1000_read_phy_reg_igp(hw, offset, data, FALSE);
-}
-
-/**
- *  e1000_read_phy_reg_igp_locked - Read igp PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Reads the PHY register at offset and stores the retrieved information
- *  in data.  Assumes semaphore already acquired.
- **/
-s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       return __e1000_read_phy_reg_igp(hw, offset, data, TRUE);
-}
-
-/**
- *  e1000_write_phy_reg_igp - Write igp PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *  @locked: semaphore has already been acquired or not
- *
- *  Acquires semaphore, if necessary, then writes the data to PHY register
- *  at the offset.  Release any acquired semaphores before exiting.
- **/
-static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
-                                     bool locked)
-{
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("__e1000_write_phy_reg_igp");
-
-       if (!locked) {
-               if (!(hw->phy.ops.acquire))
-                       goto out;
-
-               ret_val = hw->phy.ops.acquire(hw);
-               if (ret_val)
-                       goto out;
-       }
-
-       if (offset > MAX_PHY_MULTI_PAGE_REG) {
-               ret_val = e1000_write_phy_reg_mdic(hw,
-                                                  IGP01E1000_PHY_PAGE_SELECT,
-                                                  (u16)offset);
-               if (ret_val)
-                       goto release;
-       }
-
-       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-                                          data);
-
-release:
-       if (!locked)
-               hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_phy_reg_igp - Write igp PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Acquires semaphore then writes the data to PHY register
- *  at the offset.  Release any acquired semaphores before exiting.
- **/
-s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       return __e1000_write_phy_reg_igp(hw, offset, data, FALSE);
-}
-
-/**
- *  e1000_write_phy_reg_igp_locked - Write igp PHY register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Writes the data to PHY register at the offset.
- *  Assumes semaphore already acquired.
- **/
-s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       return __e1000_write_phy_reg_igp(hw, offset, data, TRUE);
-}
-
-/**
- *  __e1000_read_kmrn_reg - Read kumeran register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *  @locked: semaphore has already been acquired or not
- *
- *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
- *  using the kumeran interface.  The information retrieved is stored in data.
- *  Release any acquired semaphores before exiting.
- **/
-static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
-                                 bool locked)
-{
-       u32 kmrnctrlsta;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("__e1000_read_kmrn_reg");
-
-       if (!locked) {
-               if (!(hw->phy.ops.acquire))
-                       goto out;
-
-               ret_val = hw->phy.ops.acquire(hw);
-               if (ret_val)
-                       goto out;
-       }
-
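-       /*
-        * Write the offset with the read-enable bit set; after a short delay
-        * the requested register value is latched back into KMRNCTRLSTA.
-        */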
-       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
-                      E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
-       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
-
-       usec_delay(2);
-
-       kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
-       *data = (u16)kmrnctrlsta;
-
-       if (!locked)
-               hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_read_kmrn_reg_generic -  Read kumeran register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Acquires semaphore then reads the PHY register at offset using the
- *  kumeran interface.  The information retrieved is stored in data.
- *  Release the acquired semaphore before exiting.
- **/
-s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       return __e1000_read_kmrn_reg(hw, offset, data, FALSE);
-}
-
-/**
- *  e1000_read_kmrn_reg_locked -  Read kumeran register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to be read
- *  @data: pointer to the read data
- *
- *  Reads the PHY register at offset using the kumeran interface.  The
- *  information retrieved is stored in data.
- *  Assumes semaphore already acquired.
- **/
-s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-       return __e1000_read_kmrn_reg(hw, offset, data, TRUE);
-}
-
-/**
- *  __e1000_write_kmrn_reg - Write kumeran register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *  @locked: semaphore has already been acquired or not
- *
- *  Acquires semaphore, if necessary.  Then writes the data to the PHY register
- *  at the offset using the kumeran interface.  Release any acquired semaphores
- *  before exiting.
- **/
-static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
-                                  bool locked)
-{
-       u32 kmrnctrlsta;
-       s32 ret_val = E1000_SUCCESS;
-
-       DEBUGFUNC("__e1000_write_kmrn_reg");
-
-       if (!locked) {
-               if (!(hw->phy.ops.acquire))
-                       goto out;
-
-               ret_val = hw->phy.ops.acquire(hw);
-               if (ret_val)
-                       goto out;
-       }
-
-       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
-                      E1000_KMRNCTRLSTA_OFFSET) | data;
-       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
-
-       usec_delay(2);
-
-       if (!locked)
-               hw->phy.ops.release(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_write_kmrn_reg_generic -  Write kumeran register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Acquires semaphore then writes the data to the PHY register at the offset
- *  using the kumeran interface.  Release the acquired semaphore before exiting.
- **/
-s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       return __e1000_write_kmrn_reg(hw, offset, data, FALSE);
-}
-
-/**
- *  e1000_write_kmrn_reg_locked -  Write kumeran register
- *  @hw: pointer to the HW structure
- *  @offset: register offset to write to
- *  @data: data to write at register offset
- *
- *  Writes the data to the PHY register at the offset using the kumeran interface.
- *  Assumes semaphore already acquired.
- **/
-s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
-{
-       return __e1000_write_kmrn_reg(hw, offset, data, TRUE);
-}
-
-/**
- *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
- *  @hw: pointer to the HW structure
- *
- *  Sets up Carrier-sense on Transmit and downshift values.
- **/
-s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
-{
-       s32 ret_val;
-       u16 phy_data;
-
-       DEBUGFUNC("e1000_copper_link_setup_82577");
-
-       if (hw->phy.reset_disable) {
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       if (hw->phy.type == e1000_phy_82580) {
-               ret_val = hw->phy.ops.reset(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error resetting the PHY.\n");
-                       goto out;
-               }
-       }
-
-       /* Enable CRS on Tx. This must be set for half-duplex operation. */
-       ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
-
-       /* Enable downshift */
-       phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
-
-       ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_copper_link_setup_m88 - Setup m88 PHYs for copper link
- *  @hw: pointer to the HW structure
- *
- *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
- *  and downshift values are also set.
- **/
-s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data;
-
-       DEBUGFUNC("e1000_copper_link_setup_m88");
-
-       if (phy->reset_disable) {
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       /* Enable CRS on Tx. This must be set for half-duplex operation. */
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-
-       /*
-        * Options:
-        *   MDI/MDI-X = 0 (default)
-        *   0 - Auto for all speeds
-        *   1 - MDI mode
-        *   2 - MDI-X mode
-        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
-        */
-       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-
-       switch (phy->mdix) {
-       case 1:
-               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
-               break;
-       case 2:
-               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
-               break;
-       case 3:
-               phy_data |= M88E1000_PSCR_AUTO_X_1000T;
-               break;
-       case 0:
-       default:
-               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
-               break;
-       }
-
-       /*
-        * Options:
-        *   disable_polarity_correction = 0 (default)
-        *       Automatic Correction for Reversed Cable Polarity
-        *   0 - Disabled
-        *   1 - Enabled
-        */
-       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-       if (phy->disable_polarity_correction == 1)
-               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
-
-       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-       if (ret_val)
-               goto out;
-
-       if (phy->revision < E1000_REVISION_4) {
-               /*
-                * Force TX_CLK in the Extended PHY Specific Control Register
-                * to 25MHz clock.
-                */
-               ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-                                            &phy_data);
-               if (ret_val)
-                       goto out;
-
-               phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
-               if ((phy->revision == E1000_REVISION_2) &&
-                   (phy->id == M88E1111_I_PHY_ID)) {
-                       /* 82573L PHY - set the downshift counter to 5x. */
-                       phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
-                       phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
-               } else {
-                       /* Configure Master and Slave downshift values */
-                       phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
-                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
-                       phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
-                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
-               }
-               ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-                                            phy_data);
-               if (ret_val)
-                       goto out;
-       }
-
-       /* Commit the changes. */
-       ret_val = phy->ops.commit(hw);
-       if (ret_val) {
-               DEBUGOUT("Error committing the PHY changes\n");
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_copper_link_setup_m88_gen2 - Setup m88 PHYs for copper link
- *  @hw: pointer to the HW structure
- *
- *  Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
- *  Also enables and sets the downshift parameters.
- **/
-s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data;
-
-       DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
-
-       if (phy->reset_disable) {
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       /* Enable CRS on Tx. This must be set for half-duplex operation. */
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Options:
-        *   MDI/MDI-X = 0 (default)
-        *   0 - Auto for all speeds
-        *   1 - MDI mode
-        *   2 - MDI-X mode
-        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
-        */
-       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-
-       switch (phy->mdix) {
-       case 1:
-               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
-               break;
-       case 2:
-               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
-               break;
-       case 3:
-               /* M88E1112 does not support this mode */
-               if (phy->id != M88E1112_E_PHY_ID) {
-                       phy_data |= M88E1000_PSCR_AUTO_X_1000T;
-                       break;
-               }
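-               /* fall through */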
-       case 0:
-       default:
-               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
-               break;
-       }
-
-       /*
-        * Options:
-        *   disable_polarity_correction = 0 (default)
-        *       Automatic Correction for Reversed Cable Polarity
-        *   0 - Disabled
-        *   1 - Enabled
-        */
-       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-       if (phy->disable_polarity_correction == 1)
-               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
-
-       /* Enable downshift and set it to 6X */
-       phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
-       phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
-       phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
-
-       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-       if (ret_val)
-               goto out;
-
-       /* Commit the changes. */
-       ret_val = phy->ops.commit(hw);
-       if (ret_val) {
-               DEBUGOUT("Error committing the PHY changes\n");
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_copper_link_setup_igp - Setup igp PHYs for copper link
- *  @hw: pointer to the HW structure
- *
- *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
- *  igp PHYs.
- **/
-s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-
-       DEBUGFUNC("e1000_copper_link_setup_igp");
-
-       if (phy->reset_disable) {
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       ret_val = hw->phy.ops.reset(hw);
-       if (ret_val) {
-               DEBUGOUT("Error resetting the PHY.\n");
-               goto out;
-       }
-
-       /*
-        * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
-        * timeout issues when LFS is enabled.
-        */
-       msec_delay(100);
-
-       /* disable lplu d0 during driver init */
-       if (hw->phy.ops.set_d0_lplu_state) {
-               ret_val = hw->phy.ops.set_d0_lplu_state(hw, FALSE);
-               if (ret_val) {
-                       DEBUGOUT("Error Disabling LPLU D0\n");
-                       goto out;
-               }
-       }
-       /* Configure mdi-mdix settings */
-       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
-       if (ret_val)
-               goto out;
-
-       data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-
-       switch (phy->mdix) {
-       case 1:
-               data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-               break;
-       case 2:
-               data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
-               break;
-       case 0:
-       default:
-               data |= IGP01E1000_PSCR_AUTO_MDIX;
-               break;
-       }
-       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
-       if (ret_val)
-               goto out;
-
-       /* set auto-master slave resolution settings */
-       if (hw->mac.autoneg) {
-               /*
-                * when autonegotiation advertisement is only 1000Mbps then we
-                * should disable SmartSpeed and enable Auto MasterSlave
-                * resolution as hardware default.
-                */
-               if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
-                       /* Disable SmartSpeed */
-                       ret_val = phy->ops.read_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    &data);
-                       if (ret_val)
-                               goto out;
-
-                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-                       ret_val = phy->ops.write_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    data);
-                       if (ret_val)
-                               goto out;
-
-                       /* Set auto Master/Slave resolution process */
-                       ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
-                       if (ret_val)
-                               goto out;
-
-                       data &= ~CR_1000T_MS_ENABLE;
-                       ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
-                       if (ret_val)
-                               goto out;
-               }
-
-               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
-               if (ret_val)
-                       goto out;
-
-               /* load defaults for future use */
-               phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
-                       ((data & CR_1000T_MS_VALUE) ?
-                       e1000_ms_force_master :
-                       e1000_ms_force_slave) :
-                       e1000_ms_auto;
-
-               switch (phy->ms_type) {
-               case e1000_ms_force_master:
-                       data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
-                       break;
-               case e1000_ms_force_slave:
-                       data |= CR_1000T_MS_ENABLE;
-                       data &= ~(CR_1000T_MS_VALUE);
-                       break;
-               case e1000_ms_auto:
-                       data &= ~CR_1000T_MS_ENABLE;
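-                       /* fall through */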
-               default:
-                       break;
-               }
-               ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
-               if (ret_val)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
- *  @hw: pointer to the HW structure
- *
- *  Performs initial bounds checking on the autoneg advertisement parameter, then
- *  configures the PHY to advertise the full capability.  Sets up the PHY to
- *  autoneg and restarts the negotiation process with the link partner.  If
- *  autoneg_wait_to_complete is set, waits for autoneg to complete before exiting.
- **/
-static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_ctrl;
-
-       DEBUGFUNC("e1000_copper_link_autoneg");
-
-       /*
-        * Perform some bounds checking on the autoneg advertisement
-        * parameter.
-        */
-       phy->autoneg_advertised &= phy->autoneg_mask;
-
-       /*
-        * If autoneg_advertised is zero, we assume it was not defaulted
-        * by the calling code so we set to advertise full capability.
-        */
-       if (phy->autoneg_advertised == 0)
-               phy->autoneg_advertised = phy->autoneg_mask;
-
-       DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
-       ret_val = e1000_phy_setup_autoneg(hw);
-       if (ret_val) {
-               DEBUGOUT("Error Setting up Auto-Negotiation\n");
-               goto out;
-       }
-       DEBUGOUT("Restarting Auto-Neg\n");
-
-       /*
-        * Restart auto-negotiation by setting the Auto Neg Enable bit and
-        * the Auto Neg Restart bit in the PHY control register.
-        */
-       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
-       if (ret_val)
-               goto out;
-
-       phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
-       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Does the user want to wait for Auto-Neg to complete here, or
-        * check at a later time (for example, callback routine).
-        */
-       if (phy->autoneg_wait_to_complete) {
-               ret_val = hw->mac.ops.wait_autoneg(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error while waiting for "
-                                "autoneg to complete\n");
-                       goto out;
-               }
-       }
-
-       hw->mac.get_link_status = TRUE;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
- *  @hw: pointer to the HW structure
- *
- *  Reads the MII auto-neg advertisement register and/or the 1000T control
- *  register and, if the PHY is already set up for auto-negotiation, returns
- *  successfully.  Otherwise, sets up advertisement and flow control to
- *  the appropriate values for the desired auto-negotiation.
- **/
-static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 mii_autoneg_adv_reg;
-       u16 mii_1000t_ctrl_reg = 0;
-
-       DEBUGFUNC("e1000_phy_setup_autoneg");
-
-       phy->autoneg_advertised &= phy->autoneg_mask;
-
-       /* Read the MII Auto-Neg Advertisement Register (Address 4). */
-       ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
-       if (ret_val)
-               goto out;
-
-       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
-               /* Read the MII 1000Base-T Control Register (Address 9). */
-               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
-                                           &mii_1000t_ctrl_reg);
-               if (ret_val)
-                       goto out;
-       }
-
-       /*
-        * Need to parse both autoneg_advertised and fc and set up
-        * the appropriate PHY registers.  First we will parse for
-        * autoneg_advertised software override.  Since we can advertise
-        * a plethora of combinations, we need to check each bit
-        * individually.
-        */
-
-       /*
-        * First we clear all the 10/100 mb speed bits in the Auto-Neg
-        * Advertisement Register (Address 4) and the 1000 mb speed bits in
-        * the 1000Base-T Control Register (Address 9).
-        */
-       mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
-                                NWAY_AR_100TX_HD_CAPS |
-                                NWAY_AR_10T_FD_CAPS   |
-                                NWAY_AR_10T_HD_CAPS);
-       mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
-
-       DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
-
-       /* Do we want to advertise 10 Mb Half Duplex? */
-       if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
-               DEBUGOUT("Advertise 10mb Half duplex\n");
-               mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
-       }
-
-       /* Do we want to advertise 10 Mb Full Duplex? */
-       if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
-               DEBUGOUT("Advertise 10mb Full duplex\n");
-               mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
-       }
-
-       /* Do we want to advertise 100 Mb Half Duplex? */
-       if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
-               DEBUGOUT("Advertise 100mb Half duplex\n");
-               mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
-       }
-
-       /* Do we want to advertise 100 Mb Full Duplex? */
-       if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
-               DEBUGOUT("Advertise 100mb Full duplex\n");
-               mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
-       }
-
-       /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
-       if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
-               DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
-
-       /* Do we want to advertise 1000 Mb Full Duplex? */
-       if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
-               DEBUGOUT("Advertise 1000mb Full duplex\n");
-               mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
-       }
-
-       /*
-        * Check for a software override of the flow control settings, and
-        * setup the PHY advertisement registers accordingly.  If
-        * auto-negotiation is enabled, then software will have to set the
-        * "PAUSE" bits to the correct value in the Auto-Negotiation
-        * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
-        * negotiation.
-        *
-        * The possible values of the "fc" parameter are:
-        *      0:  Flow control is completely disabled
-        *      1:  Rx flow control is enabled (we can receive pause frames
-        *          but not send pause frames).
-        *      2:  Tx flow control is enabled (we can send pause frames
-        *          but we do not support receiving pause frames).
-        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
-        *  other:  No software override.  The flow control configuration
-        *          in the EEPROM is used.
-        */
-       switch (hw->fc.current_mode) {
-       case e1000_fc_none:
-               /*
-                * Flow control (Rx & Tx) is completely disabled by a
-                * software over-ride.
-                */
-               mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
-               break;
-       case e1000_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled, and Tx Flow control is
-                * disabled, by a software over-ride.
-                *
-                * Since there really isn't a way to advertise that we are
-                * capable of Rx Pause ONLY, we will advertise that we
-                * support both symmetric and asymmetric Rx PAUSE.  Later
-                * (in e1000_config_fc_after_link_up) we will disable the
-                * hw's ability to send PAUSE frames.
-                */
-               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
-               break;
-       case e1000_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled, by a software over-ride.
-                */
-               mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
-               mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
-               break;
-       case e1000_fc_full:
-               /*
-                * Flow control (both Rx and Tx) is enabled by a software
-                * over-ride.
-                */
-               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
-               break;
-       default:
-               DEBUGOUT("Flow control param set incorrectly\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
-       if (ret_val)
-               goto out;
-
-       DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
-
-       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
-               ret_val = phy->ops.write_reg(hw,
-                                             PHY_1000T_CTRL,
-                                             mii_1000t_ctrl_reg);
-               if (ret_val)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_setup_copper_link_generic - Configure copper link settings
- *  @hw: pointer to the HW structure
- *
- *  Calls the appropriate function to configure the link for auto-neg or forced
- *  speed and duplex.  Then we check for link; once link is established, the
- *  collision distance and flow control are configured.  If link is
- *  not established, we return -E1000_ERR_PHY (-2).
- **/
-s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
-{
-       s32 ret_val;
-       bool link;
-
-       DEBUGFUNC("e1000_setup_copper_link_generic");
-
-       if (hw->mac.autoneg) {
-               /*
-                * Setup autoneg and flow control advertisement and perform
-                * autonegotiation.
-                */
-               ret_val = e1000_copper_link_autoneg(hw);
-               if (ret_val)
-                       goto out;
-       } else {
-               /*
-                * PHY will be set to 10H, 10F, 100H or 100F
-                * depending on user settings.
-                */
-               DEBUGOUT("Forcing Speed and Duplex\n");
-               ret_val = hw->phy.ops.force_speed_duplex(hw);
-               if (ret_val) {
-                       DEBUGOUT("Error Forcing Speed and Duplex\n");
-                       goto out;
-               }
-       }
-
-       /*
-        * Check link status. Wait up to 100 microseconds for link to become
-        * valid.
-        */
-       ret_val = e1000_phy_has_link_generic(hw,
-                                            COPPER_LINK_UP_LIMIT,
-                                            10,
-                                            &link);
-       if (ret_val)
-               goto out;
-
-       if (link) {
-               DEBUGOUT("Valid link established!!!\n");
-               e1000_config_collision_dist_generic(hw);
-               ret_val = e1000_config_fc_after_link_up_generic(hw);
-       } else {
-               DEBUGOUT("Unable to establish link!!!\n");
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
- *  @hw: pointer to the HW structure
- *
- *  Calls the PHY setup function to force speed and duplex.  Clears the
- *  auto-crossover to force MDI manually.  Waits for link and returns
- *  successfully if link comes up, else -E1000_ERR_PHY (-2).
- **/
-s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data;
-       bool link;
-
-       DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
-
-       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
-
-       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
-        * forced whenever speed and duplex are forced.
-        */
-       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-       phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-
-       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-       if (ret_val)
-               goto out;
-
-       DEBUGOUT1("IGP PSCR: %X\n", phy_data);
-
-       usec_delay(1);
-
-       if (phy->autoneg_wait_to_complete) {
-               DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
-
-               ret_val = e1000_phy_has_link_generic(hw,
-                                                    PHY_FORCE_LIMIT,
-                                                    100000,
-                                                    &link);
-               if (ret_val)
-                       goto out;
-
-               if (!link)
-                       DEBUGOUT("Link taking longer than expected.\n");
-
-               /* Try once more */
-               ret_val = e1000_phy_has_link_generic(hw,
-                                                    PHY_FORCE_LIMIT,
-                                                    100000,
-                                                    &link);
-               if (ret_val)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
- *  @hw: pointer to the HW structure
- *
- *  Calls the PHY setup function to force speed and duplex.  Clears the
- *  auto-crossover to force MDI manually.  Resets the PHY to commit the
- *  changes.  If time expires while waiting for link up, we reset the DSP.
- *  After reset, TX_CLK and CRS on Tx must be set.  Return successful upon
- *  successful completion, else return corresponding error code.
- **/
-s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data;
-       bool link;
-
-       DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
-
-       /*
-        * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
-        * forced whenever speed and duplex are forced.
-        */
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-       if (ret_val)
-               goto out;
-
-       DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
-
-       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
-
-       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
-       if (ret_val)
-               goto out;
-
-       /* Reset the phy to commit changes. */
-       ret_val = hw->phy.ops.commit(hw);
-       if (ret_val)
-               goto out;
-
-       if (phy->autoneg_wait_to_complete) {
-               DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
-
-               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
-                                                    100000, &link);
-               if (ret_val)
-                       goto out;
-
-               if (!link) {
-                       if (hw->phy.type != e1000_phy_m88 ||
-                           hw->phy.id == I347AT4_E_PHY_ID ||
-                           hw->phy.id == M88E1340M_E_PHY_ID ||
-                           hw->phy.id == M88E1112_E_PHY_ID) {
-                               DEBUGOUT("Link taking longer than expected.\n");
-                       } else {
-                               /*
-                                * We didn't get link.
-                                * Reset the DSP and cross our fingers.
-                                */
-                               ret_val = phy->ops.write_reg(hw,
-                                               M88E1000_PHY_PAGE_SELECT,
-                                               0x001d);
-                               if (ret_val)
-                                       goto out;
-                               ret_val = e1000_phy_reset_dsp_generic(hw);
-                               if (ret_val)
-                                       goto out;
-                       }
-               }
-
-               /* Try once more */
-               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
-                                                    100000, &link);
-               if (ret_val)
-                       goto out;
-       }
-
-       if (hw->phy.type != e1000_phy_m88 ||
-           hw->phy.id == I347AT4_E_PHY_ID ||
-           hw->phy.id == M88E1340M_E_PHY_ID ||
-           hw->phy.id == M88E1112_E_PHY_ID)
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       /*
-        * Resetting the phy means we need to re-force TX_CLK in the
-        * Extended PHY Specific Control Register to 25MHz clock from
-        * the reset value of 2.5MHz.
-        */
-       phy_data |= M88E1000_EPSCR_TX_CLK_25;
-       ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-       if (ret_val)
-               goto out;
-
-       /*
-        * In addition, we must re-enable CRS on Tx for both half and full
-        * duplex.
-        */
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
- *  @hw: pointer to the HW structure
- *
- *  Forces the speed and duplex settings of the PHY.
- *  This is a function pointer entry point only called by
- *  PHY setup routines.
- **/
-s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-       bool link;
-
-       DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
-
-       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
-       if (ret_val)
-               goto out;
-
-       e1000_phy_force_speed_duplex_setup(hw, &data);
-
-       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
-       if (ret_val)
-               goto out;
-
-       /* Disable MDI-X support for 10/100 */
-       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
-       if (ret_val)
-               goto out;
-
-       data &= ~IFE_PMC_AUTO_MDIX;
-       data &= ~IFE_PMC_FORCE_MDIX;
-
-       ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
-       if (ret_val)
-               goto out;
-
-       DEBUGOUT1("IFE PMC: %X\n", data);
-
-       usec_delay(1);
-
-       if (phy->autoneg_wait_to_complete) {
-               DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
-
-               ret_val = e1000_phy_has_link_generic(hw,
-                                                    PHY_FORCE_LIMIT,
-                                                    100000,
-                                                    &link);
-               if (ret_val)
-                       goto out;
-
-               if (!link)
-                       DEBUGOUT("Link taking longer than expected.\n");
-
-               /* Try once more */
-               ret_val = e1000_phy_has_link_generic(hw,
-                                                    PHY_FORCE_LIMIT,
-                                                    100000,
-                                                    &link);
-               if (ret_val)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
- *  @hw: pointer to the HW structure
- *  @phy_ctrl: pointer to current value of PHY_CONTROL
- *
- *  Forces speed and duplex on the PHY by doing the following: disable flow
- *  control, force speed/duplex on the MAC, disable auto speed detection,
- *  disable auto-negotiation, configure duplex, configure speed, configure
- *  the collision distance, write configuration to CTRL register.  The
- *  caller must write to the PHY_CONTROL register for these settings to
- *  take effect.
- **/
-void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-       u32 ctrl;
-
-       DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
-
-       /* Turn off flow control when forcing speed/duplex */
-       hw->fc.current_mode = e1000_fc_none;
-
-       /* Force speed/duplex on the mac */
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
-       ctrl &= ~E1000_CTRL_SPD_SEL;
-
-       /* Disable Auto Speed Detection */
-       ctrl &= ~E1000_CTRL_ASDE;
-
-       /* Disable autoneg on the phy */
-       *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
-
-       /* Forcing Full or Half Duplex? */
-       if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
-               ctrl &= ~E1000_CTRL_FD;
-               *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
-               DEBUGOUT("Half Duplex\n");
-       } else {
-               ctrl |= E1000_CTRL_FD;
-               *phy_ctrl |= MII_CR_FULL_DUPLEX;
-               DEBUGOUT("Full Duplex\n");
-       }
-
-       /* Forcing 10mb or 100mb? */
-       if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
-               ctrl |= E1000_CTRL_SPD_100;
-               *phy_ctrl |= MII_CR_SPEED_100;
-               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
-               DEBUGOUT("Forcing 100mb\n");
-       } else {
-               ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
-               *phy_ctrl |= MII_CR_SPEED_10;
-               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
-               DEBUGOUT("Forcing 10mb\n");
-       }
-
-       e1000_config_collision_dist_generic(hw);
-
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-}
-
-/**
- *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns a negative error code
- *
- *  The low power link up (lplu) state is set to the power management level D3
- *  and SmartSpeed is disabled when active is TRUE; otherwise, LPLU for D3 is
- *  cleared and SmartSpeed is enabled.  LPLU and SmartSpeed are mutually
- *  exclusive.  LPLU is used during Dx states where power conservation is
- *  most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.
- **/
-s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u16 data;
-
-       DEBUGFUNC("e1000_set_d3_lplu_state_generic");
-
-       if (!(hw->phy.ops.read_reg))
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
-       if (ret_val)
-               goto out;
-
-       if (!active) {
-               data &= ~IGP02E1000_PM_D3_LPLU;
-               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                            data);
-               if (ret_val)
-                       goto out;
-               /*
-                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
-                * during Dx states where the power conservation is most
-                * important.  During driver activity we should enable
-                * SmartSpeed, so performance is maintained.
-                */
-               if (phy->smart_speed == e1000_smart_speed_on) {
-                       ret_val = phy->ops.read_reg(hw,
-                                                   IGP01E1000_PHY_PORT_CONFIG,
-                                                   &data);
-                       if (ret_val)
-                               goto out;
-
-                       data |= IGP01E1000_PSCFR_SMART_SPEED;
-                       ret_val = phy->ops.write_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    data);
-                       if (ret_val)
-                               goto out;
-               } else if (phy->smart_speed == e1000_smart_speed_off) {
-                       ret_val = phy->ops.read_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    &data);
-                       if (ret_val)
-                               goto out;
-
-                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-                       ret_val = phy->ops.write_reg(hw,
-                                                    IGP01E1000_PHY_PORT_CONFIG,
-                                                    data);
-                       if (ret_val)
-                               goto out;
-               }
-       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
-                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
-                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
-               data |= IGP02E1000_PM_D3_LPLU;
-               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                             data);
-               if (ret_val)
-                       goto out;
-
-               /* When LPLU is enabled, we should disable SmartSpeed */
-               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                            &data);
-               if (ret_val)
-                       goto out;
-
-               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                             data);
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
- *  @hw: pointer to the HW structure
- *
- *  Success returns 0, Failure returns a negative error code
- *
- *  A downshift is detected by querying the PHY link health.
- **/
-s32 e1000_check_downshift_generic(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data, offset, mask;
-
-       DEBUGFUNC("e1000_check_downshift_generic");
-
-       switch (phy->type) {
-       case e1000_phy_m88:
-       case e1000_phy_gg82563:
-               offset  = M88E1000_PHY_SPEC_STATUS;
-               mask    = M88E1000_PSSR_DOWNSHIFT;
-               break;
-       case e1000_phy_igp_2:
-       case e1000_phy_igp_3:
-               offset  = IGP01E1000_PHY_LINK_HEALTH;
-               mask    = IGP01E1000_PLHR_SS_DOWNGRADE;
-               break;
-       default:
-               /* speed downshift not supported */
-               phy->speed_downgraded = FALSE;
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
-       if (!ret_val)
-               phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_check_polarity_m88 - Checks the polarity.
- *  @hw: pointer to the HW structure
- *
- *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
- *
- *  Polarity is determined based on the PHY specific status register.
- **/
-s32 e1000_check_polarity_m88(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-
-       DEBUGFUNC("e1000_check_polarity_m88");
-
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
-
-       if (!ret_val)
-               phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
-                                     ? e1000_rev_polarity_reversed
-                                     : e1000_rev_polarity_normal;
-
-       return ret_val;
-}
-
-/**
- *  e1000_check_polarity_igp - Checks the polarity.
- *  @hw: pointer to the HW structure
- *
- *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
- *
- *  Polarity is determined based on the PHY port status register, and the
- *  current speed (since there is no polarity at 100Mbps).
- **/
-s32 e1000_check_polarity_igp(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data, offset, mask;
-
-       DEBUGFUNC("e1000_check_polarity_igp");
-
-       /*
-        * Polarity is determined based on the speed of
-        * our connection.
-        */
-       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
-       if (ret_val)
-               goto out;
-
-       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
-           IGP01E1000_PSSR_SPEED_1000MBPS) {
-               offset  = IGP01E1000_PHY_PCS_INIT_REG;
-               mask    = IGP01E1000_PHY_POLARITY_MASK;
-       } else {
-               /*
-                * This really only applies to 10Mbps since
-                * there is no polarity for 100Mbps (always 0).
-                */
-               offset  = IGP01E1000_PHY_PORT_STATUS;
-               mask    = IGP01E1000_PSSR_POLARITY_REVERSED;
-       }
-
-       ret_val = phy->ops.read_reg(hw, offset, &data);
-
-       if (!ret_val)
-               phy->cable_polarity = (data & mask)
-                                     ? e1000_rev_polarity_reversed
-                                     : e1000_rev_polarity_normal;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
- *  @hw: pointer to the HW structure
- *
- *  Polarity is determined based on the polarity reversal feature being enabled.
- **/
-s32 e1000_check_polarity_ife(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data, offset, mask;
-
-       DEBUGFUNC("e1000_check_polarity_ife");
-
-       /*
-        * Polarity is determined based on the reversal feature being enabled.
-        */
-       if (phy->polarity_correction) {
-               offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
-               mask = IFE_PESC_POLARITY_REVERSED;
-       } else {
-               offset = IFE_PHY_SPECIAL_CONTROL;
-               mask = IFE_PSC_FORCE_POLARITY;
-       }
-
-       ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
-       if (!ret_val)
-               phy->cable_polarity = (phy_data & mask)
-                                      ? e1000_rev_polarity_reversed
-                                      : e1000_rev_polarity_normal;
-
-       return ret_val;
-}
-
-/**
- *  e1000_wait_autoneg_generic - Wait for auto-neg completion
- *  @hw: pointer to the HW structure
- *
- *  Waits for auto-negotiation to complete or for the auto-negotiation time
- *  limit to expire, whichever happens first.
- **/
-s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 i, phy_status;
-
-       DEBUGFUNC("e1000_wait_autoneg_generic");
-
-       if (!(hw->phy.ops.read_reg))
-               return E1000_SUCCESS;
-
-       /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
-       for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
-               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
-               if (ret_val)
-                       break;
-               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
-               if (ret_val)
-                       break;
-               if (phy_status & MII_SR_AUTONEG_COMPLETE)
-                       break;
-               msec_delay(100);
-       }
-
-       /*
-        * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
-        * has completed.
-        */
-       return ret_val;
-}
-
-/**
- *  e1000_phy_has_link_generic - Polls PHY for link
- *  @hw: pointer to the HW structure
- *  @iterations: number of times to poll for link
- *  @usec_interval: delay between polling attempts
- *  @success: pointer to whether polling was successful or not
- *
- *  Polls the PHY status register for link, 'iterations' number of times.
- **/
-s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
-                               u32 usec_interval, bool *success)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 i, phy_status;
-
-       DEBUGFUNC("e1000_phy_has_link_generic");
-
-       if (!(hw->phy.ops.read_reg))
-               return E1000_SUCCESS;
-
-       for (i = 0; i < iterations; i++) {
-               /*
-                * Some PHYs require the PHY_STATUS register to be read
-                * twice due to the link bit being sticky.  No harm doing
-                * it across the board.
-                */
-               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
-               if (ret_val)
-                       /*
-                        * If the first read fails, another entity may have
-                        * ownership of the resources, wait and try again to
-                        * see if they have relinquished the resources yet.
-                        */
-                       usec_delay(usec_interval);
-               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
-               if (ret_val)
-                       break;
-               if (phy_status & MII_SR_LINK_STATUS)
-                       break;
-               if (usec_interval >= 1000)
-                       msec_delay_irq(usec_interval/1000);
-               else
-                       usec_delay(usec_interval);
-       }
-
-       *success = (i < iterations) ? TRUE : FALSE;
-
-       return ret_val;
-}
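For illustration only, a minimal caller sketch of the polling helper above (the iteration count and interval are hypothetical values, and hw is assumed to be an initialized struct e1000_hw pointer from the surrounding driver code):

        bool link = FALSE;
        s32 ret;

        /* Poll the PHY up to 10 times, 100 usec apart, for link. */
        ret = e1000_phy_has_link_generic(hw, 10, 100, &link);
        if (ret == E1000_SUCCESS && link)
                DEBUGOUT("PHY link is up\n");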
-
-/**
- *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
- *  @hw: pointer to the HW structure
- *
- *  Reads the PHY specific status register to retrieve the cable length
- *  information.  The cable length is determined by averaging the minimum and
- *  maximum values to get the "average" cable length.  The m88 PHY has five
- *  possible cable length values, which are:
- *     Register Value          Cable Length
- *     0                       < 50 meters
- *     1                       50 - 80 meters
- *     2                       80 - 110 meters
- *     3                       110 - 140 meters
- *     4                       > 140 meters
- **/
-s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data, index;
-
-       DEBUGFUNC("e1000_get_cable_length_m88");
-
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-       if (ret_val)
-               goto out;
-
-       index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-               M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-       if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-
-       phy->min_cable_length = e1000_m88_cable_length_table[index];
-       phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
-
-       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-
-out:
-       return ret_val;
-}
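As a worked example of the averaging above (assuming the cable length table entries correspond to the ranges listed in the comment): a register value of 2 selects roughly 80 m as the minimum and 110 m as the maximum, so the reported cable_length is their average, about 95 m.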
-
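-/**
- *  e1000_get_cable_length_m88_gen2 - Determine cable length for m88 gen2 PHY
- *  @hw: pointer to the HW structure
- *
- *  Reads the cable diagnostic registers (I347AT4/M88E1340M) or the VCT DSP
- *  distance register (M88E1112), depending on the PHY id, and stores the
- *  resulting cable length in meters.
- **/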
-s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data, phy_data2, index, default_page, is_cm;
-
-       DEBUGFUNC("e1000_get_cable_length_m88_gen2");
-
-       switch (hw->phy.id) {
-       case M88E1340M_E_PHY_ID:
-       case I347AT4_E_PHY_ID:
-               /* Remember the original page select and set it to 7 */
-               ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
-                                           &default_page);
-               if (ret_val)
-                       goto out;
-
-               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
-               if (ret_val)
-                       goto out;
-
-               /* Get cable length from PHY Cable Diagnostics Control Reg */
-               ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
-                                           &phy_data);
-               if (ret_val)
-                       goto out;
-
-               /* Check if the unit of cable length is meters or cm */
-               ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
-               if (ret_val)
-                       goto out;
-
-               is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
-
-               /* Populate the phy structure with cable length in meters */
-               phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
-               phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
-               phy->cable_length = phy_data / (is_cm ? 100 : 1);
-
-               /* Reset the page select to its original value */
-               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
-                                            default_page);
-               if (ret_val)
-                       goto out;
-               break;
-       case M88E1112_E_PHY_ID:
-               /* Remember the original page select and set it to 5 */
-               ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
-                                           &default_page);
-               if (ret_val)
-                       goto out;
-
-               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
-               if (ret_val)
-                       goto out;
-
-               ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
-                                           &phy_data);
-               if (ret_val)
-                       goto out;
-
-               index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-               if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
-                       ret_val = -E1000_ERR_PHY;
-                       goto out;
-               }
-
-               phy->min_cable_length = e1000_m88_cable_length_table[index];
-               phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
-
-               phy->cable_length = (phy->min_cable_length +
-                                    phy->max_cable_length) / 2;
-
-               /* Reset the page select to its original value */
-               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
-                                            default_page);
-               if (ret_val)
-                       goto out;
-
-               break;
-       default:
-               ret_val = -E1000_ERR_PHY;
-               goto out;
-       }
-
-out:
-       return ret_val;
-}
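A note on the unit handling above: when the cable diagnostics control register reports centimetre units, the raw distance is divided by 100 before being stored, so a hypothetical reading of 2500 cm becomes 25 m; in metre mode the value is stored unchanged.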
-
-/**
- *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
- *  @hw: pointer to the HW structure
- *
- *  The automatic gain control (agc) normalizes the amplitude of the
- *  received signal, adjusting for the attenuation produced by the
- *  cable.  By reading the AGC registers, which represent the
- *  combination of coarse and fine gain value, the value can be put
- *  into a lookup table to obtain the approximate cable length
- *  for each channel.
- **/
-s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u16 phy_data, i, agc_value = 0;
-       u16 cur_agc_index, max_agc_index = 0;
-       u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
-       static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
-              IGP02E1000_PHY_AGC_A,
-              IGP02E1000_PHY_AGC_B,
-              IGP02E1000_PHY_AGC_C,
-              IGP02E1000_PHY_AGC_D
-       };
-
-       DEBUGFUNC("e1000_get_cable_length_igp_2");
-
-       /* Read the AGC registers for all channels */
-       for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
-               ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
-               if (ret_val)
-                       goto out;
-
-               /*
-                * Getting bits 15:9, which represent the combination of
-                * coarse and fine gain values.  The result is a number
-                * that can be put into the lookup table to obtain the
-                * approximate cable length.
-                */
-               cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
-                               IGP02E1000_AGC_LENGTH_MASK;
-
-               /* Array index bound check. */
-               if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
-                   (cur_agc_index == 0)) {
-                       ret_val = -E1000_ERR_PHY;
-                       goto out;
-               }
-
-               /* Remove min & max AGC values from calculation. */
-               if (e1000_igp_2_cable_length_table[min_agc_index] >
-                   e1000_igp_2_cable_length_table[cur_agc_index])
-                       min_agc_index = cur_agc_index;
-               if (e1000_igp_2_cable_length_table[max_agc_index] <
-                   e1000_igp_2_cable_length_table[cur_agc_index])
-                       max_agc_index = cur_agc_index;
-
-               agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
-       }
-
-       agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
-                     e1000_igp_2_cable_length_table[max_agc_index]);
-       agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
-
-       /* Calculate cable length with the error range of +/- 10 meters. */
-       phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
-                                (agc_value - IGP02E1000_AGC_RANGE) : 0;
-       phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
-
-       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-
-out:
-       return ret_val;
-}
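As a worked example of the AGC averaging above (channel values are hypothetical): if the four table lookups yield 30, 40, 50 and 120 m, the sum is 240; subtracting the minimum (30) and maximum (120) leaves 90, and dividing by the two remaining channels gives 45 m, reported as 45 m +/- IGP02E1000_AGC_RANGE.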
-
-/**
- *  e1000_get_phy_info_m88 - Retrieve PHY information
- *  @hw: pointer to the HW structure
- *
- *  Valid for only copper links.  Read the PHY status register (sticky read)
- *  to verify that link is up.  Read the PHY special control register to
- *  determine the polarity and 10base-T extended distance.  Read the PHY
- *  special status register to determine MDI/MDIx and current speed.  If
- *  speed is 1000, then determine cable length, local and remote receiver.
- **/
-s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32  ret_val;
-       u16 phy_data;
-       bool link;
-
-       DEBUGFUNC("e1000_get_phy_info_m88");
-
-       if (phy->media_type != e1000_media_type_copper) {
-               DEBUGOUT("Phy info is only valid for copper media\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               goto out;
-
-       if (!link) {
-               DEBUGOUT("Phy info is only valid if link is up\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
-                                  ? TRUE : FALSE;
-
-       ret_val = e1000_check_polarity_m88(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-       if (ret_val)
-               goto out;
-
-       phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE;
-
-       if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
-               ret_val = hw->phy.ops.get_cable_length(hw);
-               if (ret_val)
-                       goto out;
-
-               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
-               if (ret_val)
-                       goto out;
-
-               phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
-                               ? e1000_1000t_rx_status_ok
-                               : e1000_1000t_rx_status_not_ok;
-
-               phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
-                                ? e1000_1000t_rx_status_ok
-                                : e1000_1000t_rx_status_not_ok;
-       } else {
-               /* Set values to "undefined" */
-               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
-               phy->local_rx = e1000_1000t_rx_status_undefined;
-               phy->remote_rx = e1000_1000t_rx_status_undefined;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_get_phy_info_igp - Retrieve igp PHY information
- *  @hw: pointer to the HW structure
- *
- *  Read PHY status to determine if link is up.  If link is up, then
- *  set/determine 10base-T extended distance and polarity correction.  Read
- *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
- *  determine the cable length, local and remote receiver.
- **/
-s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-       bool link;
-
-       DEBUGFUNC("e1000_get_phy_info_igp");
-
-       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               goto out;
-
-       if (!link) {
-               DEBUGOUT("Phy info is only valid if link is up\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       phy->polarity_correction = TRUE;
-
-       ret_val = e1000_check_polarity_igp(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
-       if (ret_val)
-               goto out;
-
-       phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE;
-
-       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
-           IGP01E1000_PSSR_SPEED_1000MBPS) {
-               ret_val = phy->ops.get_cable_length(hw);
-               if (ret_val)
-                       goto out;
-
-               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
-               if (ret_val)
-                       goto out;
-
-               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
-                               ? e1000_1000t_rx_status_ok
-                               : e1000_1000t_rx_status_not_ok;
-
-               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
-                                ? e1000_1000t_rx_status_ok
-                                : e1000_1000t_rx_status_not_ok;
-       } else {
-               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
-               phy->local_rx = e1000_1000t_rx_status_undefined;
-               phy->remote_rx = e1000_1000t_rx_status_undefined;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_get_phy_info_ife - Retrieves various IFE PHY states
- *  @hw: pointer to the HW structure
- *
- *  Populates "phy" structure with various feature states.
- **/
-s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-       bool link;
-
-       DEBUGFUNC("e1000_get_phy_info_ife");
-
-       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               goto out;
-
-       if (!link) {
-               DEBUGOUT("Phy info is only valid if link is up\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
-       if (ret_val)
-               goto out;
-       phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
-                                  ? FALSE : TRUE;
-
-       if (phy->polarity_correction) {
-               ret_val = e1000_check_polarity_ife(hw);
-               if (ret_val)
-                       goto out;
-       } else {
-               /* Polarity is forced */
-               phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
-                                     ? e1000_rev_polarity_reversed
-                                     : e1000_rev_polarity_normal;
-       }
-
-       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
-       if (ret_val)
-               goto out;
-
-       phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
-
-       /* The following parameters are undefined for 10/100 operation. */
-       phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
-       phy->local_rx = e1000_1000t_rx_status_undefined;
-       phy->remote_rx = e1000_1000t_rx_status_undefined;
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_sw_reset_generic - PHY software reset
- *  @hw: pointer to the HW structure
- *
- *  Does a software reset of the PHY by reading the PHY control register and
- *  setting/writing the control register reset bit to the PHY.
- **/
-s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
-{
-       s32 ret_val = E1000_SUCCESS;
-       u16 phy_ctrl;
-
-       DEBUGFUNC("e1000_phy_sw_reset_generic");
-
-       if (!(hw->phy.ops.read_reg))
-               goto out;
-
-       ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
-       if (ret_val)
-               goto out;
-
-       phy_ctrl |= MII_CR_RESET;
-       ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
-       if (ret_val)
-               goto out;
-
-       usec_delay(1);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_phy_hw_reset_generic - PHY hardware reset
- *  @hw: pointer to the HW structure
- *
- *  Verify the reset block is not blocking us from resetting.  Acquire
- *  semaphore (if necessary) and read/set/write the PHY reset bit in the
- *  device control register.  Wait the appropriate delay for the device to
- *  reset and release the semaphore (if necessary).
- **/
-s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val = E1000_SUCCESS;
-       u32 ctrl;
-
-       DEBUGFUNC("e1000_phy_hw_reset_generic");
-
-       ret_val = phy->ops.check_reset_block(hw);
-       if (ret_val) {
-               ret_val = E1000_SUCCESS;
-               goto out;
-       }
-
-       ret_val = phy->ops.acquire(hw);
-       if (ret_val)
-               goto out;
-
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
-       E1000_WRITE_FLUSH(hw);
-
-       usec_delay(phy->reset_delay_us);
-
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-       E1000_WRITE_FLUSH(hw);
-
-       usec_delay(150);
-
-       phy->ops.release(hw);
-
-       ret_val = phy->ops.get_cfg_done(hw);
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_get_cfg_done_generic - Generic configuration done
- *  @hw: pointer to the HW structure
- *
- *  Generic function to wait 10 milliseconds for configuration to complete
- *  and return success.
- **/
-s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_get_cfg_done_generic");
-
-       msec_delay_irq(10);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
- *  @hw: pointer to the HW structure
- *
- *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
- **/
-s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
-{
-       DEBUGOUT("Running IGP 3 PHY init script\n");
-
-       /* PHY init IGP 3 */
-       /* Enable rise/fall, 10-mode work in class-A */
-       hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
-       /* Remove all caps from Replica path filter */
-       hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
-       /* Bias trimming for ADC, AFE and Driver (Default) */
-       hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
-       /* Increase Hybrid poly bias */
-       hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
-       /* Add 4% to Tx amplitude in Gig mode */
-       hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
-       /* Disable trimming (TTT) */
-       hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
-       /* Poly DC correction to 94.6% + 2% for all channels */
-       hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
-       /* ABS DC correction to 95.9% */
-       hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
-       /* BG temp curve trim */
-       hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
-       /* Increasing ADC OPAMP stage 1 currents to max */
-       hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
-       /* Force 1000 (required for enabling PHY regs configuration) */
-       hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
-       /* Set upd_freq to 6 */
-       hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
-       /* Disable NPDFE */
-       hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
-       /* Disable adaptive fixed FFE (Default) */
-       hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
-       /* Enable FFE hysteresis */
-       hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
-       /* Fixed FFE for short cable lengths */
-       hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
-       /* Fixed FFE for medium cable lengths */
-       hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
-       /* Fixed FFE for long cable lengths */
-       hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
-       /* Enable Adaptive Clip Threshold */
-       hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
-       /* AHT reset limit to 1 */
-       hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
-       /* Set AHT master delay to 127 msec */
-       hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
-       /* Set scan bits for AHT */
-       hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
-       /* Set AHT Preset bits */
-       hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
-       /* Change integ_factor of channel A to 3 */
-       hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
-       /* Change prop_factor of channels BCD to 8 */
-       hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
-       /* Change cg_icount + enable integbp for channels BCD */
-       hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
-       /*
-        * Change cg_icount + enable integbp + change prop_factor_master
-        * to 8 for channel A
-        */
-       hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
-       /* Disable AHT in Slave mode on channel A */
-       hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
-       /*
-        * Enable LPLU and disable AN to 1000 in non-D0a states,
-        * Enable SPD+B2B
-        */
-       hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
-       /* Enable restart AN on an1000_dis change */
-       hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
-       /* Enable wh_fifo read clock in 10/100 modes */
-       hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
-       /* Restart AN, Speed selection is 1000 */
-       hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_get_phy_type_from_id - Get PHY type from id
- *  @phy_id: phy_id read from the phy
- *
- *  Returns the phy type from the id.
- **/
-enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
-{
-       enum e1000_phy_type phy_type = e1000_phy_unknown;
-
-       switch (phy_id) {
-       case M88E1000_I_PHY_ID:
-       case M88E1000_E_PHY_ID:
-       case M88E1111_I_PHY_ID:
-       case M88E1011_I_PHY_ID:
-       case I347AT4_E_PHY_ID:
-       case M88E1112_E_PHY_ID:
-       case M88E1340M_E_PHY_ID:
-               phy_type = e1000_phy_m88;
-               break;
-       case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
-               phy_type = e1000_phy_igp_2;
-               break;
-       case GG82563_E_PHY_ID:
-               phy_type = e1000_phy_gg82563;
-               break;
-       case IGP03E1000_E_PHY_ID:
-               phy_type = e1000_phy_igp_3;
-               break;
-       case IFE_E_PHY_ID:
-       case IFE_PLUS_E_PHY_ID:
-       case IFE_C_E_PHY_ID:
-               phy_type = e1000_phy_ife;
-               break;
-       case I82580_I_PHY_ID:
-               phy_type = e1000_phy_82580;
-               break;
-       default:
-               phy_type = e1000_phy_unknown;
-               break;
-       }
-       return phy_type;
-}
-
-/**
- *  e1000_determine_phy_address - Determines PHY address.
- *  @hw: pointer to the HW structure
- *
- *  This uses a trial and error method to loop through possible PHY
- *  addresses. It tests each by reading the PHY ID registers and
- *  checking for a match.
- **/
-s32 e1000_determine_phy_address(struct e1000_hw *hw)
-{
-       s32 ret_val = -E1000_ERR_PHY_TYPE;
-       u32 phy_addr = 0;
-       u32 i;
-       enum e1000_phy_type phy_type = e1000_phy_unknown;
-
-       hw->phy.id = phy_type;
-
-       for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
-               hw->phy.addr = phy_addr;
-               i = 0;
-
-               do {
-                       e1000_get_phy_id(hw);
-                       phy_type = e1000_get_phy_type_from_id(hw->phy.id);
-
-                       /*
-                        * If phy_type is valid, break - we found our
-                        * PHY address
-                        */
-                       if (phy_type != e1000_phy_unknown) {
-                               ret_val = E1000_SUCCESS;
-                               goto out;
-                       }
-                       msec_delay(1);
-                       i++;
-               } while (i < 10);
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
- * @hw: pointer to the HW structure
- *
- * If the PHY was powered down to save power, to turn off link during a
- * driver unload, or because wake on LAN is not enabled, restore the link
- * to its previous settings.
- **/
-void e1000_power_up_phy_copper(struct e1000_hw *hw)
-{
-       u16 mii_reg = 0;
-
-       /* The PHY will retain its settings across a power down/up cycle */
-       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
-       mii_reg &= ~MII_CR_POWER_DOWN;
-       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-}
-
-/**
- * e1000_power_down_phy_copper - Power down copper PHY
- * @hw: pointer to the HW structure
- *
- * Powers down the PHY to save power, to turn off link during a driver
- * unload, or when wake on LAN is not enabled.  The PHY retains its
- * settings across the power down/up cycle.
- **/
-void e1000_power_down_phy_copper(struct e1000_hw *hw)
-{
-       u16 mii_reg = 0;
-
-       /* The PHY will retain its settings across a power down/up cycle */
-       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
-       mii_reg |= MII_CR_POWER_DOWN;
-       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-       msec_delay(1);
-}
-
-/**
- *  e1000_check_polarity_82577 - Checks the polarity.
- *  @hw: pointer to the HW structure
- *
- *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
- *
- *  Polarity is determined based on the PHY specific status register.
- **/
-s32 e1000_check_polarity_82577(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-
-       DEBUGFUNC("e1000_check_polarity_82577");
-
-       ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
-
-       if (!ret_val)
-               phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
-                                     ? e1000_rev_polarity_reversed
-                                     : e1000_rev_polarity_normal;
-
-       return ret_val;
-}
-
-/**
- *  e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
- *  @hw: pointer to the HW structure
- *
- *  Calls the PHY setup function to force speed and duplex.
- **/
-s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data;
-       bool link;
-
-       DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
-
-       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
-       if (ret_val)
-               goto out;
-
-       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
-
-       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
-       if (ret_val)
-               goto out;
-
-       usec_delay(1);
-
-       if (phy->autoneg_wait_to_complete) {
-               DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
-
-               ret_val = e1000_phy_has_link_generic(hw,
-                                                    PHY_FORCE_LIMIT,
-                                                    100000,
-                                                    &link);
-               if (ret_val)
-                       goto out;
-
-               if (!link)
-                       DEBUGOUT("Link taking longer than expected.\n");
-
-               /* Try once more */
-               ret_val = e1000_phy_has_link_generic(hw,
-                                                    PHY_FORCE_LIMIT,
-                                                    100000,
-                                                    &link);
-               if (ret_val)
-                       goto out;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_get_phy_info_82577 - Retrieve I82577 PHY information
- *  @hw: pointer to the HW structure
- *
- *  Read PHY status to determine if link is up.  If link is up, then
- *  set/determine 10base-T extended distance and polarity correction.  Read
- *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
- *  determine the cable length, local and remote receiver.
- **/
-s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 data;
-       bool link;
-
-       DEBUGFUNC("e1000_get_phy_info_82577");
-
-       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               goto out;
-
-       if (!link) {
-               DEBUGOUT("Phy info is only valid if link is up\n");
-               ret_val = -E1000_ERR_CONFIG;
-               goto out;
-       }
-
-       phy->polarity_correction = TRUE;
-
-       ret_val = e1000_check_polarity_82577(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
-       if (ret_val)
-               goto out;
-
-       phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? TRUE : FALSE;
-
-       if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
-           I82577_PHY_STATUS2_SPEED_1000MBPS) {
-               ret_val = hw->phy.ops.get_cable_length(hw);
-               if (ret_val)
-                       goto out;
-
-               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
-               if (ret_val)
-                       goto out;
-
-               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
-                               ? e1000_1000t_rx_status_ok
-                               : e1000_1000t_rx_status_not_ok;
-
-               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
-                                ? e1000_1000t_rx_status_ok
-                                : e1000_1000t_rx_status_not_ok;
-       } else {
-               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
-               phy->local_rx = e1000_1000t_rx_status_undefined;
-               phy->remote_rx = e1000_1000t_rx_status_undefined;
-       }
-
-out:
-       return ret_val;
-}
-
-/**
- *  e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
- *  @hw: pointer to the HW structure
- *
- * Reads the diagnostic status register and verifies result is valid before
- * placing it in the phy_cable_length field.
- **/
-s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
-{
-       struct e1000_phy_info *phy = &hw->phy;
-       s32 ret_val;
-       u16 phy_data, length;
-
-       DEBUGFUNC("e1000_get_cable_length_82577");
-
-       ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
-       if (ret_val)
-               goto out;
-
-       length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
-                I82577_DSTATUS_CABLE_LENGTH_SHIFT;
-
-       if (length == E1000_CABLE_LENGTH_UNDEFINED)
-               ret_val = -E1000_ERR_PHY;
-
-       phy->cable_length = length;
-
-out:
-       return ret_val;
-}
diff --git a/lib/librte_pmd_igb/igb/e1000_phy.h b/lib/librte_pmd_igb/igb/e1000_phy.h
deleted file mode 100644 (file)
index 1b21430..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_PHY_H_
-#define _E1000_PHY_H_
-
-void e1000_init_phy_ops_generic(struct e1000_hw *hw);
-s32  e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-void e1000_null_phy_generic(struct e1000_hw *hw);
-s32  e1000_null_lplu_state(struct e1000_hw *hw, bool active);
-s32  e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_check_downshift_generic(struct e1000_hw *hw);
-s32  e1000_check_polarity_m88(struct e1000_hw *hw);
-s32  e1000_check_polarity_igp(struct e1000_hw *hw);
-s32  e1000_check_polarity_ife(struct e1000_hw *hw);
-s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
-s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
-s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
-s32  e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
-s32  e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
-s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
-s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
-s32  e1000_get_phy_id(struct e1000_hw *hw);
-s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
-s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
-s32  e1000_get_phy_info_ife(struct e1000_hw *hw);
-s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
-void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
-s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
-s32  e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
-s32  e1000_setup_copper_link_generic(struct e1000_hw *hw);
-s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
-s32  e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
-s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
-                                u32 usec_interval, bool *success);
-s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
-enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
-s32  e1000_determine_phy_address(struct e1000_hw *hw);
-void e1000_power_up_phy_copper(struct e1000_hw *hw);
-void e1000_power_down_phy_copper(struct e1000_hw *hw);
-s32  e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_copper_link_setup_82577(struct e1000_hw *hw);
-s32  e1000_check_polarity_82577(struct e1000_hw *hw);
-s32  e1000_get_phy_info_82577(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-s32  e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-#define E1000_MAX_PHY_ADDR                4
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
-#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
-#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
-#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT                22   /* Page Select for BM */
-#define IGP_PAGE_SHIFT                    5
-#define PHY_REG_MASK                      0x1F
-
-#define HV_INTC_FC_PAGE_START             768
-#define I82578_ADDR_REG                   29
-#define I82577_ADDR_REG                   16
-#define I82577_CFG_REG                    22
-#define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
-#define I82577_CTRL_REG                   23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2            18
-#define I82577_PHY_LBK_CTRL          19
-#define I82577_PHY_STATUS_2          26
-#define I82577_PHY_DIAG_STATUS       31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
-#define I82577_PHY_STATUS2_MDIX           0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-#define I82577_PHY_STATUS2_SPEED_100MBPS  0x0100
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* 82580 PHY Power Management */
-#define E1000_82580_PHY_POWER_MGMT        0xE14
-#define E1000_82580_PM_SPD                0x0001 /* Smart Power Down */
-#define E1000_82580_PM_D0_LPLU            0x0002 /* For D0a states */
-#define E1000_82580_PM_D3_LPLU            0x0004 /* For all other states */
-
-#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK      0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
-
-/* Enable flexible speed on link-up */
-#define IGP01E1000_GMII_FLEX_SPD          0x0010
-#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
-
-#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX              0x0800
-#define IGP01E1000_PSSR_SPEED_MASK        0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM        4
-#define IGP02E1000_PHY_AGC_A              0x11B1
-#define IGP02E1000_PHY_AGC_B              0x12B1
-#define IGP02E1000_PHY_AGC_C              0x14B1
-#define IGP02E1000_PHY_AGC_D              0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK        0x7F
-#define IGP02E1000_AGC_RANGE              15
-
-#define IGP03E1000_PHY_MISC_CTRL          0x1B
-#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
-
-#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
-
-#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
-#define E1000_KMRNCTRLSTA_REN             0x00200000
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS        0x4    /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM    0x9    /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE   0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
-#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED    0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
-#define IFE_PSC_FORCE_POLARITY             0x0020
-#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE            0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
-
-#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_regs.h b/lib/librte_pmd_igb/igb/e1000_regs.h
deleted file mode 100644 (file)
index 6b902ea..0000000
+++ /dev/null
@@ -1,574 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_REGS_H_
-#define _E1000_REGS_H_
-
-#define E1000_CTRL     0x00000  /* Device Control - RW */
-#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
-#define E1000_STATUS   0x00008  /* Device Status - RO */
-#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
-#define E1000_EERD     0x00014  /* EEPROM Read - RW */
-#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
-#define E1000_FLA      0x0001C  /* Flash Access - RW */
-#define E1000_MDIC     0x00020  /* MDI Control - RW */
-#define E1000_MDICNFG  0x00E04  /* MDI Config - RW */
-#define E1000_REGISTER_SET_SIZE        0x20000 /* CSR Size */
-#define E1000_EEPROM_INIT_CTRL_WORD_2  0x0F /* EEPROM Init Ctrl Word 2 */
-#define E1000_BARCTRL                  0x5BBC /* BAR ctrl reg */
-#define E1000_BARCTRL_FLSIZE           0x0700 /* BAR ctrl Flsize */
-#define E1000_BARCTRL_CSRSIZE          0x2000 /* BAR ctrl CSR size */
-#define E1000_SCTL     0x00024  /* SerDes Control - RW */
-#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
-#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
-#define E1000_FEXT     0x0002C  /* Future Extended - RW */
-#define E1000_FEXTNVM4 0x00024  /* Future Extended NVM 4 - RW */
-#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
-#define E1000_FCT      0x00030  /* Flow Control Type - RW */
-#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
-#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
-#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
-#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
-#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
-#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
-#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
-#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
-#define E1000_RCTL     0x00100  /* Rx Control - RW */
-#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
-#define E1000_TXCW     0x00178  /* Tx Configuration Word - RW */
-#define E1000_RXCW     0x00180  /* Rx Configuration Word - RO */
-#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
-#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
-#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_GPIE     0x01514  /* General Purpose Interrupt Enable - RW */
-#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-#define E1000_TCTL     0x00400  /* Tx Control - RW */
-#define E1000_TCTL_EXT 0x00404  /* Extended Tx Control - RW */
-#define E1000_TIPG     0x00410  /* Tx Inter-packet gap -RW */
-#define E1000_TBT      0x00448  /* Tx Burst Timer - RW */
-#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
-#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
-#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
-#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
-#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
-#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
-#define E1000_PBS      0x01008  /* Packet Buffer Size */
-#define E1000_EEMNGCTL 0x01010  /* MNG EEPROM Control */
-#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
-#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
-#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
-#define E1000_FLSWCTL  0x01030  /* FLASH control register */
-#define E1000_FLSWDATA 0x01034  /* FLASH data register */
-#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
-#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
-#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
-#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
-#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
-#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
-#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
-#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
-#define E1000_VPDDIAG  0x01060  /* VPD Diagnostic - RO */
-#define E1000_ICR_V2   0x01500  /* Interrupt Cause - new location - RC */
-#define E1000_ICS_V2   0x01504  /* Interrupt Cause Set - new location - WO */
-#define E1000_IMS_V2   0x01508  /* Interrupt Mask Set/Read - new location - RW */
-#define E1000_IMC_V2   0x0150C  /* Interrupt Mask Clear - new location - WO */
-#define E1000_IAM_V2   0x01510  /* Interrupt Ack Auto Mask - new location - RW */
-#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
-#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
-#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
-#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
-#define E1000_RDFPCQ(_n)  (0x02430 + (0x4 * (_n)))
-#define E1000_PBRTH    0x02458  /* PB Rx Arbitration Threshold - RW */
-#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
-/* Split and Replication Rx Control - RW */
-#define E1000_RDPUMB   0x025CC  /* DMA Rx Descriptor uC Mailbox - RW */
-#define E1000_RDPUAD   0x025D0  /* DMA Rx Descriptor uC Addr Command - RW */
-#define E1000_RDPUWD   0x025D4  /* DMA Rx Descriptor uC Data Write - RW */
-#define E1000_RDPURD   0x025D8  /* DMA Rx Descriptor uC Data Read - RW */
-#define E1000_RDPUCTL  0x025DC  /* DMA Rx Descriptor uC Control - RW */
-#define E1000_PBDIAG   0x02458  /* Packet Buffer Diagnostic - RW */
-#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
-#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer adapters - RW */
-#define E1000_PBRWAC   0x024E8 /* Rx packet buffer wrap around counter - RO */
-#define E1000_RDTR     0x02820  /* Rx Delay Timer - RW */
-#define E1000_RADV     0x0282C  /* Rx Interrupt Absolute Delay Timer - RW */
-/*
- * Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
-#define E1000_RDBAL(_n)      ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
-                                         (0x0C000 + ((_n) * 0x40)))
-#define E1000_RDBAH(_n)      ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
-                                         (0x0C004 + ((_n) * 0x40)))
-#define E1000_RDLEN(_n)      ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
-                                         (0x0C008 + ((_n) * 0x40)))
-#define E1000_SRRCTL(_n)     ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
-                                         (0x0C00C + ((_n) * 0x40)))
-#define E1000_RDH(_n)        ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
-                                         (0x0C010 + ((_n) * 0x40)))
-#define E1000_RXCTL(_n)      ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
-                                         (0x0C014 + ((_n) * 0x40)))
-#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
-#define E1000_RDT(_n)        ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
-                                         (0x0C018 + ((_n) * 0x40)))
-#define E1000_RXDCTL(_n)     ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
-                                         (0x0C028 + ((_n) * 0x40)))
-#define E1000_RQDPC(_n)      ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
-                                         (0x0C030 + ((_n) * 0x40)))
-#define E1000_TDBAL(_n)      ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
-                                         (0x0E000 + ((_n) * 0x40)))
-#define E1000_TDBAH(_n)      ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
-                                         (0x0E004 + ((_n) * 0x40)))
-#define E1000_TDLEN(_n)      ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
-                                         (0x0E008 + ((_n) * 0x40)))
-#define E1000_TDH(_n)        ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
-                                         (0x0E010 + ((_n) * 0x40)))
-#define E1000_TXCTL(_n)      ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
-                                         (0x0E014 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
-#define E1000_TDT(_n)        ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
-                                         (0x0E018 + ((_n) * 0x40)))
-#define E1000_TXDCTL(_n)     ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
-                                         (0x0E028 + ((_n) * 0x40)))
-#define E1000_TDWBAL(_n)     ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
-                                         (0x0E038 + ((_n) * 0x40)))
-#define E1000_TDWBAH(_n)     ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
-                                         (0x0E03C + ((_n) * 0x40)))
-#define E1000_TARC(_n)                   (0x03840 + ((_n) * 0x100))
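The queue-indexed macros above split each ring's registers over two blocks: a 0x100 stride for queues 0-3 and a 0x40 stride starting at 0x0C000/0x0E000 for queues 4 and up. A minimal usage sketch (not part of the original header), assuming the struct e1000_hw and the E1000_WRITE_REG() helper from e1000_osdep.h:

static inline void
example_update_rx_tail(struct e1000_hw *hw, unsigned int queue, u32 tail)
{
	/* For queue 5 this resolves to 0x0C018 + 5 * 0x40 = 0x0C158. */
	E1000_WRITE_REG(hw, E1000_RDT(queue), tail);
}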
-#define E1000_RSRPD    0x02C00  /* Rx Small Packet Detect - RW */
-#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
-#define E1000_TXDMAC   0x03000  /* Tx DMA Control - RW */
-#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
-#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
-#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                                       (0x054E0 + ((_i - 16) * 8)))
-#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                                       (0x054E4 + ((_i - 16) * 8)))
-#define E1000_SHRAL(_i)         (0x05438 + ((_i) * 8))
-#define E1000_SHRAH(_i)         (0x0543C + ((_i) * 8))
-#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
-#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
-#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
-#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
-#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
-#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
-#define E1000_PBSLAC   0x03100  /* Packet Buffer Slave Access Control */
-#define E1000_PBSLAD(_n)  (0x03110 + (0x4 * (_n)))  /* Packet Buffer DWORD (_n) */
-#define E1000_TXPBS    0x03404  /* Tx Packet Buffer Size - RW */
-#define E1000_ITPBS   0x03404   /* Same as TXPBS, renamed for newer adapters - RW */
-#define E1000_TDFH     0x03410  /* Tx Data FIFO Head - RW */
-#define E1000_TDFT     0x03418  /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS    0x03420  /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS    0x03428  /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC    0x03430  /* Tx Data FIFO Packet Count - RW */
-#define E1000_TDPUMB   0x0357C  /* DMA Tx Descriptor uC Mail Box - RW */
-#define E1000_TDPUAD   0x03580  /* DMA Tx Descriptor uC Addr Command - RW */
-#define E1000_TDPUWD   0x03584  /* DMA Tx Descriptor uC Data Write - RW */
-#define E1000_TDPURD   0x03588  /* DMA Tx Descriptor uC Data  Read  - RW */
-#define E1000_TDPUCTL  0x0358C  /* DMA Tx Descriptor uC Control - RW */
-#define E1000_DTXCTL   0x03590  /* DMA Tx Control - RW */
-#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
-#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
-#define E1000_DTXMXSZRQ  0x03540 /* DMA Tx Max Total Allow Size Requests - RW */
-#define E1000_TIDV     0x03820  /* Tx Interrupt Delay Value - RW */
-#define E1000_TADV     0x0382C  /* Tx Interrupt Absolute Delay Val - RW */
-#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
-#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
-#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
-#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
-#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
-#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
-#define E1000_COLC     0x04028  /* Collision Count - R/clr */
-#define E1000_DC       0x04030  /* Defer Count - R/clr */
-#define E1000_TNCRS    0x04034  /* Tx-No CRS - R/clr */
-#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
-#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC   0x04048  /* XON Rx Count - R/clr */
-#define E1000_XONTXC   0x0404C  /* XON Tx Count - R/clr */
-#define E1000_XOFFRXC  0x04050  /* XOFF Rx Count - R/clr */
-#define E1000_XOFFTXC  0x04054  /* XOFF Tx Count - R/clr */
-#define E1000_FCRUC    0x04058  /* Flow Control Rx Unsupported Count- R/clr */
-#define E1000_PRC64    0x0405C  /* Packets Rx (64 bytes) - R/clr */
-#define E1000_PRC127   0x04060  /* Packets Rx (65-127 bytes) - R/clr */
-#define E1000_PRC255   0x04064  /* Packets Rx (128-255 bytes) - R/clr */
-#define E1000_PRC511   0x04068  /* Packets Rx (256-511 bytes) - R/clr */
-#define E1000_PRC1023  0x0406C  /* Packets Rx (512-1023 bytes) - R/clr */
-#define E1000_PRC1522  0x04070  /* Packets Rx (1024-1522 bytes) - R/clr */
-#define E1000_GPRC     0x04074  /* Good Packets Rx Count - R/clr */
-#define E1000_BPRC     0x04078  /* Broadcast Packets Rx Count - R/clr */
-#define E1000_MPRC     0x0407C  /* Multicast Packets Rx Count - R/clr */
-#define E1000_GPTC     0x04080  /* Good Packets Tx Count - R/clr */
-#define E1000_GORCL    0x04088  /* Good Octets Rx Count Low - R/clr */
-#define E1000_GORCH    0x0408C  /* Good Octets Rx Count High - R/clr */
-#define E1000_GOTCL    0x04090  /* Good Octets Tx Count Low - R/clr */
-#define E1000_GOTCH    0x04094  /* Good Octets Tx Count High - R/clr */
-#define E1000_RNBC     0x040A0  /* Rx No Buffers Count - R/clr */
-#define E1000_RUC      0x040A4  /* Rx Undersize Count - R/clr */
-#define E1000_RFC      0x040A8  /* Rx Fragment Count - R/clr */
-#define E1000_ROC      0x040AC  /* Rx Oversize Count - R/clr */
-#define E1000_RJC      0x040B0  /* Rx Jabber Count - R/clr */
-#define E1000_MGTPRC   0x040B4  /* Management Packets Rx Count - R/clr */
-#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC   0x040BC  /* Management Packets Tx Count - R/clr */
-#define E1000_TORL     0x040C0  /* Total Octets Rx Low - R/clr */
-#define E1000_TORH     0x040C4  /* Total Octets Rx High - R/clr */
-#define E1000_TOTL     0x040C8  /* Total Octets Tx Low - R/clr */
-#define E1000_TOTH     0x040CC  /* Total Octets Tx High - R/clr */
-#define E1000_TPR      0x040D0  /* Total Packets Rx - R/clr */
-#define E1000_TPT      0x040D4  /* Total Packets Tx - R/clr */
-#define E1000_PTC64    0x040D8  /* Packets Tx (64 bytes) - R/clr */
-#define E1000_PTC127   0x040DC  /* Packets Tx (65-127 bytes) - R/clr */
-#define E1000_PTC255   0x040E0  /* Packets Tx (128-255 bytes) - R/clr */
-#define E1000_PTC511   0x040E4  /* Packets Tx (256-511 bytes) - R/clr */
-#define E1000_PTC1023  0x040E8  /* Packets Tx (512-1023 bytes) - R/clr */
-#define E1000_PTC1522  0x040EC  /* Packets Tx (1024-1522 Bytes) - R/clr */
-#define E1000_MPTC     0x040F0  /* Multicast Packets Tx Count - R/clr */
-#define E1000_BPTC     0x040F4  /* Broadcast Packets Tx Count - R/clr */
-#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context Tx - R/clr */
-#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
-#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
-#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Pkt Timer Expire Count */
-#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Abs Timer Expire Count */
-#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Pkt Timer Expire Count */
-#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Abs Timer Expire Count */
-#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Min Thresh Count */
-#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Desc Min Thresh Count */
-#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
-
-#define E1000_VFGPRC   0x00F10
-#define E1000_VFGORC   0x00F18
-#define E1000_VFMPRC   0x00F3C
-#define E1000_VFGPTC   0x00F14
-#define E1000_VFGOTC   0x00F34
-#define E1000_VFGOTLBC 0x00F50
-#define E1000_VFGPTLBC 0x00F44
-#define E1000_VFGORLBC 0x00F48
-#define E1000_VFGPRLBC 0x00F40
-/* Virtualization statistical counters */
-#define E1000_PFVFGPRC(_n)   (0x010010 + (0x100 * (_n)))
-#define E1000_PFVFGPTC(_n)   (0x010014 + (0x100 * (_n)))
-#define E1000_PFVFGORC(_n)   (0x010018 + (0x100 * (_n)))
-#define E1000_PFVFGOTC(_n)   (0x010034 + (0x100 * (_n)))
-#define E1000_PFVFMPRC(_n)   (0x010038 + (0x100 * (_n)))
-#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
-#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
-#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
-#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
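The VF and per-VF (PFVF) statistic registers above are 32-bit rollover counters, so drivers usually fold them into 64-bit software totals. A hedged sketch of that pattern, assuming the E1000_READ_REG() helper and the u32/u64 typedefs from e1000_osdep.h; the function name is illustrative only:

static inline void
example_accumulate_vf_stat(struct e1000_hw *hw, u32 reg, u32 *last, u64 *total)
{
	u32 now = E1000_READ_REG(hw, reg);

	/* Unsigned subtraction handles a single 32-bit wrap correctly. */
	*total += (u32)(now - *last);
	*last = now;
}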
-
-#define E1000_LSECTXUT        0x04300  /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
-#define E1000_LSECTXPKTE      0x04304  /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
-#define E1000_LSECTXPKTP      0x04308  /* LinkSec Protected Tx Packet Count - OutPktsProtected */
-#define E1000_LSECTXOCTE      0x0430C  /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */
-#define E1000_LSECTXOCTP      0x04310  /* LinkSec Protected Tx Octets Count - OutOctetsProtected */
-#define E1000_LSECRXUT        0x04314  /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */
-#define E1000_LSECRXOCTD      0x0431C  /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */
-#define E1000_LSECRXOCTV      0x04320  /* LinkSec Rx Octets Validated - InOctetsValidated */
-#define E1000_LSECRXBAD       0x04324  /* LinkSec Rx Bad Tag - InPktsBadTag */
-#define E1000_LSECRXNOSCI     0x04328  /* LinkSec Rx Packet No SCI Count - InPktsNoSci */
-#define E1000_LSECRXUNSCI     0x0432C  /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */
-#define E1000_LSECRXUNCH      0x04330  /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */
-#define E1000_LSECRXDELAY     0x04340  /* LinkSec Rx Delayed Packet Count - InPktsDelayed */
-#define E1000_LSECRXLATE      0x04350  /* LinkSec Rx Late Packets Count - InPktsLate */
-#define E1000_LSECRXOK(_n)    (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */
-#define E1000_LSECRXINV(_n)   (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */
-#define E1000_LSECRXNV(_n)    (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */
-#define E1000_LSECRXUNSA      0x043C0  /* LinkSec Rx Unused SA Count - InPktsUnusedSa */
-#define E1000_LSECRXNUSA      0x043D0  /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */
-#define E1000_LSECTXCAP       0x0B000  /* LinkSec Tx Capabilities Register - RO */
-#define E1000_LSECRXCAP       0x0B300  /* LinkSec Rx Capabilities Register - RO */
-#define E1000_LSECTXCTRL      0x0B004  /* LinkSec Tx Control - RW */
-#define E1000_LSECRXCTRL      0x0B304  /* LinkSec Rx Control - RW */
-#define E1000_LSECTXSCL       0x0B008  /* LinkSec Tx SCI Low - RW */
-#define E1000_LSECTXSCH       0x0B00C  /* LinkSec Tx SCI High - RW */
-#define E1000_LSECTXSA        0x0B010  /* LinkSec Tx SA0 - RW */
-#define E1000_LSECTXPN0       0x0B018  /* LinkSec Tx SA PN 0 - RW */
-#define E1000_LSECTXPN1       0x0B01C  /* LinkSec Tx SA PN 1 - RW */
-#define E1000_LSECRXSCL       0x0B3D0  /* LinkSec Rx SCI Low - RW */
-#define E1000_LSECRXSCH       0x0B3E0  /* LinkSec Rx SCI High - RW */
-#define E1000_LSECTXKEY0(_n)  (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */
-#define E1000_LSECTXKEY1(_n)  (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */
-#define E1000_LSECRXSA(_n)    (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
-#define E1000_LSECRXPN(_n)    (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
-/*
- * LinkSec Rx Keys  - where _n is the SA no. and _m the 4 dwords of the 128 bit
- * key - RW.
- */
-#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
-
-#define E1000_SSVPC             0x041A0  /* Switch Security Violation Packet Count */
-#define E1000_IPSCTRL           0xB430   /* IpSec Control Register */
-#define E1000_IPSRXCMD          0x0B408  /* IPSec Rx Command Register - RW */
-#define E1000_IPSRXIDX          0x0B400  /* IPSec Rx Index - RW */
-#define E1000_IPSRXIPADDR(_n)   (0x0B420 + (0x04 * (_n)))  /* IPSec Rx IPv4/v6 Address - RW */
-#define E1000_IPSRXKEY(_n)      (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */
-#define E1000_IPSRXSALT         0x0B404  /* IPSec Rx Salt - RW */
-#define E1000_IPSRXSPI          0x0B40C  /* IPSec Rx SPI - RW */
-#define E1000_IPSTXKEY(_n)      (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */
-#define E1000_IPSTXSALT         0x0B454  /* IPSec Tx Salt - RW */
-#define E1000_IPSTXIDX          0x0B450  /* IPSec Tx SA IDX - RW */
-#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
-#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
-#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
-#define E1000_CBTMPC      0x0402C  /* Circuit Breaker Tx Packet Count */
-#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
-#define E1000_CBRDPC      0x04044  /* Circuit Breaker Rx Dropped Count */
-#define E1000_CBRMPC      0x040FC  /* Circuit Breaker Rx Packet Count */
-#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
-#define E1000_HGPTC       0x04118  /* Host Good Packets Tx Count */
-#define E1000_HTCBDPC     0x04124  /* Host Tx Circuit Breaker Dropped Count */
-#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
-#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
-#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
-#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
-#define E1000_LENERRS     0x04138  /* Length Errors Count */
-#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
-#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
-#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
-#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
-#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
-#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
-#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
-#define E1000_RXCSUM   0x05000  /* Rx Checksum Control - RW */
-#define E1000_RLPML    0x05004  /* Rx Long Packet Max Length */
-#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
-#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
-#define E1000_RA       0x05400  /* Receive Address - RW Array */
-#define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
-#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
-#define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
-#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
-#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
-#define E1000_WUC      0x05800  /* Wakeup Control - RW */
-#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
-#define E1000_WUS      0x05810  /* Wakeup Status - RO */
-#define E1000_MANC     0x05820  /* Management Control - RW */
-#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
-#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
-#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
-#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
-#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
-#define E1000_PBACL    0x05B68  /* MSIx PBA Clear - Read/Write 1's to clear */
-#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
-#define E1000_HOST_IF  0x08800  /* Host Interface */
-#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
-#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
-#define E1000_FHFT(_n)  (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */
-#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */
-
-
-#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
-#define E1000_MDPHYA      0x0003C /* PHY address - RW */
-#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
-#define E1000_MDEF(_n)    (0x05890 + (4 * (_n))) /* Mngmt Decision Filters */
-#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
-#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
-#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
-#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
-#define E1000_GCR         0x05B00 /* PCI-Ex Control */
-#define E1000_GCR2        0x05B64 /* PCI-Ex Control #2 */
-#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
-#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
-#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
-#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
-#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
-#define E1000_SWSM      0x05B50 /* SW Semaphore */
-#define E1000_FWSM      0x05B54 /* FW Semaphore */
-#define E1000_SWSM2     0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
-#define E1000_DCA_ID    0x05B70 /* DCA Requester ID Information - RO */
-#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
-#define E1000_UFUSE     0x05B78 /* UFUSE - RO */
-#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
-#define E1000_HICR      0x08F00 /* Host Interface Control */
-#define E1000_FWSTS     0x08F0C /* FW Status */
-
-/* RSS registers */
-#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
-#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
-#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
-#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
-#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
-#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register
-                                                    * (_i) - RW */
-#define E1000_MSIXTADD(_i)  (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr
-                                                       * low reg - RW */
-#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr
-                                                       * upper reg - RW */
-#define E1000_MSIXTMSG(_i)  (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry
-                                                       * message reg - RW */
-#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry
-                                                       * vector ctrl reg - RW */
-#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
-#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
-#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
-#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
-#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
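RETA holds 128 one-byte queue indices packed four per 32-bit register, and RSSRK holds the 40-byte hash key in ten registers. A sketch (illustrative, not from the original sources) of filling the redirection table round-robin over the configured Rx queues:

static void
example_fill_reta(struct e1000_hw *hw, unsigned int nb_rx_queues)
{
	u32 reta = 0;
	unsigned int i;

	for (i = 0; i < 128; i++) {
		reta |= (u32)(i % nb_rx_queues) << (8 * (i % 4));
		if ((i % 4) == 3) {
			/* Four entries collected: write one RETA register. */
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
			reta = 0;
		}
	}
}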
-/* VT Registers */
-#define E1000_SWPBS     0x03004 /* Switch Packet Buffer Size - RW */
-#define E1000_MBVFICR   0x00C80 /* Mailbox VF Cause - RWC */
-#define E1000_MBVFIMR   0x00C84 /* Mailbox VF int Mask - RW */
-#define E1000_VFLRE     0x00C88 /* VF Register Events - RWC */
-#define E1000_VFRE      0x00C8C /* VF Receive Enables */
-#define E1000_VFTE      0x00C90 /* VF Transmit Enables */
-#define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
-#define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
-#define E1000_WVBR      0x03554 /* VM Wrong Behavior - RWS */
-#define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
-#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
-#define E1000_IOVTCL    0x05BBC /* IOV Control Register */
-#define E1000_VMRCTL    0x05D80 /* Virtual Mirror Rule Control */
-#define E1000_VMRVLAN   0x05D90 /* Virtual Mirror Rule VLAN */
-#define E1000_VMRVM     0x05DA0 /* Virtual Mirror Rule VM */
-#define E1000_MDFB      0x03558 /* Malicious Driver free block */
-#define E1000_LVMMC     0x03548 /* Last VM Misbehavior cause */
-#define E1000_TXSWC     0x05ACC /* Tx Switch Control */
-#define E1000_SCCRL     0x05DB0 /* Storm Control Control */
-#define E1000_BSCTRH    0x05DB8 /* Broadcast Storm Control Threshold */
-#define E1000_MSCTRH    0x05DBC /* Multicast Storm Control Threshold */
-/* These act per VF so an array friendly macro is used */
-#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
-#define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
-#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
-#define E1000_VFVMBMEM(_n)     (0x00800 + (_n))
-#define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
-#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
-                                                       * Filter - RW */
-#define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
-#define E1000_DVMOLR(_n)       (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
-/* Time Sync */
-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
-#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
-#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
-#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
-#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
-#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
-#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
-#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
-#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
-#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
-#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
-#define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
-#define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
-
-/* Filtering Registers */
-#define E1000_SAQF(_n)  (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
-#define E1000_DAQF(_n)  (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
-#define E1000_SPQF(_n)  (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
-#define E1000_FTQF(_n)  (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
-#define E1000_TTQF(_n)  (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
-#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
-#define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
-
-#define E1000_RTTDCS            0x3600  /* Reedtown Tx Desc plane control and status */
-#define E1000_RTTPCS            0x3474  /* Reedtown Tx Packet Plane control and status */
-#define E1000_RTRPCS            0x2474  /* Rx packet plane control and status */
-#define E1000_RTRUP2TC          0x05AC4 /* Rx User Priority to Traffic Class */
-#define E1000_RTTUP2TC          0x0418  /* Transmit User Priority to Traffic Class */
-#define E1000_RTTDTCRC(_n)      (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */
-#define E1000_RTTPTCRC(_n)      (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */
-#define E1000_RTRPTCRC(_n)      (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */
-#define E1000_RTTDTCRS(_n)      (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */
-#define E1000_RTTDTCRM(_n)      (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */
-#define E1000_RTTPTCRS(_n)      (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */
-#define E1000_RTTPTCRM(_n)      (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */
-#define E1000_RTRPTCRS(_n)      (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */
-#define E1000_RTRPTCRM(_n)      (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */
-#define E1000_RTTDVMRM(_n)      (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/
-#define E1000_RTTBCNRM(_n)      (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */
-#define E1000_RTTDQSEL          0x3604  /* Tx Desc Plane Queue Select */
-#define E1000_RTTDVMRC          0x3608  /* Tx Desc Plane VM Rate-Scheduler Config */
-#define E1000_RTTDVMRS          0x360C  /* Tx Desc Plane VM Rate-Scheduler Status */
-#define E1000_RTTBCNRC          0x36B0  /* Tx BCN Rate-Scheduler Config */
-#define E1000_RTTBCNRS          0x36B4  /* Tx BCN Rate-Scheduler Status */
-#define E1000_RTTBCNCR          0xB200  /* Tx BCN Control Register */
-#define E1000_RTTBCNTG          0x35A4  /* Tx BCN Tagging */
-#define E1000_RTTBCNCP          0xB208  /* Tx BCN Congestion point */
-#define E1000_RTRBCNCR          0xB20C  /* Rx BCN Control Register */
-#define E1000_RTTBCNRD          0x36B8  /* Tx BCN Rate Drift */
-#define E1000_PFCTOP            0x1080  /* Priority Flow Control Type and Opcode */
-#define E1000_RTTBCNIDX         0xB204  /* Tx BCN Congestion Point */
-#define E1000_RTTBCNACH         0x0B214 /* Tx BCN Control High */
-#define E1000_RTTBCNACL         0x0B210 /* Tx BCN Control Low */
-
-/* DMA Coalescing registers */
-#define E1000_DMACR             0x02508 /* Control Register */
-#define E1000_DMCTXTH           0x03550 /* Transmit Threshold */
-#define E1000_DMCTLX            0x02514 /* Time to Lx Request */
-#define E1000_DMCRTRH           0x05DD0 /* Receive Packet Rate Threshold */
-#define E1000_DMCCNT            0x05DD4 /* Current Rx Count */
-#define E1000_FCRTC             0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
-
-/* PCIe Parity Status Register */
-#define E1000_PCIEERRSTS        0x05BA8
-
-#define E1000_PROXYS            0x5F64 /* Proxying Status */
-#define E1000_PROXYFC           0x5F60 /* Proxying Filter Control */
-/* Thermal sensor configuration and status registers */
-#define E1000_THMJT             0x08100 /* Junction Temperature */
-#define E1000_THLOWTC           0x08104 /* Low Threshold Control */
-#define E1000_THMIDTC           0x08108 /* Mid Threshold Control */
-#define E1000_THHIGHTC          0x0810C /* High Threshold Control */
-#define E1000_THSTAT            0x08110 /* Thermal Sensor Status */
-
-/* Energy Efficient Ethernet "EEE" registers */
-#define E1000_IPCNFG            0x0E38 /* Internal PHY Configuration */
-#define E1000_LTRC              0x01A0 /* Latency Tolerance Reporting Control */
-#define E1000_EEER              0x0E30 /* Energy Efficient Ethernet "EEE"*/
-#define E1000_EEE_SU            0x0E34 /* EEE Setup */
-#define E1000_TLPIC             0x4148 /* EEE Tx LPI Count - TLPIC */
-#define E1000_RLPIC             0x414C /* EEE Rx LPI Count - RLPIC */
-
-/* OS2BMC Registers */
-#define E1000_B2OSPC            0x08FE0 /* BMC2OS packets sent by BMC */
-#define E1000_B2OGPRC           0x04158 /* BMC2OS packets received by host */
-#define E1000_O2BGPTC           0x08FE4 /* OS2BMC packets received by BMC */
-#define E1000_O2BSPC            0x0415C /* OS2BMC packets transmitted by host */
-
-#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_vf.c b/lib/librte_pmd_igb/igb/e1000_vf.c
deleted file mode 100644 (file)
index 8b81e4b..0000000
+++ /dev/null
@@ -1,574 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-
-#include "e1000_api.h"
-
-
-static s32       e1000_init_phy_params_vf(struct e1000_hw *hw);
-static s32       e1000_init_nvm_params_vf(struct e1000_hw *hw);
-static void      e1000_release_vf(struct e1000_hw *hw);
-static s32       e1000_acquire_vf(struct e1000_hw *hw);
-static s32       e1000_setup_link_vf(struct e1000_hw *hw);
-static s32       e1000_get_bus_info_pcie_vf(struct e1000_hw *hw);
-static s32       e1000_init_mac_params_vf(struct e1000_hw *hw);
-static s32       e1000_check_for_link_vf(struct e1000_hw *hw);
-static s32       e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
-                                              u16 *duplex);
-static s32       e1000_init_hw_vf(struct e1000_hw *hw);
-static s32       e1000_reset_hw_vf(struct e1000_hw *hw);
-static void      e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32);
-static void      e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
-static s32       e1000_read_mac_addr_vf(struct e1000_hw *);
-
-/**
- *  e1000_init_phy_params_vf - Inits PHY params
- *  @hw: pointer to the HW structure
- *
- *  Doesn't do much - there's no PHY available to the VF.
- **/
-static s32 e1000_init_phy_params_vf(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_init_phy_params_vf");
-       hw->phy.type = e1000_phy_vf;
-       hw->phy.ops.acquire = e1000_acquire_vf;
-       hw->phy.ops.release = e1000_release_vf;
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_init_nvm_params_vf - Inits NVM params
- *  @hw: pointer to the HW structure
- *
- *  Doesn't do much - there's no NVM available to the VF.
- **/
-static s32 e1000_init_nvm_params_vf(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_init_nvm_params_vf");
-       hw->nvm.type = e1000_nvm_none;
-       hw->nvm.ops.acquire = e1000_acquire_vf;
-       hw->nvm.ops.release = e1000_release_vf;
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_init_mac_params_vf - Inits MAC params
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
-{
-       struct e1000_mac_info *mac = &hw->mac;
-
-       DEBUGFUNC("e1000_init_mac_params_vf");
-
-       /* Set media type */
-       /*
-        * Virtual functions don't care what their media type is as they
-        * have no direct access to the PHY, or the media.  That is handled
-        * by the physical function driver.
-        */
-       hw->phy.media_type = e1000_media_type_unknown;
-
-       /* No ASF features for the VF driver */
-       mac->asf_firmware_present = FALSE;
-       /* ARC subsystem not supported */
-       mac->arc_subsystem_valid = FALSE;
-       /* Disable adaptive IFS mode so the generic funcs don't do anything */
-       mac->adaptive_ifs = FALSE;
-       /* VFs have no MTA registers - PF feature only */
-       mac->mta_reg_count = 128;
-       /* VFs have no access to RAR entries */
-       mac->rar_entry_count = 1;
-
-       /* Function pointers */
-       /* link setup */
-       mac->ops.setup_link = e1000_setup_link_vf;
-       /* bus type/speed/width */
-       mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf;
-       /* reset */
-       mac->ops.reset_hw = e1000_reset_hw_vf;
-       /* hw initialization */
-       mac->ops.init_hw = e1000_init_hw_vf;
-       /* check for link */
-       mac->ops.check_for_link = e1000_check_for_link_vf;
-       /* link info */
-       mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
-       /* multicast address update */
-       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
-       /* set mac address */
-       mac->ops.rar_set = e1000_rar_set_vf;
-       /* read mac address */
-       mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
-
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_init_function_pointers_vf - Inits function pointers
- *  @hw: pointer to the HW structure
- **/
-void e1000_init_function_pointers_vf(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_init_function_pointers_vf");
-
-       hw->mac.ops.init_params = e1000_init_mac_params_vf;
-       hw->nvm.ops.init_params = e1000_init_nvm_params_vf;
-       hw->phy.ops.init_params = e1000_init_phy_params_vf;
-       hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
-}
-
-/**
- *  e1000_acquire_vf - Acquire rights to access PHY or NVM.
- *  @hw: pointer to the HW structure
- *
- *  There is no PHY or NVM so we want all attempts to acquire these to fail.
- *  In addition, the MAC registers to access PHY/NVM don't exist so we don't
- *  even want any SW to attempt to use them.
- **/
-static s32 e1000_acquire_vf(struct e1000_hw *hw)
-{
-       return -E1000_ERR_PHY;
-}
-
-/**
- *  e1000_release_vf - Release PHY or NVM
- *  @hw: pointer to the HW structure
- *
- *  There is no PHY or NVM so we want all attempts to acquire these to fail.
- *  In addition, the MAC registers to access PHY/NVM don't exist so we don't
- *  even want any SW to attempt to use them.
- **/
-static void e1000_release_vf(struct e1000_hw *hw)
-{
-       return;
-}
-
-/**
- *  e1000_setup_link_vf - Sets up link.
- *  @hw: pointer to the HW structure
- *
- *  Virtual functions cannot change link.
- **/
-static s32 e1000_setup_link_vf(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_setup_link_vf");
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_get_bus_info_pcie_vf - Gets the bus info.
- *  @hw: pointer to the HW structure
- *
- *  Virtual functions are not really on their own bus.
- **/
-static s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw)
-{
-       struct e1000_bus_info *bus = &hw->bus;
-
-       DEBUGFUNC("e1000_get_bus_info_pcie_vf");
-
-       /* Do not set type PCI-E because we don't want the disable-master logic to run */
-       bus->type = e1000_bus_type_reserved;
-       bus->speed = e1000_bus_speed_2500;
-
-       return 0;
-}
-
-/**
- *  e1000_get_link_up_info_vf - Gets link info.
- *  @hw: pointer to the HW structure
- *  @speed: pointer to 16 bit value to store link speed.
- *  @duplex: pointer to 16 bit value to store duplex.
- *
- *  Since we cannot read the PHY and get accurate link info, we must rely upon
- *  the status register's data which is often stale and inaccurate.
- **/
-static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
-                                     u16 *duplex)
-{
-       s32 status;
-
-       DEBUGFUNC("e1000_get_link_up_info_vf");
-
-       status = E1000_READ_REG(hw, E1000_STATUS);
-       if (status & E1000_STATUS_SPEED_1000) {
-               *speed = SPEED_1000;
-               DEBUGOUT("1000 Mb/s, ");
-       } else if (status & E1000_STATUS_SPEED_100) {
-               *speed = SPEED_100;
-               DEBUGOUT("100 Mb/s, ");
-       } else {
-               *speed = SPEED_10;
-               DEBUGOUT("10 Mb/s, ");
-       }
-
-       if (status & E1000_STATUS_FD) {
-               *duplex = FULL_DUPLEX;
-               DEBUGOUT("Full Duplex\n");
-       } else {
-               *duplex = HALF_DUPLEX;
-               DEBUGOUT("Half Duplex\n");
-       }
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_reset_hw_vf - Resets the HW
- *  @hw: pointer to the HW structure
- *
- *  VFs provide a function-level reset. This is done using bit 26 of ctrl_reg (E1000_CTRL_RST).
- *  This is all the reset we can perform on a VF.
- **/
-static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       u32 timeout = E1000_VF_INIT_TIMEOUT;
-       s32 ret_val = -E1000_ERR_MAC_INIT;
-       u32 ctrl, msgbuf[3];
-       u8 *addr = (u8 *)(&msgbuf[1]);
-
-       DEBUGFUNC("e1000_reset_hw_vf");
-
-       DEBUGOUT("Issuing a function level reset to MAC\n");
-       ctrl = E1000_READ_REG(hw, E1000_CTRL);
-       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
-
-       /* we cannot reset while the RSTI / RSTD bits are asserted */
-       while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
-               timeout--;
-               usec_delay(5);
-       }
-
-       if (timeout) {
-               /* mailbox timeout can now become active */
-               mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
-
-               msgbuf[0] = E1000_VF_RESET;
-               mbx->ops.write_posted(hw, msgbuf, 1, 0);
-
-               msec_delay(10);
-
-               /* set our "perm_addr" based on info provided by PF */
-               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
-               if (!ret_val) {
-                       if (msgbuf[0] == (E1000_VF_RESET |
-                                               E1000_VT_MSGTYPE_ACK))
-                               memcpy(hw->mac.perm_addr, addr, 6);
-                       else
-                               ret_val = -E1000_ERR_MAC_INIT;
-               }
-       }
-
-       return ret_val;
-}
-
-/**
- *  e1000_init_hw_vf - Inits the HW
- *  @hw: pointer to the HW structure
- *
- *  Not much to do here except clear the PF Reset indication if there is one.
- **/
-static s32 e1000_init_hw_vf(struct e1000_hw *hw)
-{
-       DEBUGFUNC("e1000_init_hw_vf");
-
-       /* attempt to set and restore our mac address */
-       e1000_rar_set_vf(hw, hw->mac.addr, 0);
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_rar_set_vf - set device MAC address
- *  @hw: pointer to the HW structure
- *  @addr: pointer to the receive address
- *  @index: receive address array register
- **/
-static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       u32 msgbuf[3];
-       u8 *msg_addr = (u8 *)(&msgbuf[1]);
-       s32 ret_val;
-
-       memset(msgbuf, 0, 12);
-       msgbuf[0] = E1000_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
-       ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
-
-       if (!ret_val)
-               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
-
-       msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
-
-       /* if NACKed, the address was rejected; use "perm_addr" */
-       if (!ret_val &&
-           (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
-               e1000_read_mac_addr_vf(hw);
-}
-
-/**
- *  e1000_hash_mc_addr_vf - Generate a multicast hash value
- *  @hw: pointer to the HW structure
- *  @mc_addr: pointer to a multicast address
- *
- *  Generates a multicast address hash value which is used to determine
- *  the multicast filter table array address and new table value.
- **/
-static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
-{
-       u32 hash_value, hash_mask;
-       u8 bit_shift = 0;
-
-       DEBUGFUNC("e1000_hash_mc_addr_generic");
-
-       /* Register count multiplied by bits per register */
-       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
-
-       /*
-        * The bit_shift is the number of left-shifts
-        * where 0xFF would still fall within the hash mask.
-        */
-       while (hash_mask >> bit_shift != 0xFF)
-               bit_shift++;
-
-       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
-                                 (((u16) mc_addr[5]) << bit_shift)));
-
-       return hash_value;
-}
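Worked example (added for illustration, not part of the original file): with the VF's mta_reg_count of 128 the mask is 0xFFF and bit_shift settles at 4, so the IPv4 all-hosts group 01:00:5e:00:00:01 hashes to 0x010:

/*
 * hash_mask  = 128 * 32 - 1 = 0xFFF; 0xFFF >> 4 == 0xFF, so bit_shift = 4.
 * mc_addr[4] = 0x00, mc_addr[5] = 0x01
 * hash_value = 0xFFF & ((0x00 >> 4) | (0x01 << 4)) = 0x010
 * On the PF side this would select bit 16 (0x010 & 0x1F) of MTA register 0
 * (0x010 >> 5).
 */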
-
-/**
- *  e1000_update_mc_addr_list_vf - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
- *
- *  Updates the Multicast Table Array.
- *  The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
-                                  u8 *mc_addr_list, u32 mc_addr_count)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       u32 msgbuf[E1000_VFMAILBOX_SIZE];
-       u16 *hash_list = (u16 *)&msgbuf[1];
-       u32 hash_value;
-       u32 i;
-
-       DEBUGFUNC("e1000_update_mc_addr_list_vf");
-
-       /* Each entry in the list uses 1 16 bit word.  We have 30
-        * 16 bit words available in our HW msg buffer (minus 1 for the
-        * msg type).  That's 30 hash values if we pack 'em right.  If
-        * there are more than 30 MC addresses to add then punt the
-        * extras for now and then add code to handle more than 30 later.
-        * It would be unusual for a server to request that many multi-cast
-        * addresses except for in large enterprise network environments.
-        */
-
-       DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
-
-       msgbuf[0] = E1000_VF_SET_MULTICAST;
-
-       if (mc_addr_count > 30) {
-               msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
-               mc_addr_count = 30;
-       }
-
-       msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
-
-       for (i = 0; i < mc_addr_count; i++) {
-               hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
-               DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
-               hash_list[i] = hash_value & 0x0FFF;
-               mc_addr_list += ETH_ADDR_LEN;
-       }
-
-       mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE, 0);
-}
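A usage sketch (not part of the original file): the caller packs the addresses back to back, 6 bytes each (ETH_ADDR_LEN from e1000_defines.h), before handing them to the function above.

static void
example_program_two_mc_addrs(struct e1000_hw *hw)
{
	u8 mc_list[2 * ETH_ADDR_LEN] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,	/* 224.0.0.1 */
		0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa,	/* 239.255.255.250 */
	};

	e1000_update_mc_addr_list_vf(hw, mc_list, 2);
}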
-
-/**
- *  e1000_vfta_set_vf - Set/Unset vlan filter table address
- *  @hw: pointer to the HW structure
- *  @vid: determines the vfta register and bit to set/unset
- *  @set: if TRUE then set bit, else clear bit
- **/
-void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       u32 msgbuf[2];
-
-       msgbuf[0] = E1000_VF_SET_VLAN;
-       msgbuf[1] = vid;
-       /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
-       if (set)
-               msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
-
-       mbx->ops.write_posted(hw, msgbuf, 2, 0);
-}
-
-/**
- *  e1000_rlpml_set_vf - Set the maximum receive packet length
- *  @hw: pointer to the HW structure
- *  @max_size: value to assign to max frame size
- **/
-void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       u32 msgbuf[2];
-
-       msgbuf[0] = E1000_VF_SET_LPE;
-       msgbuf[1] = max_size;
-
-       mbx->ops.write_posted(hw, msgbuf, 2, 0);
-}
-
-/**
- *  e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc
- *  @hw: pointer to the HW structure
- *  @type: promiscuous mode to set (unicast, multicast, both, or disabled)
- **/
-s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       u32 msgbuf = E1000_VF_SET_PROMISC;
-       s32 ret_val;
-
-       switch (type) {
-       case e1000_promisc_multicast:
-               msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
-               break;
-       case e1000_promisc_enabled:
-               msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
-       case e1000_promisc_unicast:
-               msgbuf |= E1000_VF_SET_PROMISC_UNICAST;
-       case e1000_promisc_disabled:
-               break;
-       default:
-               return -E1000_ERR_MAC_INIT;
-       }
-
-       ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0);
-
-       if (!ret_val)
-               ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0);
-
-       if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK))
-               ret_val = -E1000_ERR_MAC_INIT;
-
-       return ret_val;
-}
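A hedged usage sketch: the switch above intentionally falls through so that e1000_promisc_enabled sets both the multicast and unicast flags. Here a VF requests full promiscuous mode and retries with multicast-only if the PF refuses; the fallback policy is illustrative, not taken from the original driver.

static s32
example_request_promisc(struct e1000_hw *hw)
{
	s32 ret_val = e1000_promisc_set_vf(hw, e1000_promisc_enabled);

	if (ret_val)
		ret_val = e1000_promisc_set_vf(hw, e1000_promisc_multicast);

	return ret_val;
}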
-
-/**
- *  e1000_read_mac_addr_vf - Read device MAC address
- *  @hw: pointer to the HW structure
- **/
-static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
-{
-       int i;
-
-       for (i = 0; i < ETH_ADDR_LEN; i++)
-               hw->mac.addr[i] = hw->mac.perm_addr[i];
-
-       return E1000_SUCCESS;
-}
-
-/**
- *  e1000_check_for_link_vf - Check for link for a virtual interface
- *  @hw: pointer to the HW structure
- *
- *  Checks to see if the underlying PF is still talking to the VF and
- *  if it is then it reports the link state to the hardware, otherwise
- *  it reports link down and returns an error.
- **/
-static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
-{
-       struct e1000_mbx_info *mbx = &hw->mbx;
-       struct e1000_mac_info *mac = &hw->mac;
-       s32 ret_val = E1000_SUCCESS;
-       u32 in_msg = 0;
-
-       DEBUGFUNC("e1000_check_for_link_vf");
-
-       /*
-        * We only want to run this if there has been a rst asserted.
-        * In this case that could mean a link change, device reset,
-        * or a virtual function reset
-        */
-
-       /* If we were hit with a reset or timeout drop the link */
-       if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
-               mac->get_link_status = TRUE;
-
-       if (!mac->get_link_status)
-               goto out;
-
-       /* if link status is down no point in checking to see if pf is up */
-       if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
-               goto out;
-
-       /* if the read failed it could just be a mailbox collision, best wait
-        * until we are called again and don't report an error */
-       if (mbx->ops.read(hw, &in_msg, 1, 0))
-               goto out;
-
-       /* if incoming message isn't clear to send we are waiting on response */
-       if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
-               /* message is not CTS; if it is a NACK we have lost CTS status */
-               if (in_msg & E1000_VT_MSGTYPE_NACK)
-                       ret_val = -E1000_ERR_MAC_INIT;
-               goto out;
-       }
-
-       /* at this point we know the PF is talking to us, check and see if
-        * we are still accepting timeouts or if we had a timeout failure.
-        * If we failed then we will need to reinit. */
-       if (!mbx->timeout) {
-               ret_val = -E1000_ERR_MAC_INIT;
-               goto out;
-       }
-
-       /* if we passed all the tests above then the link is up and we no
-        * longer need to check for link */
-       mac->get_link_status = FALSE;
-
-out:
-       return ret_val;
-}
-
diff --git a/lib/librte_pmd_igb/igb/e1000_vf.h b/lib/librte_pmd_igb/igb/e1000_vf.h
deleted file mode 100644 (file)
index b2fd8a1..0000000
+++ /dev/null
@@ -1,294 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _E1000_VF_H_
-#define _E1000_VF_H_
-
-#include "e1000_osdep.h"
-#include "e1000_regs.h"
-#include "e1000_defines.h"
-
-struct e1000_hw;
-
-#define E1000_DEV_ID_82576_VF                 0x10CA
-#define E1000_DEV_ID_I350_VF                  0x1520
-
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
-
-/* Additional Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
-#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
-
-/* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
-#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
-#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
-#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
-#define E1000_SRRCTL_DROP_EN                            0x80000000
-
-#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
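The SRRCTL fields compose as follows; for instance a 2 kB receive buffer with one-buffer advanced descriptors and drop-enable (an illustrative value, not a define from the original header):

/* 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT = 2, i.e. the buffer size in 1 kB units. */
#define EXAMPLE_SRRCTL_2KB_ONEBUF \
	((2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT) | \
	 E1000_SRRCTL_DESCTYPE_ADV_ONEBUF | \
	 E1000_SRRCTL_DROP_EN)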
-
-/* Interrupt Defines */
-#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n) (0x01680 + ((_n) << 2))
-#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
-#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-#define E1000_IVAR_VALID        0x80
-
-/* Receive Descriptor - Advanced */
-union e1000_adv_rx_desc {
-       struct {
-               u64 pkt_addr;             /* Packet buffer address */
-               u64 hdr_addr;             /* Header buffer address */
-       } read;
-       struct {
-               struct {
-                       union {
-                               u32 data;
-                               struct {
-                                       /* RSS type, Packet type */
-                                       u16 pkt_info;
-                                       /* Split Header, header buffer len */
-                                       u16 hdr_info;
-                               } hs_rss;
-                       } lo_dword;
-                       union {
-                               u32 rss;          /* RSS Hash */
-                               struct {
-                                       u16 ip_id;    /* IP id */
-                                       u16 csum;     /* Packet Checksum */
-                               } csum_ip;
-                       } hi_dword;
-               } lower;
-               struct {
-                       u32 status_error;     /* ext status/error */
-                       u16 length;           /* Packet length */
-                       u16 vlan;             /* VLAN tag */
-               } upper;
-       } wb;  /* writeback */
-};
-
-#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
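After write-back the hardware reuses the same 16 bytes for status, length and VLAN. A sketch (illustrative only) of polling one descriptor, assuming the E1000_RXD_STAT_DD descriptor-done bit from e1000_defines.h:

static inline int
example_rx_desc_done(volatile union e1000_adv_rx_desc *rxd, u16 *len, u16 *vlan)
{
	u32 staterr = rxd->wb.upper.status_error;

	if (!(staterr & E1000_RXD_STAT_DD))
		return 0;	/* hardware has not written this slot back yet */

	*len = rxd->wb.upper.length;
	*vlan = rxd->wb.upper.vlan;
	return 1;
}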
-
-/* Transmit Descriptor - Advanced */
-union e1000_adv_tx_desc {
-       struct {
-               u64 buffer_addr;    /* Address of descriptor's data buf */
-               u32 cmd_type_len;
-               u32 olinfo_status;
-       } read;
-       struct {
-               u64 rsvd;       /* Reserved */
-               u32 nxtseq_seed;
-               u32 status;
-       } wb;
-};
-
-/* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
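A sketch of filling one advanced data descriptor for a single-buffer frame with CRC insertion and status report; the helper name and the fixed flag set are illustrative, not lifted from the original driver.

static inline void
example_fill_tx_desc(union e1000_adv_tx_desc *txd, u64 dma_addr, u16 pkt_len)
{
	txd->read.buffer_addr = dma_addr;
	txd->read.cmd_type_len = E1000_ADVTXD_DTYP_DATA |
				 E1000_ADVTXD_DCMD_DEXT |
				 E1000_ADVTXD_DCMD_IFCS |
				 E1000_ADVTXD_DCMD_EOP |
				 E1000_ADVTXD_DCMD_RS |
				 pkt_len;	/* data length lives in the low bits */
	txd->read.olinfo_status = (u32)pkt_len << E1000_ADVTXD_PAYLEN_SHIFT;
}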
-
-/* Context descriptors */
-struct e1000_adv_tx_context_desc {
-       u32 vlan_macip_lens;
-       u32 seqnum_seed;
-       u32 type_tucmd_mlhl;
-       u32 mss_l4len_idx;
-};
-
-#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
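And a matching TSO context-descriptor sketch for a plain IPv4/TCP frame; the 14/20/20-byte header lengths and the 1448-byte MSS are example values, not constants from this header.

static inline void
example_fill_tso_context(struct e1000_adv_tx_context_desc *ctx)
{
	/* IP header length occupies the low bits, MAC length starts at bit 9. */
	ctx->vlan_macip_lens = (14 << E1000_ADVTXD_MACLEN_SHIFT) | 20;
	ctx->seqnum_seed = 0;
	ctx->type_tucmd_mlhl = E1000_ADVTXD_DTYP_CTXT |
			       E1000_ADVTXD_DCMD_DEXT |
			       E1000_ADVTXD_TUCMD_IPV4 |
			       E1000_ADVTXD_TUCMD_L4T_TCP;
	ctx->mss_l4len_idx = (1448 << E1000_ADVTXD_MSS_SHIFT) |
			     (20 << E1000_ADVTXD_L4LEN_SHIFT);
}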
-
-enum e1000_mac_type {
-       e1000_undefined = 0,
-       e1000_vfadapt,
-       e1000_vfadapt_i350,
-       e1000_num_macs  /* List is 1-based, so subtract 1 for TRUE count. */
-};
-
-struct e1000_vf_stats {
-       u64 base_gprc;
-       u64 base_gptc;
-       u64 base_gorc;
-       u64 base_gotc;
-       u64 base_mprc;
-       u64 base_gotlbc;
-       u64 base_gptlbc;
-       u64 base_gorlbc;
-       u64 base_gprlbc;
-
-       u32 last_gprc;
-       u32 last_gptc;
-       u32 last_gorc;
-       u32 last_gotc;
-       u32 last_mprc;
-       u32 last_gotlbc;
-       u32 last_gptlbc;
-       u32 last_gorlbc;
-       u32 last_gprlbc;
-
-       u64 gprc;
-       u64 gptc;
-       u64 gorc;
-       u64 gotc;
-       u64 mprc;
-       u64 gotlbc;
-       u64 gptlbc;
-       u64 gorlbc;
-       u64 gprlbc;
-};
-
-#include "e1000_mbx.h"
-
-struct e1000_mac_operations {
-       /* Function pointers for the MAC. */
-       s32  (*init_params)(struct e1000_hw *);
-       s32  (*check_for_link)(struct e1000_hw *);
-       void (*clear_vfta)(struct e1000_hw *);
-       s32  (*get_bus_info)(struct e1000_hw *);
-       s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
-       void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
-       s32  (*reset_hw)(struct e1000_hw *);
-       s32  (*init_hw)(struct e1000_hw *);
-       s32  (*setup_link)(struct e1000_hw *);
-       void (*write_vfta)(struct e1000_hw *, u32, u32);
-       void (*rar_set)(struct e1000_hw *, u8*, u32);
-       s32  (*read_mac_addr)(struct e1000_hw *);
-};
-
-struct e1000_mac_info {
-       struct e1000_mac_operations ops;
-       u8 addr[6];
-       u8 perm_addr[6];
-
-       enum e1000_mac_type type;
-
-       u16 mta_reg_count;
-       u16 rar_entry_count;
-
-       bool get_link_status;
-};
-
-struct e1000_mbx_operations {
-       s32 (*init_params)(struct e1000_hw *hw);
-       s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
-       s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
-       s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
-       s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
-       s32 (*check_for_msg)(struct e1000_hw *, u16);
-       s32 (*check_for_ack)(struct e1000_hw *, u16);
-       s32 (*check_for_rst)(struct e1000_hw *, u16);
-};
-
-struct e1000_mbx_stats {
-       u32 msgs_tx;
-       u32 msgs_rx;
-
-       u32 acks;
-       u32 reqs;
-       u32 rsts;
-};
-
-struct e1000_mbx_info {
-       struct e1000_mbx_operations ops;
-       struct e1000_mbx_stats stats;
-       u32 timeout;
-       u32 usec_delay;
-       u16 size;
-};
-
-struct e1000_dev_spec_vf {
-       u32 vf_number;
-       u32 v2p_mailbox;
-};
-
-struct e1000_hw {
-       void *back;
-
-       u8 *hw_addr;
-       u8 *flash_address;
-       unsigned long io_base;
-
-       struct e1000_mac_info  mac;
-       struct e1000_mbx_info mbx;
-
-       union {
-               struct e1000_dev_spec_vf vf;
-       } dev_spec;
-
-       u16 device_id;
-       u16 subsystem_vendor_id;
-       u16 subsystem_device_id;
-       u16 vendor_id;
-
-       u8  revision_id;
-};
-
-enum e1000_promisc_type {
-       e1000_promisc_disabled = 0,   /* all promisc modes disabled */
-       e1000_promisc_unicast = 1,    /* unicast promiscuous enabled */
-       e1000_promisc_multicast = 2,  /* multicast promiscuous enabled */
-       e1000_promisc_enabled = 3,    /* both uni and multicast promisc */
-       e1000_num_promisc_types
-};
-
-/* These functions must be implemented by drivers */
-s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
-void e1000_rlpml_set_vf(struct e1000_hw *, u16);
-s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
-#endif /* _E1000_VF_H_ */
diff --git a/lib/librte_pmd_igb/igb/if_igb.c b/lib/librte_pmd_igb/igb/if_igb.c
deleted file mode 100644 (file)
index 4aa08f6..0000000
+++ /dev/null
@@ -1,5567 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-
-#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include "opt_device_polling.h"
-#include "opt_inet.h"
-#include "opt_altq.h"
-#endif
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#if __FreeBSD_version >= 800000
-#include <sys/buf_ring.h>
-#endif
-#include <sys/bus.h>
-#include <sys/endian.h>
-#include <sys/kernel.h>
-#include <sys/kthread.h>
-#include <sys/malloc.h>
-#include <sys/mbuf.h>
-#include <sys/module.h>
-#include <sys/rman.h>
-#include <sys/socket.h>
-#include <sys/sockio.h>
-#include <sys/sysctl.h>
-#include <sys/taskqueue.h>
-#include <sys/eventhandler.h>
-#include <sys/pcpu.h>
-#include <sys/smp.h>
-#include <machine/smp.h>
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <net/bpf.h>
-#include <net/ethernet.h>
-#include <net/if.h>
-#include <net/if_arp.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-
-#include <net/if_types.h>
-#include <net/if_vlan_var.h>
-
-#include <netinet/in_systm.h>
-#include <netinet/in.h>
-#include <netinet/if_ether.h>
-#include <netinet/ip.h>
-#include <netinet/ip6.h>
-#include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
-#include <netinet/udp.h>
-
-#include <machine/in_cksum.h>
-#include <dev/led/led.h>
-#include <dev/pci/pcivar.h>
-#include <dev/pci/pcireg.h>
-
-#include "e1000_api.h"
-#include "e1000_82575.h"
-#include "if_igb.h"
-
-/*********************************************************************
- *  Set this to one to display debug statistics
- *********************************************************************/
-int    igb_display_debug_stats = 0;
-
-/*********************************************************************
- *  Driver version:
- *********************************************************************/
-char igb_driver_version[] = "version - 2.2.3";
-
-
-/*********************************************************************
- *  PCI Device ID Table
- *
- *  Used by probe to select devices to load on
- *  Last field stores an index into igb_strings
- *  Last entry must be all 0s
- *
- *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
- *********************************************************************/
-
-static igb_vendor_info_t igb_vendor_info_array[] =
-{
-       { 0x8086, E1000_DEV_ID_82575EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576,           PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_NS,        PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_FIBER,     PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_SERDES,    PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82576_VF,        PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82580_COPPER,    PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82580_FIBER,     PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82580_SERDES,    PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82580_SGMII,     PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII,  PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_DH89XXCC_SFP,    PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE,
-                                               PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_I350_COPPER,     PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_I350_FIBER,      PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_I350_SERDES,     PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_I350_SGMII,      PCI_ANY_ID, PCI_ANY_ID, 0},
-       { 0x8086, E1000_DEV_ID_I350_VF,         PCI_ANY_ID, PCI_ANY_ID, 0},
-       /* required last entry */
-       { 0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- *  Table of branding strings for all supported NICs.
- *********************************************************************/
-
-static char *igb_strings[] = {
-       "Intel(R) PRO/1000 Network Connection"
-};
-
-/*********************************************************************
- *  Function prototypes
- *********************************************************************/
-static int     igb_probe(device_t);
-static int     igb_attach(device_t);
-static int     igb_detach(device_t);
-static int     igb_shutdown(device_t);
-static int     igb_suspend(device_t);
-static int     igb_resume(device_t);
-static void    igb_start(struct ifnet *);
-static void    igb_start_locked(struct tx_ring *, struct ifnet *ifp);
-#if __FreeBSD_version >= 800000
-static int     igb_mq_start(struct ifnet *, struct mbuf *);
-static int     igb_mq_start_locked(struct ifnet *,
-                   struct tx_ring *, struct mbuf *);
-static void    igb_qflush(struct ifnet *);
-#endif
-static int     igb_ioctl(struct ifnet *, u_long, caddr_t);
-static void    igb_init(void *);
-static void    igb_init_locked(struct adapter *);
-static void    igb_stop(void *);
-static void    igb_media_status(struct ifnet *, struct ifmediareq *);
-static int     igb_media_change(struct ifnet *);
-static void    igb_identify_hardware(struct adapter *);
-static int     igb_allocate_pci_resources(struct adapter *);
-static int     igb_allocate_msix(struct adapter *);
-static int     igb_allocate_legacy(struct adapter *);
-static int     igb_setup_msix(struct adapter *);
-static void    igb_free_pci_resources(struct adapter *);
-static void    igb_local_timer(void *);
-static void    igb_reset(struct adapter *);
-static int     igb_setup_interface(device_t, struct adapter *);
-static int     igb_allocate_queues(struct adapter *);
-static void    igb_configure_queues(struct adapter *);
-
-static int     igb_allocate_transmit_buffers(struct tx_ring *);
-static void    igb_setup_transmit_structures(struct adapter *);
-static void    igb_setup_transmit_ring(struct tx_ring *);
-static void    igb_initialize_transmit_units(struct adapter *);
-static void    igb_free_transmit_structures(struct adapter *);
-static void    igb_free_transmit_buffers(struct tx_ring *);
-
-static int     igb_allocate_receive_buffers(struct rx_ring *);
-static int     igb_setup_receive_structures(struct adapter *);
-static int     igb_setup_receive_ring(struct rx_ring *);
-static void    igb_initialize_receive_units(struct adapter *);
-static void    igb_free_receive_structures(struct adapter *);
-static void    igb_free_receive_buffers(struct rx_ring *);
-static void    igb_free_receive_ring(struct rx_ring *);
-
-static void    igb_enable_intr(struct adapter *);
-static void    igb_disable_intr(struct adapter *);
-static void    igb_update_stats_counters(struct adapter *);
-static bool    igb_txeof(struct tx_ring *);
-
-static __inline        void igb_rx_discard(struct rx_ring *, int);
-static __inline void igb_rx_input(struct rx_ring *,
-                   struct ifnet *, struct mbuf *, u32);
-
-static bool    igb_rxeof(struct igb_queue *, int, int *);
-static void    igb_rx_checksum(u32, struct mbuf *, u32);
-static int     igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static bool    igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
-static void    igb_set_promisc(struct adapter *);
-static void    igb_disable_promisc(struct adapter *);
-static void    igb_set_multi(struct adapter *);
-static void    igb_update_link_status(struct adapter *);
-static void    igb_refresh_mbufs(struct rx_ring *, int);
-
-static void    igb_register_vlan(void *, struct ifnet *, u16);
-static void    igb_unregister_vlan(void *, struct ifnet *, u16);
-static void    igb_setup_vlan_hw_support(struct adapter *);
-
-static int     igb_xmit(struct tx_ring *, struct mbuf **);
-static int     igb_dma_malloc(struct adapter *, bus_size_t,
-                   struct igb_dma_alloc *, int);
-static void    igb_dma_free(struct adapter *, struct igb_dma_alloc *);
-static int     igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
-static void    igb_print_nvm_info(struct adapter *);
-static int     igb_is_valid_ether_addr(u8 *);
-static void     igb_add_hw_stats(struct adapter *);
-
-static void    igb_vf_init_stats(struct adapter *);
-static void    igb_update_vf_stats_counters(struct adapter *);
-
-/* Management and WOL Support */
-static void    igb_init_manageability(struct adapter *);
-static void    igb_release_manageability(struct adapter *);
-static void     igb_get_hw_control(struct adapter *);
-static void     igb_release_hw_control(struct adapter *);
-static void     igb_enable_wakeup(device_t);
-static void     igb_led_func(void *, int);
-
-static int     igb_irq_fast(void *);
-static void    igb_msix_que(void *);
-static void    igb_msix_link(void *);
-static void    igb_handle_que(void *context, int pending);
-static void    igb_handle_link(void *context, int pending);
-
-static void    igb_set_sysctl_value(struct adapter *, const char *,
-                   const char *, int *, int);
-static int     igb_set_flowcntl(SYSCTL_HANDLER_ARGS);
-
-#ifdef DEVICE_POLLING
-static poll_handler_t igb_poll;
-#endif /* POLLING */
-
-/*********************************************************************
- *  FreeBSD Device Interface Entry Points
- *********************************************************************/
-
-static device_method_t igb_methods[] = {
-       /* Device interface */
-       DEVMETHOD(device_probe, igb_probe),
-       DEVMETHOD(device_attach, igb_attach),
-       DEVMETHOD(device_detach, igb_detach),
-       DEVMETHOD(device_shutdown, igb_shutdown),
-       DEVMETHOD(device_suspend, igb_suspend),
-       DEVMETHOD(device_resume, igb_resume),
-       {0, 0}
-};
-
-static driver_t igb_driver = {
-       "igb", igb_methods, sizeof(struct adapter),
-};
-
-static devclass_t igb_devclass;
-DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
-MODULE_DEPEND(igb, pci, 1, 1, 1);
-MODULE_DEPEND(igb, ether, 1, 1, 1);
-
-/*********************************************************************
- *  Tunable default values.
- *********************************************************************/
-
-/* Descriptor defaults */
-static int igb_rxd = IGB_DEFAULT_RXD;
-static int igb_txd = IGB_DEFAULT_TXD;
-TUNABLE_INT("hw.igb.rxd", &igb_rxd);
-TUNABLE_INT("hw.igb.txd", &igb_txd);
-
-/*
-** AIM: Adaptive Interrupt Moderation
-** which means that the interrupt rate
-** is varied over time based on the
-** traffic for that interrupt vector
-*/
-static int igb_enable_aim = TRUE;
-TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim);
-
-/*
- * MSIX should be the default for best performance,
- * but this allows it to be forced off for testing.
- */         
-static int igb_enable_msix = 1;
-TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
-
-/*
-** Tuneable Interrupt rate
-*/
-static int igb_max_interrupt_rate = 8000;
-TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate);
-
-/*
-** Header split causes the packet header to
-** be DMA'd to a separate mbuf from the payload.
-** This can have memory alignment benefits. But
-** another plus is that small packets often fit
-** into the header and thus use no cluster. It's
-** a very workload-dependent feature.
-*/
-static bool igb_header_split = FALSE;
-TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
-
-/*
-** This will autoconfigure based on
-** the number of CPUs if left at 0.
-*/
-static int igb_num_queues = 0;
-TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
-
-/* How many packets rxeof tries to clean at a time */
-static int igb_rx_process_limit = 100;
-TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
-
-/* Flow control setting - default to FULL */
-static int igb_fc_setting = e1000_fc_full;
-TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
-
-/* Energy Efficient Ethernet - default to off */
-static int igb_eee_disabled = TRUE;
-TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
-
-/*
-** DMA Coalescing, only for i350 - default to off,
-** this feature is for power savings
-*/
-static int igb_dma_coalesce = FALSE;
-TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
-
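-/*
-** All of the knobs above are loader tunables; they would normally be
-** overridden from /boot/loader.conf before the module is loaded.
-** Illustrative values only, e.g.:
-**
-**   hw.igb.rxd="2048"
-**   hw.igb.txd="2048"
-**   hw.igb.enable_aim="0"
-**   hw.igb.num_queues="2"
-*/
-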
-/*********************************************************************
- *  Device identification routine
- *
- *  igb_probe determines whether the driver should be loaded on an
- *  adapter, based on the PCI vendor/device ID of the adapter.
- *
- *  return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
-
-static int
-igb_probe(device_t dev)
-{
-       char            adapter_name[60];
-       uint16_t        pci_vendor_id = 0;
-       uint16_t        pci_device_id = 0;
-       uint16_t        pci_subvendor_id = 0;
-       uint16_t        pci_subdevice_id = 0;
-       igb_vendor_info_t *ent;
-
-       INIT_DEBUGOUT("igb_probe: begin");
-
-       pci_vendor_id = pci_get_vendor(dev);
-       if (pci_vendor_id != IGB_VENDOR_ID)
-               return (ENXIO);
-
-       pci_device_id = pci_get_device(dev);
-       pci_subvendor_id = pci_get_subvendor(dev);
-       pci_subdevice_id = pci_get_subdevice(dev);
-
-       ent = igb_vendor_info_array;
-       while (ent->vendor_id != 0) {
-               if ((pci_vendor_id == ent->vendor_id) &&
-                   (pci_device_id == ent->device_id) &&
-
-                   ((pci_subvendor_id == ent->subvendor_id) ||
-                   (ent->subvendor_id == PCI_ANY_ID)) &&
-
-                   ((pci_subdevice_id == ent->subdevice_id) ||
-                   (ent->subdevice_id == PCI_ANY_ID))) {
-                       sprintf(adapter_name, "%s %s",
-                               igb_strings[ent->index],
-                               igb_driver_version);
-                       device_set_desc_copy(dev, adapter_name);
-                       return (BUS_PROBE_DEFAULT);
-               }
-               ent++;
-       }
-
-       return (ENXIO);
-}
-
-/*********************************************************************
- *  Device initialization routine
- *
- *  The attach entry point is called when the driver is being loaded.
- *  This routine identifies the type of hardware, allocates all resources
- *  and initializes the hardware.
- *
- *  return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-igb_attach(device_t dev)
-{
-       struct adapter  *adapter;
-       int             error = 0;
-       u16             eeprom_data;
-
-       INIT_DEBUGOUT("igb_attach: begin");
-
-       adapter = device_get_softc(dev);
-       adapter->dev = adapter->osdep.dev = dev;
-       IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
-
-       /* SYSCTL stuff */
-       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
-           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
-           OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
-           igb_sysctl_nvm_info, "I", "NVM Information");
-
-       SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
-           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
-           OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
-           &igb_enable_aim, 1, "Interrupt Moderation");
-
-       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
-           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
-           OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
-           adapter, 0, igb_set_flowcntl, "I", "Flow Control");
-
-       callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
-
-       /* Determine hardware and mac info */
-       igb_identify_hardware(adapter);
-
-       /* Setup PCI resources */
-       if (igb_allocate_pci_resources(adapter)) {
-               device_printf(dev, "Allocation of PCI resources failed\n");
-               error = ENXIO;
-               goto err_pci;
-       }
-
-       /* Do Shared Code initialization */
-       if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
-               device_printf(dev, "Setup of Shared code failed\n");
-               error = ENXIO;
-               goto err_pci;
-       }
-
-       e1000_get_bus_info(&adapter->hw);
-
-       /* Sysctl for limiting the amount of work done in the taskqueue */
-       igb_set_sysctl_value(adapter, "rx_processing_limit",
-           "max number of rx packets to process", &adapter->rx_process_limit,
-           igb_rx_process_limit);
-
-       /*
-        * Validate the number of transmit and receive descriptors. It
-        * must not exceed the hardware maximum, and the resulting ring
-        * size in bytes must be a multiple of IGB_DBA_ALIGN.
-        */
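-       /*
-        * For example, with 16-byte descriptors and the usual 128-byte
-        * IGB_DBA_ALIGN (assumed here), a ring length must be a multiple
-        * of 8 descriptors to pass this check.
-        */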
-       if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
-           (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
-               device_printf(dev, "Using %d TX descriptors instead of %d!\n",
-                   IGB_DEFAULT_TXD, igb_txd);
-               adapter->num_tx_desc = IGB_DEFAULT_TXD;
-       } else
-               adapter->num_tx_desc = igb_txd;
-       if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
-           (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
-               device_printf(dev, "Using %d RX descriptors instead of %d!\n",
-                   IGB_DEFAULT_RXD, igb_rxd);
-               adapter->num_rx_desc = IGB_DEFAULT_RXD;
-       } else
-               adapter->num_rx_desc = igb_rxd;
-
-       adapter->hw.mac.autoneg = DO_AUTO_NEG;
-       adapter->hw.phy.autoneg_wait_to_complete = FALSE;
-       adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
-
-       /* Copper options */
-       if (adapter->hw.phy.media_type == e1000_media_type_copper) {
-               adapter->hw.phy.mdix = AUTO_ALL_MODES;
-               adapter->hw.phy.disable_polarity_correction = FALSE;
-               adapter->hw.phy.ms_type = IGB_MASTER_SLAVE;
-       }
-
-       /*
-        * Set the frame limits assuming
-        * standard ethernet sized frames.
-        */
-       adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
-       adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
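-       /*
-        * For a standard 1500-byte MTU this works out to a 1518-byte
-        * maximum frame (1500 + 14-byte header + 4-byte FCS) and a
-        * 64-byte minimum frame (60-byte ETH_ZLEN + 4-byte FCS).
-        */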
-
-       /*
-       ** Allocate and Setup Queues
-       */
-       if (igb_allocate_queues(adapter)) {
-               error = ENOMEM;
-               goto err_pci;
-       }
-
-       /* Allocate the appropriate stats memory */
-       if (adapter->vf_ifp) {
-               adapter->stats =
-                   (struct e1000_vf_stats *)malloc(sizeof \
-                   (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
-               igb_vf_init_stats(adapter);
-       } else
-               adapter->stats =
-                   (struct e1000_hw_stats *)malloc(sizeof \
-                   (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
-       if (adapter->stats == NULL) {
-               device_printf(dev, "Can not allocate stats memory\n");
-               error = ENOMEM;
-               goto err_late;
-       }
-
-       /* Allocate multicast array memory. */
-       adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
-           MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
-       if (adapter->mta == NULL) {
-               device_printf(dev, "Can not allocate multicast setup array\n");
-               error = ENOMEM;
-               goto err_late;
-       }
-
-       /* Some adapter-specific advanced features */
-       if (adapter->hw.mac.type >= e1000_i350) {
-               igb_set_sysctl_value(adapter, "dma_coalesce",
-                   "configure dma coalesce",
-                   &adapter->dma_coalesce, igb_dma_coalesce);
-               igb_set_sysctl_value(adapter, "eee_disabled",
-                   "disable Energy Efficient Ethernet",
-                   &adapter->hw.dev_spec._82575.eee_disable,
-                   igb_eee_disabled);
-               e1000_set_eee_i350(&adapter->hw);
-       }
-
-       /*
-       ** Start from a known state; this is
-       ** important for reading the NVM and
-       ** MAC address correctly.
-       */
-       e1000_reset_hw(&adapter->hw);
-
-       /* Make sure we have a good EEPROM before we read from it */
-       if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
-               /*
-               ** Some PCI-E parts fail the first check due to
-               ** the link being in a sleep state; call it again,
-               ** and if it fails a second time it's a real issue.
-               */
-               if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
-                       device_printf(dev,
-                           "The EEPROM Checksum Is Not Valid\n");
-                       error = EIO;
-                       goto err_late;
-               }
-       }
-
-       /*
-       ** Copy the permanent MAC address out of the EEPROM
-       */
-       if (e1000_read_mac_addr(&adapter->hw) < 0) {
-               device_printf(dev, "EEPROM read error while reading MAC"
-                   " address\n");
-               error = EIO;
-               goto err_late;
-       }
-       /* Check its sanity */
-       if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) {
-               device_printf(dev, "Invalid MAC address\n");
-               error = EIO;
-               goto err_late;
-       }
-
-       /* 
-       ** Configure Interrupts
-       */
-       if ((adapter->msix > 1) && (igb_enable_msix))
-               error = igb_allocate_msix(adapter);
-       else /* MSI or Legacy */
-               error = igb_allocate_legacy(adapter);
-       if (error)
-               goto err_late;
-
-       /* Setup OS specific network interface */
-       if (igb_setup_interface(dev, adapter) != 0)
-               goto err_late;
-
-       /* Now get a good starting state */
-       igb_reset(adapter);
-
-       /* Initialize statistics */
-       igb_update_stats_counters(adapter);
-
-       adapter->hw.mac.get_link_status = 1;
-       igb_update_link_status(adapter);
-
-       /* Indicate SOL/IDER usage */
-       if (e1000_check_reset_block(&adapter->hw))
-               device_printf(dev,
-                   "PHY reset is blocked due to SOL/IDER session.\n");
-
-       /* Determine if we have to control management hardware */
-       adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
-
-       /*
-        * Setup Wake-on-Lan
-        */
-       /* APME bit in EEPROM is mapped to WUC.APME */
-       eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME;
-       if (eeprom_data)
-               adapter->wol = E1000_WUFC_MAG;
-
-       /* Register for VLAN events */
-       adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
-            igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-       adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
-            igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-
-       igb_add_hw_stats(adapter);
-
-       /* Tell the stack that the interface is not active */
-       adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
-       adapter->led_dev = led_create(igb_led_func, adapter,
-           device_get_nameunit(dev));
-
-       INIT_DEBUGOUT("igb_attach: end");
-
-       return (0);
-
-err_late:
-       igb_free_transmit_structures(adapter);
-       igb_free_receive_structures(adapter);
-       igb_release_hw_control(adapter);
-       if (adapter->ifp != NULL)
-               if_free(adapter->ifp);
-err_pci:
-       igb_free_pci_resources(adapter);
-       free(adapter->mta, M_DEVBUF);
-       IGB_CORE_LOCK_DESTROY(adapter);
-
-       return (error);
-}
-
-/*********************************************************************
- *  Device removal routine
- *
- *  The detach entry point is called when the driver is being removed.
- *  This routine stops the adapter and deallocates all the resources
- *  that were allocated for driver operation.
- *
- *  return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-igb_detach(device_t dev)
-{
-       struct adapter  *adapter = device_get_softc(dev);
-       struct ifnet    *ifp = adapter->ifp;
-
-       INIT_DEBUGOUT("igb_detach: begin");
-
-       /* Make sure VLANS are not using driver */
-       if (adapter->ifp->if_vlantrunk != NULL) {
-               device_printf(dev,"Vlan in use, detach first\n");
-               return (EBUSY);
-       }
-
-       if (adapter->led_dev != NULL)
-               led_destroy(adapter->led_dev);
-
-#ifdef DEVICE_POLLING
-       if (ifp->if_capenable & IFCAP_POLLING)
-               ether_poll_deregister(ifp);
-#endif
-
-       IGB_CORE_LOCK(adapter);
-       adapter->in_detach = 1;
-       igb_stop(adapter);
-       IGB_CORE_UNLOCK(adapter);
-
-       e1000_phy_hw_reset(&adapter->hw);
-
-       /* Give control back to firmware */
-       igb_release_manageability(adapter);
-       igb_release_hw_control(adapter);
-
-       if (adapter->wol) {
-               E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
-               E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
-               igb_enable_wakeup(dev);
-       }
-
-       /* Unregister VLAN events */
-       if (adapter->vlan_attach != NULL)
-               EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
-       if (adapter->vlan_detach != NULL)
-               EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-
-       ether_ifdetach(adapter->ifp);
-
-       callout_drain(&adapter->timer);
-
-       igb_free_pci_resources(adapter);
-       bus_generic_detach(dev);
-       if_free(ifp);
-
-       igb_free_transmit_structures(adapter);
-       igb_free_receive_structures(adapter);
-       free(adapter->mta, M_DEVBUF);
-
-       IGB_CORE_LOCK_DESTROY(adapter);
-
-       return (0);
-}
-
-/*********************************************************************
- *
- *  Shutdown entry point
- *
- **********************************************************************/
-
-static int
-igb_shutdown(device_t dev)
-{
-       return igb_suspend(dev);
-}
-
-/*
- * Suspend/resume device methods.
- */
-static int
-igb_suspend(device_t dev)
-{
-       struct adapter *adapter = device_get_softc(dev);
-
-       IGB_CORE_LOCK(adapter);
-
-       igb_stop(adapter);
-
-        igb_release_manageability(adapter);
-       igb_release_hw_control(adapter);
-
-        if (adapter->wol) {
-                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
-                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
-                igb_enable_wakeup(dev);
-        }
-
-       IGB_CORE_UNLOCK(adapter);
-
-       return bus_generic_suspend(dev);
-}
-
-static int
-igb_resume(device_t dev)
-{
-       struct adapter *adapter = device_get_softc(dev);
-       struct ifnet *ifp = adapter->ifp;
-
-       IGB_CORE_LOCK(adapter);
-       igb_init_locked(adapter);
-       igb_init_manageability(adapter);
-
-       if ((ifp->if_flags & IFF_UP) &&
-           (ifp->if_drv_flags & IFF_DRV_RUNNING))
-               igb_start(ifp);
-
-       IGB_CORE_UNLOCK(adapter);
-
-       return bus_generic_resume(dev);
-}
-
-
-/*********************************************************************
- *  Transmit entry point
- *
- *  igb_start is called by the stack to initiate a transmit.
- *  The driver will remain in this routine as long as there are
- *  packets to transmit and transmit resources are available.
- *  If resources are not available, the stack is notified and
- *  the packet is requeued.
- **********************************************************************/
-
-static void
-igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
-{
-       struct adapter  *adapter = ifp->if_softc;
-       struct mbuf     *m_head;
-
-       IGB_TX_LOCK_ASSERT(txr);
-
-       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
-           IFF_DRV_RUNNING)
-               return;
-       if (!adapter->link_active)
-               return;
-
-       /* Call cleanup if number of TX descriptors low */
-       if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
-               igb_txeof(txr);
-
-       while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-               if (txr->tx_avail <= IGB_MAX_SCATTER) {
-                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-                       break;
-               }
-               IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
-               if (m_head == NULL)
-                       break;
-               /*
-                *  Encapsulation can modify our pointer, and/or make it
-                *  NULL on failure.  In that event, we can't requeue.
-                */
-               if (igb_xmit(txr, &m_head)) {
-                       if (m_head == NULL)
-                               break;
-                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-                       IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-                       break;
-               }
-
-               /* Send a copy of the frame to the BPF listener */
-               ETHER_BPF_MTAP(ifp, m_head);
-
-               /* Set watchdog on */
-               txr->watchdog_time = ticks;
-               txr->queue_status = IGB_QUEUE_WORKING;
-       }
-}
-/*
- * Legacy TX driver routine, called from the
- * stack, always uses tx[0], and spins for it.
- * Should not be used with multiqueue tx
- */
-static void
-igb_start(struct ifnet *ifp)
-{
-       struct adapter  *adapter = ifp->if_softc;
-       struct tx_ring  *txr = adapter->tx_rings;
-
-       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-               IGB_TX_LOCK(txr);
-               igb_start_locked(txr, ifp);
-               IGB_TX_UNLOCK(txr);
-       }
-       return;
-}
-
-#if __FreeBSD_version >= 800000
-/*
-** Multiqueue Transmit driver
-**
-*/
-static int
-igb_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
-       struct adapter          *adapter = ifp->if_softc;
-       struct igb_queue        *que;
-       struct tx_ring          *txr;
-       int                     i = 0, err = 0;
-
-       /* Which queue to use */
-       if ((m->m_flags & M_FLOWID) != 0)
-               i = m->m_pkthdr.flowid % adapter->num_queues;
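-       /*
-        * e.g. with 4 queues configured, a flow id of 10 maps to
-        * queue 2; packets without M_FLOWID fall back to queue 0.
-        */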
-
-       txr = &adapter->tx_rings[i];
-       que = &adapter->queues[i];
-
-       if (IGB_TX_TRYLOCK(txr)) {
-               err = igb_mq_start_locked(ifp, txr, m);
-               IGB_TX_UNLOCK(txr);
-       } else {
-               err = drbr_enqueue(ifp, txr->br, m);
-               taskqueue_enqueue(que->tq, &que->que_task);
-       }
-
-       return (err);
-}
-
-static int
-igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
-{
-       struct adapter  *adapter = txr->adapter;
-        struct mbuf     *next;
-        int             err = 0, enq;
-
-       IGB_TX_LOCK_ASSERT(txr);
-
-       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
-           IFF_DRV_RUNNING || adapter->link_active == 0) {
-               if (m != NULL)
-                       err = drbr_enqueue(ifp, txr->br, m);
-               return (err);
-       }
-
-       enq = 0;
-       if (m == NULL) {
-               next = drbr_dequeue(ifp, txr->br);
-       } else if (drbr_needs_enqueue(ifp, txr->br)) {
-               if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
-                       return (err);
-               next = drbr_dequeue(ifp, txr->br);
-       } else
-               next = m;
-
-       /* Process the queue */
-       while (next != NULL) {
-               if ((err = igb_xmit(txr, &next)) != 0) {
-                       if (next != NULL)
-                               err = drbr_enqueue(ifp, txr->br, next);
-                       break;
-               }
-               enq++;
-               drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
-               ETHER_BPF_MTAP(ifp, next);
-               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
-                       break;
-               if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
-                       igb_txeof(txr);
-               if (txr->tx_avail <= IGB_MAX_SCATTER) {
-                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-                       break;
-               }
-               next = drbr_dequeue(ifp, txr->br);
-       }
-       if (enq > 0) {
-               /* Set the watchdog */
-               txr->queue_status = IGB_QUEUE_WORKING;
-               txr->watchdog_time = ticks;
-       }
-       return (err);
-}
-
-/*
-** Flush all ring buffers
-*/
-static void
-igb_qflush(struct ifnet *ifp)
-{
-       struct adapter  *adapter = ifp->if_softc;
-       struct tx_ring  *txr = adapter->tx_rings;
-       struct mbuf     *m;
-
-       for (int i = 0; i < adapter->num_queues; i++, txr++) {
-               IGB_TX_LOCK(txr);
-               while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
-                       m_freem(m);
-               IGB_TX_UNLOCK(txr);
-       }
-       if_qflush(ifp);
-}
-#endif /* __FreeBSD_version >= 800000 */
-
-/*********************************************************************
- *  Ioctl entry point
- *
- *  igb_ioctl is called when the user wants to configure the
- *  interface.
- *
- *  return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
-{
-       struct adapter  *adapter = ifp->if_softc;
-       struct ifreq *ifr = (struct ifreq *)data;
-#ifdef INET
-       struct ifaddr *ifa = (struct ifaddr *)data;
-#endif
-       int error = 0;
-
-       if (adapter->in_detach)
-               return (error);
-
-       switch (command) {
-       case SIOCSIFADDR:
-#ifdef INET
-               if (ifa->ifa_addr->sa_family == AF_INET) {
-                       /*
-                        * XXX
-                        * Since resetting the hardware takes a very long time
-                        * and results in link renegotiation, we only
-                        * initialize the hardware when it is absolutely
-                        * required.
-                        */
-                       ifp->if_flags |= IFF_UP;
-                       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
-                               IGB_CORE_LOCK(adapter);
-                               igb_init_locked(adapter);
-                               IGB_CORE_UNLOCK(adapter);
-                       }
-                       if (!(ifp->if_flags & IFF_NOARP))
-                               arp_ifinit(ifp, ifa);
-               } else
-#endif
-                       error = ether_ioctl(ifp, command, data);
-               break;
-       case SIOCSIFMTU:
-           {
-               int max_frame_size;
-
-               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
-
-               IGB_CORE_LOCK(adapter);
-               max_frame_size = 9234;
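-               /*
-                * A frame limit of 9234 bytes allows an MTU of up to
-                * 9216 bytes once the 14-byte header and 4-byte CRC
-                * are subtracted; larger requests fail with EINVAL.
-                */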
-               if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
-                   ETHER_CRC_LEN) {
-                       IGB_CORE_UNLOCK(adapter);
-                       error = EINVAL;
-                       break;
-               }
-
-               ifp->if_mtu = ifr->ifr_mtu;
-               adapter->max_frame_size =
-                   ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-               igb_init_locked(adapter);
-               IGB_CORE_UNLOCK(adapter);
-               break;
-           }
-       case SIOCSIFFLAGS:
-               IOCTL_DEBUGOUT("ioctl rcv'd:\
-                   SIOCSIFFLAGS (Set Interface Flags)");
-               IGB_CORE_LOCK(adapter);
-               if (ifp->if_flags & IFF_UP) {
-                       if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
-                               if ((ifp->if_flags ^ adapter->if_flags) &
-                                   (IFF_PROMISC | IFF_ALLMULTI)) {
-                                       igb_disable_promisc(adapter);
-                                       igb_set_promisc(adapter);
-                               }
-                       } else
-                               igb_init_locked(adapter);
-               } else
-                       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-                               igb_stop(adapter);
-               adapter->if_flags = ifp->if_flags;
-               IGB_CORE_UNLOCK(adapter);
-               break;
-       case SIOCADDMULTI:
-       case SIOCDELMULTI:
-               IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
-               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-                       IGB_CORE_LOCK(adapter);
-                       igb_disable_intr(adapter);
-                       igb_set_multi(adapter);
-#ifdef DEVICE_POLLING
-                       if (!(ifp->if_capenable & IFCAP_POLLING))
-#endif
-                               igb_enable_intr(adapter);
-                       IGB_CORE_UNLOCK(adapter);
-               }
-               break;
-       case SIOCSIFMEDIA:
-               /*
-               ** As the speed/duplex settings are being
-               ** changed, we need to reset the PHY.
-               */
-               adapter->hw.phy.reset_disable = FALSE;
-               /* Check SOL/IDER usage */
-               IGB_CORE_LOCK(adapter);
-               if (e1000_check_reset_block(&adapter->hw)) {
-                       IGB_CORE_UNLOCK(adapter);
-                       device_printf(adapter->dev, "Media change is"
-                           " blocked due to SOL/IDER session.\n");
-                       break;
-               }
-               IGB_CORE_UNLOCK(adapter);
-       case SIOCGIFMEDIA:
-               IOCTL_DEBUGOUT("ioctl rcv'd: \
-                   SIOCxIFMEDIA (Get/Set Interface Media)");
-               error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
-               break;
-       case SIOCSIFCAP:
-           {
-               int mask, reinit;
-
-               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
-               reinit = 0;
-               mask = ifr->ifr_reqcap ^ ifp->if_capenable;
-#ifdef DEVICE_POLLING
-               if (mask & IFCAP_POLLING) {
-                       if (ifr->ifr_reqcap & IFCAP_POLLING) {
-                               error = ether_poll_register(igb_poll, ifp);
-                               if (error)
-                                       return (error);
-                               IGB_CORE_LOCK(adapter);
-                               igb_disable_intr(adapter);
-                               ifp->if_capenable |= IFCAP_POLLING;
-                               IGB_CORE_UNLOCK(adapter);
-                       } else {
-                               error = ether_poll_deregister(ifp);
-                               /* Enable interrupt even in error case */
-                               IGB_CORE_LOCK(adapter);
-                               igb_enable_intr(adapter);
-                               ifp->if_capenable &= ~IFCAP_POLLING;
-                               IGB_CORE_UNLOCK(adapter);
-                       }
-               }
-#endif
-               if (mask & IFCAP_HWCSUM) {
-                       ifp->if_capenable ^= IFCAP_HWCSUM;
-                       reinit = 1;
-               }
-               if (mask & IFCAP_TSO4) {
-                       ifp->if_capenable ^= IFCAP_TSO4;
-                       reinit = 1;
-               }
-               if (mask & IFCAP_VLAN_HWTAGGING) {
-                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
-                       reinit = 1;
-               }
-               if (mask & IFCAP_VLAN_HWFILTER) {
-                       ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
-                       reinit = 1;
-               }
-               if (mask & IFCAP_LRO) {
-                       ifp->if_capenable ^= IFCAP_LRO;
-                       reinit = 1;
-               }
-               if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
-                       igb_init(adapter);
-               VLAN_CAPABILITIES(ifp);
-               break;
-           }
-
-       default:
-               error = ether_ioctl(ifp, command, data);
-               break;
-       }
-
-       return (error);
-}
-
-
-/*********************************************************************
- *  Init entry point
- *
- *  This routine is used in two ways. It is used by the stack as
- *  init entry point in network interface structure. It is also used
- *  by the driver as a hw/sw initialization routine to get to a
- *  consistent state.
- *
- *  return 0 on success, positive on failure
- **********************************************************************/
-
-static void
-igb_init_locked(struct adapter *adapter)
-{
-       struct ifnet    *ifp = adapter->ifp;
-       device_t        dev = adapter->dev;
-
-       INIT_DEBUGOUT("igb_init: begin");
-
-       IGB_CORE_LOCK_ASSERT(adapter);
-
-       igb_disable_intr(adapter);
-       callout_stop(&adapter->timer);
-
-       /* Get the latest MAC address; the user can use a LAA */
-        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
-              ETHER_ADDR_LEN);
-
-       /* Put the address into the Receive Address Array */
-       e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
-
-       igb_reset(adapter);
-       igb_update_link_status(adapter);
-
-       E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
-
-       /* Set hardware offload abilities */
-       ifp->if_hwassist = 0;
-       if (ifp->if_capenable & IFCAP_TXCSUM) {
-               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
-               if (adapter->hw.mac.type == e1000_82576)
-                       ifp->if_hwassist |= CSUM_SCTP;
-#endif
-       }
-
-       if (ifp->if_capenable & IFCAP_TSO4)
-               ifp->if_hwassist |= CSUM_TSO;
-
-       /* Configure for OS presence */
-       igb_init_manageability(adapter);
-
-       /* Prepare transmit descriptors and buffers */
-       igb_setup_transmit_structures(adapter);
-       igb_initialize_transmit_units(adapter);
-
-       /* Setup Multicast table */
-       igb_set_multi(adapter);
-
-       /*
-       ** Figure out the desired mbuf pool
-       ** for doing jumbo/packetsplit
-       */
-       if (adapter->max_frame_size <= 2048)
-               adapter->rx_mbuf_sz = MCLBYTES;
-       else if (adapter->max_frame_size <= 4096)
-               adapter->rx_mbuf_sz = MJUMPAGESIZE;
-       else
-               adapter->rx_mbuf_sz = MJUM9BYTES;
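-       /*
-        * e.g. the default 1518-byte frame fits in a regular 2k cluster
-        * (MCLBYTES), while a 9000-byte MTU gives a 9018-byte frame and
-        * thus 9k jumbo clusters (MJUM9BYTES).
-        */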
-
-       /* Prepare receive descriptors and buffers */
-       if (igb_setup_receive_structures(adapter)) {
-               device_printf(dev, "Could not setup receive structures\n");
-               return;
-       }
-       igb_initialize_receive_units(adapter);
-
-        /* Enable VLAN support */
-       if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
-               igb_setup_vlan_hw_support(adapter);
-                                
-       /* Don't lose promiscuous settings */
-       igb_set_promisc(adapter);
-
-       ifp->if_drv_flags |= IFF_DRV_RUNNING;
-       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
-       callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
-       e1000_clear_hw_cntrs_base_generic(&adapter->hw);
-
-       if (adapter->msix > 1) /* Set up queue routing */
-               igb_configure_queues(adapter);
-
-       /* this clears any pending interrupts */
-       E1000_READ_REG(&adapter->hw, E1000_ICR);
-#ifdef DEVICE_POLLING
-       /*
-        * Only enable interrupts if we are not polling; make sure
-        * they are off otherwise.
-        */
-       if (ifp->if_capenable & IFCAP_POLLING)
-               igb_disable_intr(adapter);
-       else
-#endif /* DEVICE_POLLING */
-       {
-               igb_enable_intr(adapter);
-               E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
-       }
-
-       /* Set Energy Efficient Ethernet */
-       e1000_set_eee_i350(&adapter->hw);
-
-       /* Don't reset the phy next time init gets called */
-       adapter->hw.phy.reset_disable = TRUE;
-}
-
-static void
-igb_init(void *arg)
-{
-       struct adapter *adapter = arg;
-
-       IGB_CORE_LOCK(adapter);
-       igb_init_locked(adapter);
-       IGB_CORE_UNLOCK(adapter);
-}
-
-
-static void
-igb_handle_que(void *context, int pending)
-{
-       struct igb_queue *que = context;
-       struct adapter *adapter = que->adapter;
-       struct tx_ring *txr = que->txr;
-       struct ifnet    *ifp = adapter->ifp;
-
-       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-               bool    more;
-
-               more = igb_rxeof(que, -1, NULL);
-
-               IGB_TX_LOCK(txr);
-               if (igb_txeof(txr))
-                       more = TRUE;
-#if __FreeBSD_version >= 800000
-               if (!drbr_empty(ifp, txr->br))
-                       igb_mq_start_locked(ifp, txr, NULL);
-#else
-               igb_start_locked(txr, ifp);
-#endif
-               IGB_TX_UNLOCK(txr);
-               if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
-                       taskqueue_enqueue(que->tq, &que->que_task);
-                       return;
-               }
-       }
-
-#ifdef DEVICE_POLLING
-       if (ifp->if_capenable & IFCAP_POLLING)
-               return;
-#endif
-       /* Reenable this interrupt */
-       if (que->eims)
-               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
-       else
-               igb_enable_intr(adapter);
-}
-
-/* Deal with link in a sleepable context */
-static void
-igb_handle_link(void *context, int pending)
-{
-       struct adapter *adapter = context;
-
-       adapter->hw.mac.get_link_status = 1;
-       igb_update_link_status(adapter);
-}
-
-/*********************************************************************
- *
- *  MSI/Legacy Deferred
- *  Interrupt Service routine  
- *
- *********************************************************************/
-static int
-igb_irq_fast(void *arg)
-{
-       struct adapter          *adapter = arg;
-       struct igb_queue        *que = adapter->queues;
-       u32                     reg_icr;
-
-
-       reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
-
-       /* Hot eject?  */
-       if (reg_icr == 0xffffffff)
-               return FILTER_STRAY;
-
-       /* Definitely not our interrupt.  */
-       if (reg_icr == 0x0)
-               return FILTER_STRAY;
-
-       if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
-               return FILTER_STRAY;
-
-       /*
-        * Mask interrupts until the taskqueue is finished running.  This is
-        * cheap, just assume that it is needed.  This also works around the
-        * MSI message reordering errata on certain systems.
-        */
-       igb_disable_intr(adapter);
-       taskqueue_enqueue(que->tq, &que->que_task);
-
-       /* Link status change */
-       if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
-               taskqueue_enqueue(que->tq, &adapter->link_task);
-
-       if (reg_icr & E1000_ICR_RXO)
-               adapter->rx_overruns++;
-       return FILTER_HANDLED;
-}
-
-#ifdef DEVICE_POLLING
-/*********************************************************************
- *
- *  Legacy polling routine: if using this code you MUST be sure that
- *  multiqueue is not defined, i.e., set igb_num_queues to 1.
- *
- *********************************************************************/
-#if __FreeBSD_version >= 800000
-#define POLL_RETURN_COUNT(a) (a)
-static int
-#else
-#define POLL_RETURN_COUNT(a)
-static void
-#endif
-igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
-{
-       struct adapter          *adapter = ifp->if_softc;
-       struct igb_queue        *que = adapter->queues;
-       struct tx_ring          *txr = adapter->tx_rings;
-       u32                     reg_icr, rx_done = 0;
-       u32                     loop = IGB_MAX_LOOP;
-       bool                    more;
-
-       IGB_CORE_LOCK(adapter);
-       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
-               IGB_CORE_UNLOCK(adapter);
-               return POLL_RETURN_COUNT(rx_done);
-       }
-
-       if (cmd == POLL_AND_CHECK_STATUS) {
-               reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
-               /* Link status change */
-               if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
-                       igb_handle_link(adapter, 0);
-
-               if (reg_icr & E1000_ICR_RXO)
-                       adapter->rx_overruns++;
-       }
-       IGB_CORE_UNLOCK(adapter);
-
-       igb_rxeof(que, count, &rx_done);
-
-       IGB_TX_LOCK(txr);
-       do {
-               more = igb_txeof(txr);
-       } while (loop-- && more);
-#if __FreeBSD_version >= 800000
-       if (!drbr_empty(ifp, txr->br))
-               igb_mq_start_locked(ifp, txr, NULL);
-#else
-       igb_start_locked(txr, ifp);
-#endif
-       IGB_TX_UNLOCK(txr);
-       return POLL_RETURN_COUNT(rx_done);
-}
-#endif /* DEVICE_POLLING */
-
-/*********************************************************************
- *
- *  MSIX TX Interrupt Service routine
- *
- **********************************************************************/
-static void
-igb_msix_que(void *arg)
-{
-       struct igb_queue *que = arg;
-       struct adapter *adapter = que->adapter;
-       struct tx_ring *txr = que->txr;
-       struct rx_ring *rxr = que->rxr;
-       u32             newitr = 0;
-       bool            more_tx, more_rx;
-
-       E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
-       ++que->irqs;
-
-       IGB_TX_LOCK(txr);
-       more_tx = igb_txeof(txr);
-       IGB_TX_UNLOCK(txr);
-
-       more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);
-
-       if (igb_enable_aim == FALSE)
-               goto no_calc;
-       /*
-       ** Do Adaptive Interrupt Moderation:
-        **  - Write out last calculated setting
-       **  - Calculate based on average size over
-       **    the last interval.
-       */
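-       /*
-       ** Rough illustration: at gigabit speed, 100 frames totalling
-       ** 150000 bytes in the last interval average 1500 bytes each, so
-       ** newitr = 1500 + 24 = 1524, which is halved to 762 and masked
-       ** to 760 before being saved as the next EITR setting.
-       */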
-        if (que->eitr_setting)
-                E1000_WRITE_REG(&adapter->hw,
-                    E1000_EITR(que->msix), que->eitr_setting);
-        que->eitr_setting = 0;
-
-        /* Idle, do nothing */
-        if ((txr->bytes == 0) && (rxr->bytes == 0))
-                goto no_calc;
-                                
-        /* Use half the default if sub-gig */
-        if (adapter->link_speed != 1000)
-                newitr = IGB_DEFAULT_ITR / 2;
-        else {
-               if ((txr->bytes) && (txr->packets))
-                       newitr = txr->bytes/txr->packets;
-               if ((rxr->bytes) && (rxr->packets))
-                       newitr = max(newitr,
-                           (rxr->bytes / rxr->packets));
-                newitr += 24; /* account for hardware frame, crc */
-               /* set an upper boundary */
-               newitr = min(newitr, 3000);
-               /* Be nice to the mid range */
-                if ((newitr > 300) && (newitr < 1200))
-                        newitr = (newitr / 3);
-                else
-                        newitr = (newitr / 2);
-        }
-        newitr &= 0x7FFC;  /* Mask invalid bits */
-        if (adapter->hw.mac.type == e1000_82575)
-                newitr |= newitr << 16;
-        else
-                newitr |= E1000_EITR_CNT_IGNR;
-                 
-        /* save for next interrupt */
-        que->eitr_setting = newitr;
-
-        /* Reset state */
-        txr->bytes = 0;
-        txr->packets = 0;
-        rxr->bytes = 0;
-        rxr->packets = 0;
-
-no_calc:
-       /* Schedule a clean task if needed */
-       if (more_tx || more_rx ||
-           (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE))
-               taskqueue_enqueue(que->tq, &que->que_task);
-       else
-               /* Reenable this interrupt */
-               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
-       return;
-}
-
-
-/*********************************************************************
- *
- *  MSIX Link Interrupt Service routine
- *
- **********************************************************************/
-
-static void
-igb_msix_link(void *arg)
-{
-       struct adapter  *adapter = arg;
-       u32             icr;
-
-       ++adapter->link_irq;
-       icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
-       if (!(icr & E1000_ICR_LSC))
-               goto spurious;
-       igb_handle_link(adapter, 0);
-
-spurious:
-       /* Rearm */
-       E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
-       E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
-       return;
-}
-
-
-/*********************************************************************
- *
- *  Media Ioctl callback
- *
- *  This routine is called whenever the user queries the status of
- *  the interface using ifconfig.
- *
- **********************************************************************/
-static void
-igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
-{
-       struct adapter *adapter = ifp->if_softc;
-       u_char fiber_type = IFM_1000_SX;
-
-       INIT_DEBUGOUT("igb_media_status: begin");
-
-       IGB_CORE_LOCK(adapter);
-       igb_update_link_status(adapter);
-
-       ifmr->ifm_status = IFM_AVALID;
-       ifmr->ifm_active = IFM_ETHER;
-
-       if (!adapter->link_active) {
-               IGB_CORE_UNLOCK(adapter);
-               return;
-       }
-
-       ifmr->ifm_status |= IFM_ACTIVE;
-
-       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
-           (adapter->hw.phy.media_type == e1000_media_type_internal_serdes))
-               ifmr->ifm_active |= fiber_type | IFM_FDX;
-       else {
-               switch (adapter->link_speed) {
-               case 10:
-                       ifmr->ifm_active |= IFM_10_T;
-                       break;
-               case 100:
-                       ifmr->ifm_active |= IFM_100_TX;
-                       break;
-               case 1000:
-                       ifmr->ifm_active |= IFM_1000_T;
-                       break;
-               }
-               if (adapter->link_duplex == FULL_DUPLEX)
-                       ifmr->ifm_active |= IFM_FDX;
-               else
-                       ifmr->ifm_active |= IFM_HDX;
-       }
-       IGB_CORE_UNLOCK(adapter);
-}
-
-/*********************************************************************
- *
- *  Media Ioctl callback
- *
- *  This routine is called when the user changes speed/duplex using
- *  media/mediaopt options with ifconfig.
- *
- **********************************************************************/
-static int
-igb_media_change(struct ifnet *ifp)
-{
-       struct adapter *adapter = ifp->if_softc;
-       struct ifmedia  *ifm = &adapter->media;
-
-       INIT_DEBUGOUT("igb_media_change: begin");
-
-       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
-               return (EINVAL);
-
-       IGB_CORE_LOCK(adapter);
-       switch (IFM_SUBTYPE(ifm->ifm_media)) {
-       case IFM_AUTO:
-               adapter->hw.mac.autoneg = DO_AUTO_NEG;
-               adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
-               break;
-       case IFM_1000_LX:
-       case IFM_1000_SX:
-       case IFM_1000_T:
-               adapter->hw.mac.autoneg = DO_AUTO_NEG;
-               adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
-               break;
-       case IFM_100_TX:
-               adapter->hw.mac.autoneg = FALSE;
-               adapter->hw.phy.autoneg_advertised = 0;
-               if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
-                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
-               else
-                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
-               break;
-       case IFM_10_T:
-               adapter->hw.mac.autoneg = FALSE;
-               adapter->hw.phy.autoneg_advertised = 0;
-               if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
-                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
-               else
-                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
-               break;
-       default:
-               device_printf(adapter->dev, "Unsupported media type\n");
-       }
-
-       igb_init_locked(adapter);
-       IGB_CORE_UNLOCK(adapter);
-
-       return (0);
-}
-
-
-/*********************************************************************
- *
- *  This routine maps the mbufs to Advanced TX descriptors,
- *  as used by the 82575 adapter.
- *
- **********************************************************************/
-
-static int
-igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
-{
-       struct adapter          *adapter = txr->adapter;
-       bus_dma_segment_t       segs[IGB_MAX_SCATTER];
-       bus_dmamap_t            map;
-       struct igb_tx_buffer    *tx_buffer, *tx_buffer_mapped;
-       union e1000_adv_tx_desc *txd = NULL;
-       struct mbuf             *m_head;
-       u32                     olinfo_status = 0, cmd_type_len = 0;
-       int                     nsegs, i, j, error, first, last = 0;
-       u32                     hdrlen = 0;
-
-       m_head = *m_headp;
-
-
-       /* Set basic descriptor constants */
-       cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
-       cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
-       if (m_head->m_flags & M_VLANTAG)
-               cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
-
-       /*
-        * Map the packet for DMA.
-        *
-        * Capture the first descriptor index;
-        * this descriptor will later hold the index
-        * of the EOP descriptor, which is the only one
-        * that gets a DONE bit writeback.
-        */
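-       /*
-        * For example, a 3-segment chain starting at next_avail_desc 10
-        * consumes descriptors 10-12; tx_buffers[10].next_eop is later
-        * set to 12, and only descriptor 12 carries the RS bit, so only
-        * it reports DD on writeback.
-        */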
-       first = txr->next_avail_desc;
-       tx_buffer = &txr->tx_buffers[first];
-       tx_buffer_mapped = tx_buffer;
-       map = tx_buffer->map;
-
-       error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
-           *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
-       if (error == EFBIG) {
-               struct mbuf *m;
-
-               m = m_defrag(*m_headp, M_DONTWAIT);
-               if (m == NULL) {
-                       adapter->mbuf_defrag_failed++;
-                       m_freem(*m_headp);
-                       *m_headp = NULL;
-                       return (ENOBUFS);
-               }
-               *m_headp = m;
-
-               /* Try it again */
-               error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
-                   *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
-               if (error == ENOMEM) {
-                       adapter->no_tx_dma_setup++;
-                       return (error);
-               } else if (error != 0) {
-                       adapter->no_tx_dma_setup++;
-                       m_freem(*m_headp);
-                       *m_headp = NULL;
-                       return (error);
-               }
-       } else if (error == ENOMEM) {
-               adapter->no_tx_dma_setup++;
-               return (error);
-       } else if (error != 0) {
-               adapter->no_tx_dma_setup++;
-               m_freem(*m_headp);
-               *m_headp = NULL;
-               return (error);
-       }
-
-       /* Check again to be sure we have enough descriptors */
-        if (nsegs > (txr->tx_avail - 2)) {
-                txr->no_desc_avail++;
-               bus_dmamap_unload(txr->txtag, map);
-               return (ENOBUFS);
-        }
-       m_head = *m_headp;
-
-        /*
-         * Set up the context descriptor:
-         * used when any hardware offload is done.
-        * This includes CSUM, VLAN, and TSO. It
-        * will use the first descriptor.
-         */
-        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
-               if (igb_tso_setup(txr, m_head, &hdrlen)) {
-                       cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
-                       olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
-                       olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-               } else
-                       return (ENXIO); 
-       } else if (igb_tx_ctx_setup(txr, m_head))
-               olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-
-       /* Calculate payload length */
-       olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
-           << E1000_ADVTXD_PAYLEN_SHIFT);
-
-       /* 82575 needs the queue index added */
-       if (adapter->hw.mac.type == e1000_82575)
-               olinfo_status |= txr->me << 4;
-
-       /* Set up our transmit descriptors */
-       i = txr->next_avail_desc;
-       for (j = 0; j < nsegs; j++) {
-               bus_size_t seg_len;
-               bus_addr_t seg_addr;
-
-               tx_buffer = &txr->tx_buffers[i];
-               txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
-               seg_addr = segs[j].ds_addr;
-               seg_len  = segs[j].ds_len;
-
-               txd->read.buffer_addr = htole64(seg_addr);
-               txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
-               txd->read.olinfo_status = htole32(olinfo_status);
-               last = i;
-               if (++i == adapter->num_tx_desc)
-                       i = 0;
-               tx_buffer->m_head = NULL;
-               tx_buffer->next_eop = -1;
-       }
-
-       txr->next_avail_desc = i;
-       txr->tx_avail -= nsegs;
-
-        tx_buffer->m_head = m_head;
-       tx_buffer_mapped->map = tx_buffer->map;
-       tx_buffer->map = map;
-        bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
-
-        /*
-         * Last Descriptor of Packet
-        * needs End Of Packet (EOP)
-        * and Report Status (RS)
-         */
-        txd->read.cmd_type_len |=
-           htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS);
-       /*
-        * Keep track in the first buffer which
-        * descriptor will be written back
-        */
-       tx_buffer = &txr->tx_buffers[first];
-       tx_buffer->next_eop = last;
-       txr->watchdog_time = ticks;
-
-       /*
-        * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
-        * that this frame is available to transmit.
-        */
-       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
-           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-       E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
-       ++txr->tx_packets;
-
-       return (0);
-
-}
-
-static void
-igb_set_promisc(struct adapter *adapter)
-{
-       struct ifnet    *ifp = adapter->ifp;
-       struct e1000_hw *hw = &adapter->hw;
-       u32             reg;
-
-       if (adapter->vf_ifp) {
-               e1000_promisc_set_vf(hw, e1000_promisc_enabled);
-               return;
-       }
-
-       reg = E1000_READ_REG(hw, E1000_RCTL);
-       if (ifp->if_flags & IFF_PROMISC) {
-               reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-               E1000_WRITE_REG(hw, E1000_RCTL, reg);
-       } else if (ifp->if_flags & IFF_ALLMULTI) {
-               reg |= E1000_RCTL_MPE;
-               reg &= ~E1000_RCTL_UPE;
-               E1000_WRITE_REG(hw, E1000_RCTL, reg);
-       }
-}
-
-static void
-igb_disable_promisc(struct adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       u32             reg;
-
-       if (adapter->vf_ifp) {
-               e1000_promisc_set_vf(hw, e1000_promisc_disabled);
-               return;
-       }
-       reg = E1000_READ_REG(hw, E1000_RCTL);
-       reg &=  (~E1000_RCTL_UPE);
-       reg &=  (~E1000_RCTL_MPE);
-       E1000_WRITE_REG(hw, E1000_RCTL, reg);
-}
-
-
-/*********************************************************************
- *  Multicast Update
- *
- *  This routine is called whenever multicast address list is updated.
- *
- **********************************************************************/
-
-static void
-igb_set_multi(struct adapter *adapter)
-{
-       struct ifnet    *ifp = adapter->ifp;
-       struct ifmultiaddr *ifma;
-       u32 reg_rctl = 0;
-       u8  *mta;
-
-       int mcnt = 0;
-
-       IOCTL_DEBUGOUT("igb_set_multi: begin");
-
-       mta = adapter->mta;
-       bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN *
-           MAX_NUM_MULTICAST_ADDRESSES);
-
-#if __FreeBSD_version < 800000
-       IF_ADDR_LOCK(ifp);
-#else
-       if_maddr_rlock(ifp);
-#endif
-       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
-               if (ifma->ifma_addr->sa_family != AF_LINK)
-                       continue;
-
-               if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
-                       break;
-
-               bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
-                   &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
-               mcnt++;
-       }
-#if __FreeBSD_version < 800000
-       IF_ADDR_UNLOCK(ifp);
-#else
-       if_maddr_runlock(ifp);
-#endif
-
-       if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
-               reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
-               reg_rctl |= E1000_RCTL_MPE;
-               E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
-       } else
-               e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
-}
-
-
-/*********************************************************************
- *  Timer routine:
- *     This routine checks for link status,
- *     updates statistics, and does the watchdog.
- *
- **********************************************************************/
-
-static void
-igb_local_timer(void *arg)
-{
-       struct adapter          *adapter = arg;
-       device_t                dev = adapter->dev;
-       struct tx_ring          *txr = adapter->tx_rings;
-
-
-       IGB_CORE_LOCK_ASSERT(adapter);
-
-       igb_update_link_status(adapter);
-       igb_update_stats_counters(adapter);
-
-       /* 
-       ** If flow control has paused us since last checking
-       ** it invalidates the watchdog timing, so don't run it.
-       */
-       if (adapter->pause_frames) {
-               adapter->pause_frames = 0;
-               goto out;
-       }
-
-        /*
-        ** Watchdog: check for time since any descriptor was cleaned
-        */
-       for (int i = 0; i < adapter->num_queues; i++, txr++)
-               if (txr->queue_status == IGB_QUEUE_HUNG) 
-                       goto timeout;
-out:
-       callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
-#ifndef DEVICE_POLLING
-       /* Schedule all queue interrupts - deadlock protection */
-       E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask);
-#endif
-       return;
-
-timeout:
-       device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
-       device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
-           E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
-           E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
-       device_printf(dev, "TX(%d) desc avail = %d, "
-           "Next TX to Clean = %d\n",
-           txr->me, txr->tx_avail, txr->next_to_clean);
-       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-       adapter->watchdog_events++;
-       igb_init_locked(adapter);
-}
-
-static void
-igb_update_link_status(struct adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       struct ifnet *ifp = adapter->ifp;
-       device_t dev = adapter->dev;
-       struct tx_ring *txr = adapter->tx_rings;
-       u32 link_check, thstat, ctrl;
-
-       link_check = thstat = ctrl = 0;
-
-       /* Get the cached link value or read for real */
-        switch (hw->phy.media_type) {
-        case e1000_media_type_copper:
-                if (hw->mac.get_link_status) {
-                       /* Do the work to read phy */
-                        e1000_check_for_link(hw);
-                        link_check = !hw->mac.get_link_status;
-                } else
-                        link_check = TRUE;
-                break;
-        case e1000_media_type_fiber:
-                e1000_check_for_link(hw);
-                link_check = (E1000_READ_REG(hw, E1000_STATUS) &
-                                 E1000_STATUS_LU);
-                break;
-        case e1000_media_type_internal_serdes:
-                e1000_check_for_link(hw);
-                link_check = adapter->hw.mac.serdes_has_link;
-                break;
-       /* VF device is type_unknown */
-        case e1000_media_type_unknown:
-                e1000_check_for_link(hw);
-               link_check = !hw->mac.get_link_status;
-               /* Fall thru */
-        default:
-                break;
-        }
-
-       /* Check for thermal downshift or shutdown */
-       if (hw->mac.type == e1000_i350) {
-               thstat = E1000_READ_REG(hw, E1000_THSTAT);
-               ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
-       }
-
-       /* Now we check if a transition has happened */
-       if (link_check && (adapter->link_active == 0)) {
-               e1000_get_speed_and_duplex(&adapter->hw, 
-                   &adapter->link_speed, &adapter->link_duplex);
-               if (bootverbose)
-                       device_printf(dev, "Link is up %d Mbps %s\n",
-                           adapter->link_speed,
-                           ((adapter->link_duplex == FULL_DUPLEX) ?
-                           "Full Duplex" : "Half Duplex"));
-               adapter->link_active = 1;
-               ifp->if_baudrate = adapter->link_speed * 1000000;
-               if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
-                   (thstat & E1000_THSTAT_LINK_THROTTLE))
-                       device_printf(dev, "Link: thermal downshift\n");
-               /* This can sleep */
-               if_link_state_change(ifp, LINK_STATE_UP);
-       } else if (!link_check && (adapter->link_active == 1)) {
-               ifp->if_baudrate = adapter->link_speed = 0;
-               adapter->link_duplex = 0;
-               if (bootverbose)
-                       device_printf(dev, "Link is Down\n");
-               if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
-                   (thstat & E1000_THSTAT_PWR_DOWN))
-                       device_printf(dev, "Link: thermal shutdown\n");
-               adapter->link_active = 0;
-               /* This can sleep */
-               if_link_state_change(ifp, LINK_STATE_DOWN);
-               /* Turn off watchdogs */
-               for (int i = 0; i < adapter->num_queues; i++, txr++)
-                       txr->queue_status = IGB_QUEUE_IDLE;
-       }
-}
-
-/*********************************************************************
- *
- *  This routine disables all traffic on the adapter by issuing a
- *  global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-igb_stop(void *arg)
-{
-       struct adapter  *adapter = arg;
-       struct ifnet    *ifp = adapter->ifp;
-       struct tx_ring *txr = adapter->tx_rings;
-
-       IGB_CORE_LOCK_ASSERT(adapter);
-
-       INIT_DEBUGOUT("igb_stop: begin");
-
-       igb_disable_intr(adapter);
-
-       callout_stop(&adapter->timer);
-
-       /* Tell the stack that the interface is no longer active */
-       ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
-       /* Unarm watchdog timer. */
-       for (int i = 0; i < adapter->num_queues; i++, txr++) {
-               IGB_TX_LOCK(txr);
-               txr->queue_status = IGB_QUEUE_IDLE;
-               IGB_TX_UNLOCK(txr);
-       }
-
-       e1000_reset_hw(&adapter->hw);
-       E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
-
-       e1000_led_off(&adapter->hw);
-       e1000_cleanup_led(&adapter->hw);
-}
-
-
-/*********************************************************************
- *
- *  Determine hardware revision.
- *
- **********************************************************************/
-static void
-igb_identify_hardware(struct adapter *adapter)
-{
-       device_t dev = adapter->dev;
-
-       /* Make sure our PCI config space has the necessary stuff set */
-       adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
-       if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
-           (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
-               INIT_DEBUGOUT("Memory Access and/or Bus Master "
-                   "bits were not set!\n");
-               adapter->hw.bus.pci_cmd_word |=
-               (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
-               pci_write_config(dev, PCIR_COMMAND,
-                   adapter->hw.bus.pci_cmd_word, 2);
-       }
-
-       /* Save off the information about this board */
-       adapter->hw.vendor_id = pci_get_vendor(dev);
-       adapter->hw.device_id = pci_get_device(dev);
-       adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
-       adapter->hw.subsystem_vendor_id =
-           pci_read_config(dev, PCIR_SUBVEND_0, 2);
-       adapter->hw.subsystem_device_id =
-           pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
-       /* Set MAC type early for PCI setup */
-       e1000_set_mac_type(&adapter->hw);
-
-       /* Are we a VF device? */
-       if ((adapter->hw.mac.type == e1000_vfadapt) ||
-           (adapter->hw.mac.type == e1000_vfadapt_i350))
-               adapter->vf_ifp = 1;
-       else
-               adapter->vf_ifp = 0;
-}
-
-static int
-igb_allocate_pci_resources(struct adapter *adapter)
-{
-       device_t        dev = adapter->dev;
-       int             rid;
-
-       rid = PCIR_BAR(0);
-       adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
-           &rid, RF_ACTIVE);
-       if (adapter->pci_mem == NULL) {
-               device_printf(dev, "Unable to allocate bus resource: memory\n");
-               return (ENXIO);
-       }
-       adapter->osdep.mem_bus_space_tag =
-           rman_get_bustag(adapter->pci_mem);
-       adapter->osdep.mem_bus_space_handle =
-           rman_get_bushandle(adapter->pci_mem);
-       adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
-
-       adapter->num_queues = 1; /* Defaults for Legacy or MSI */
-
-       /* This will setup either MSI/X or MSI */
-       adapter->msix = igb_setup_msix(adapter);
-       adapter->hw.back = &adapter->osdep;
-
-       return (0);
-}
-
-/*********************************************************************
- *
- *  Setup the Legacy or MSI Interrupt handler
- *
- **********************************************************************/
-static int
-igb_allocate_legacy(struct adapter *adapter)
-{
-       device_t                dev = adapter->dev;
-       struct igb_queue        *que = adapter->queues;
-       int                     error, rid = 0;
-
-       /* Turn off all interrupts */
-       E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
-
-       /* MSI RID is 1 */
-       if (adapter->msix == 1)
-               rid = 1;
-
-       /* We allocate a single interrupt resource */
-       adapter->res = bus_alloc_resource_any(dev,
-           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
-       if (adapter->res == NULL) {
-               device_printf(dev, "Unable to allocate bus resource: "
-                   "interrupt\n");
-               return (ENXIO);
-       }
-
-       /*
-        * Try allocating a fast interrupt and the associated deferred
-        * processing contexts.
-        */
-       TASK_INIT(&que->que_task, 0, igb_handle_que, que);
-       /* Make tasklet for deferred link handling */
-       TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter);
-       que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
-           taskqueue_thread_enqueue, &que->tq);
-       taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq",
-           device_get_nameunit(adapter->dev));
-       if ((error = bus_setup_intr(dev, adapter->res,
-           INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL,
-           adapter, &adapter->tag)) != 0) {
-               device_printf(dev, "Failed to register fast interrupt "
-                           "handler: %d\n", error);
-               taskqueue_free(que->tq);
-               que->tq = NULL;
-               return (error);
-       }
-
-       return (0);
-}
-
-
-/*********************************************************************
- *
- *  Setup the MSIX Queue Interrupt handlers: 
- *
- **********************************************************************/
-static int
-igb_allocate_msix(struct adapter *adapter)
-{
-       device_t                dev = adapter->dev;
-       struct igb_queue        *que = adapter->queues;
-       int                     error, rid, vector = 0;
-
-
-       for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
-               rid = vector +1;
-               que->res = bus_alloc_resource_any(dev,
-                   SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
-               if (que->res == NULL) {
-                       device_printf(dev,
-                           "Unable to allocate bus resource: "
-                           "MSIX Queue Interrupt\n");
-                       return (ENXIO);
-               }
-               error = bus_setup_intr(dev, que->res,
-                   INTR_TYPE_NET | INTR_MPSAFE, NULL,
-                   igb_msix_que, que, &que->tag);
-               if (error) {
-                       que->res = NULL;
-                       device_printf(dev, "Failed to register Queue handler");
-                       return (error);
-               }
-#if __FreeBSD_version >= 800504
-               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
-#endif
-               que->msix = vector;
-               if (adapter->hw.mac.type == e1000_82575)
-                       que->eims = E1000_EICR_TX_QUEUE0 << i;
-               else
-                       que->eims = 1 << vector;
-               /*
-               ** Bind the msix vector, and thus the
-               ** rings to the corresponding cpu.
-               */
-               if (adapter->num_queues > 1)
-                       bus_bind_intr(dev, que->res, i);
-               /* Make tasklet for deferred handling */
-               TASK_INIT(&que->que_task, 0, igb_handle_que, que);
-               que->tq = taskqueue_create_fast("igb_que", M_NOWAIT,
-                   taskqueue_thread_enqueue, &que->tq);
-               taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
-                   device_get_nameunit(adapter->dev));
-       }
-
-       /* And Link */
-       rid = vector + 1;
-       adapter->res = bus_alloc_resource_any(dev,
-           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
-       if (adapter->res == NULL) {
-               device_printf(dev,
-                   "Unable to allocate bus resource: "
-                   "MSIX Link Interrupt\n");
-               return (ENXIO);
-       }
-       if ((error = bus_setup_intr(dev, adapter->res,
-           INTR_TYPE_NET | INTR_MPSAFE, NULL,
-           igb_msix_link, adapter, &adapter->tag)) != 0) {
-               device_printf(dev, "Failed to register Link handler");
-               return (error);
-       }
-#if __FreeBSD_version >= 800504
-       bus_describe_intr(dev, adapter->res, adapter->tag, "link");
-#endif
-       adapter->linkvec = vector;
-
-       return (0);
-}
-
-
-static void
-igb_configure_queues(struct adapter *adapter)
-{
-       struct  e1000_hw        *hw = &adapter->hw;
-       struct  igb_queue       *que;
-       u32                     tmp, ivar = 0, newitr = 0;
-
-       /* First turn on RSS capability */
-       if (adapter->hw.mac.type != e1000_82575)
-               E1000_WRITE_REG(hw, E1000_GPIE,
-                   E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
-                   E1000_GPIE_PBA | E1000_GPIE_NSICR);
-
-       /* Turn on MSIX */
-       switch (adapter->hw.mac.type) {
-       case e1000_82580:
-       case e1000_i350:
-       case e1000_vfadapt:
-       case e1000_vfadapt_i350:
-               /* RX entries */
-               for (int i = 0; i < adapter->num_queues; i++) {
-                       u32 index = i >> 1;
-                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
-                       que = &adapter->queues[i];
-                       if (i & 1) {
-                               ivar &= 0xFF00FFFF;
-                               ivar |= (que->msix | E1000_IVAR_VALID) << 16;
-                       } else {
-                               ivar &= 0xFFFFFF00;
-                               ivar |= que->msix | E1000_IVAR_VALID;
-                       }
-                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
-               }
-               /* TX entries */
-               for (int i = 0; i < adapter->num_queues; i++) {
-                       u32 index = i >> 1;
-                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
-                       que = &adapter->queues[i];
-                       if (i & 1) {
-                               ivar &= 0x00FFFFFF;
-                               ivar |= (que->msix | E1000_IVAR_VALID) << 24;
-                       } else {
-                               ivar &= 0xFFFF00FF;
-                               ivar |= (que->msix | E1000_IVAR_VALID) << 8;
-                       }
-                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
-                       adapter->que_mask |= que->eims;
-               }
-
-               /* And for the link interrupt */
-               ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
-               adapter->link_mask = 1 << adapter->linkvec;
-               E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
-               break;
-       case e1000_82576:
-               /* RX entries */
-               for (int i = 0; i < adapter->num_queues; i++) {
-                       u32 index = i & 0x7; /* Each IVAR has two entries */
-                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
-                       que = &adapter->queues[i];
-                       if (i < 8) {
-                               ivar &= 0xFFFFFF00;
-                               ivar |= que->msix | E1000_IVAR_VALID;
-                       } else {
-                               ivar &= 0xFF00FFFF;
-                               ivar |= (que->msix | E1000_IVAR_VALID) << 16;
-                       }
-                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
-                       adapter->que_mask |= que->eims;
-               }
-               /* TX entries */
-               for (int i = 0; i < adapter->num_queues; i++) {
-                       u32 index = i & 0x7; /* Each IVAR has two entries */
-                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
-                       que = &adapter->queues[i];
-                       if (i < 8) {
-                               ivar &= 0xFFFF00FF;
-                               ivar |= (que->msix | E1000_IVAR_VALID) << 8;
-                       } else {
-                               ivar &= 0x00FFFFFF;
-                               ivar |= (que->msix | E1000_IVAR_VALID) << 24;
-                       }
-                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
-                       adapter->que_mask |= que->eims;
-               }
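-               /*
-                * Example of the 82576 layout programmed above: queue 0
-                * (vector 0) uses IVAR entry 0 with its RX vector in
-                * bits 7:0 and TX vector in bits 15:8; queue 8 shares the
-                * same entry but uses bits 23:16 (RX) and 31:24 (TX).
-                * Each field is OR'd with E1000_IVAR_VALID to mark it live.
-                */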
-
-               /* And for the link interrupt */
-               ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
-               adapter->link_mask = 1 << adapter->linkvec;
-               E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
-               break;
-
-       case e1000_82575:
-                /* enable MSI-X support*/
-               tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
-                tmp |= E1000_CTRL_EXT_PBA_CLR;
-                /* Auto-Mask interrupts upon ICR read. */
-                tmp |= E1000_CTRL_EXT_EIAME;
-                tmp |= E1000_CTRL_EXT_IRCA;
-                E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
-
-               /* Queues */
-               for (int i = 0; i < adapter->num_queues; i++) {
-                       que = &adapter->queues[i];
-                       tmp = E1000_EICR_RX_QUEUE0 << i;
-                       tmp |= E1000_EICR_TX_QUEUE0 << i;
-                       que->eims = tmp;
-                       E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
-                           i, que->eims);
-                       adapter->que_mask |= que->eims;
-               }
-
-               /* Link */
-               E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
-                   E1000_EIMS_OTHER);
-               adapter->link_mask |= E1000_EIMS_OTHER;
-               break;
-       default:
-               break;
-       }
-
-       /* Set the starting interrupt rate */
-       if (igb_max_interrupt_rate > 0)
-               newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC;
-
-        if (hw->mac.type == e1000_82575)
-                newitr |= newitr << 16;
-        else
-                newitr |= E1000_EITR_CNT_IGNR;
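-       /*
-        * For example, with igb_max_interrupt_rate left at its usual default
-        * of 8000 interrupts/sec this yields an interval value of
-        * 4000000 / 8000 = 500 (500 & 0x7FFC == 500); the CNT_IGNR bit, or
-        * the duplicated high half on 82575, is then OR'd in before the
-        * EITR writes below.
-        */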
-
-       for (int i = 0; i < adapter->num_queues; i++) {
-               que = &adapter->queues[i];
-               E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr);
-       }
-
-       return;
-}
-
-
-static void
-igb_free_pci_resources(struct adapter *adapter)
-{
-       struct          igb_queue *que = adapter->queues;
-       device_t        dev = adapter->dev;
-       int             rid;
-
-       /*
-       ** There is a slight possibility of a failure mode
-       ** in attach that will result in entering this function
-       ** before interrupt resources have been initialized, and
-       ** in that case we do not want to execute the loops below
-       ** We can detect this reliably by the state of the adapter
-       ** res pointer.
-       */
-       if (adapter->res == NULL)
-               goto mem;
-
-       /*
-        * First release all the interrupt resources:
-        */
-       for (int i = 0; i < adapter->num_queues; i++, que++) {
-               rid = que->msix + 1;
-               if (que->tag != NULL) {
-                       bus_teardown_intr(dev, que->res, que->tag);
-                       que->tag = NULL;
-               }
-               if (que->res != NULL)
-                       bus_release_resource(dev,
-                           SYS_RES_IRQ, rid, que->res);
-       }
-
-       /* Clean the Legacy or Link interrupt last */
-       if (adapter->linkvec) /* we are doing MSIX */
-               rid = adapter->linkvec + 1;
-       else
-               rid = (adapter->msix != 0) ? 1 : 0;
-
-       if (adapter->tag != NULL) {
-               bus_teardown_intr(dev, adapter->res, adapter->tag);
-               adapter->tag = NULL;
-       }
-       if (adapter->res != NULL)
-               bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
-
-mem:
-       if (adapter->msix)
-               pci_release_msi(dev);
-
-       if (adapter->msix_mem != NULL)
-               bus_release_resource(dev, SYS_RES_MEMORY,
-                   PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
-
-       if (adapter->pci_mem != NULL)
-               bus_release_resource(dev, SYS_RES_MEMORY,
-                   PCIR_BAR(0), adapter->pci_mem);
-
-}
-
-/*
- * Setup Either MSI/X or MSI
- */
-static int
-igb_setup_msix(struct adapter *adapter)
-{
-       device_t dev = adapter->dev;
-       int rid, want, queues, msgs;
-
-       /* tuneable override */
-       if (igb_enable_msix == 0)
-               goto msi;
-
-       /* First try MSI/X */
-       rid = PCIR_BAR(IGB_MSIX_BAR);
-       adapter->msix_mem = bus_alloc_resource_any(dev,
-           SYS_RES_MEMORY, &rid, RF_ACTIVE);
-       if (!adapter->msix_mem) {
-               /* May not be enabled */
-               device_printf(adapter->dev,
-                   "Unable to map MSIX table\n");
-               goto msi;
-       }
-
-       msgs = pci_msix_count(dev); 
-       if (msgs == 0) { /* system has msix disabled */
-               bus_release_resource(dev, SYS_RES_MEMORY,
-                   PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
-               adapter->msix_mem = NULL;
-               goto msi;
-       }
-
-       /* Figure out a reasonable auto config value */
-       queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
-
-       /* Manual override */
-       if (igb_num_queues != 0)
-               queues = igb_num_queues;
-       if (queues > 8)  /* max queues */
-               queues = 8;
-
-       /* Can have max of 4 queues on 82575 */
-       if ((adapter->hw.mac.type == e1000_82575) && (queues > 4))
-               queues = 4;
-
-       /* Limit the VF devices to one queue */
-       if (adapter->vf_ifp)
-               queues = 1;
-
-       /*
-       ** One vector (RX/TX pair) per queue
-       ** plus an additional for Link interrupt
-       */
-       want = queues + 1;
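-       /* e.g. a 4-queue setup asks for 5 vectors: que0-que3 plus link */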
-       if (msgs >= want)
-               msgs = want;
-       else {
-               device_printf(adapter->dev,
-                   "MSIX Configuration Problem, "
-                   "%d vectors configured, but %d queues wanted!\n",
-                   msgs, want);
-               return (ENXIO);
-       }
-       if (msgs && pci_alloc_msix(dev, &msgs) == 0) {
-               device_printf(adapter->dev,
-                   "Using MSIX interrupts with %d vectors\n", msgs);
-               adapter->num_queues = queues;
-               return (msgs);
-       }
-msi:
-       msgs = pci_msi_count(dev);
-       if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
-               device_printf(adapter->dev, "Using MSI interrupt\n");
-       return (msgs);
-}
-
-/*********************************************************************
- *
- *  Set up a fresh starting state
- *
- **********************************************************************/
-static void
-igb_reset(struct adapter *adapter)
-{
-       device_t        dev = adapter->dev;
-       struct e1000_hw *hw = &adapter->hw;
-       struct e1000_fc_info *fc = &hw->fc;
-       struct ifnet    *ifp = adapter->ifp;
-       u32             pba = 0;
-       u16             hwm;
-
-       INIT_DEBUGOUT("igb_reset: begin");
-
-       /* Let the firmware know the OS is in control */
-       igb_get_hw_control(adapter);
-
-       /*
-        * Packet Buffer Allocation (PBA)
-        * Writing PBA sets the receive portion of the buffer;
-        * the remainder is used for the transmit buffer.
-        */
-       switch (hw->mac.type) {
-       case e1000_82575:
-               pba = E1000_PBA_32K;
-               break;
-       case e1000_82576:
-       case e1000_vfadapt:
-               pba = E1000_READ_REG(hw, E1000_RXPBS);
-               pba &= E1000_RXPBS_SIZE_MASK_82576;
-               break;
-       case e1000_82580:
-       case e1000_i350:
-       case e1000_vfadapt_i350:
-               pba = E1000_READ_REG(hw, E1000_RXPBS);
-               pba = e1000_rxpbs_adjust_82580(pba);
-               break;
-       default:
-               pba = E1000_PBA_35K;
-               break;
-       }
-
-       /* Special needs in case of Jumbo frames */
-       if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
-               u32 tx_space, min_tx, min_rx;
-               pba = E1000_READ_REG(hw, E1000_PBA);
-               tx_space = pba >> 16;
-               pba &= 0xffff;
-               min_tx = (adapter->max_frame_size +
-                   sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
-               min_tx = roundup2(min_tx, 1024);
-               min_tx >>= 10;
-                min_rx = adapter->max_frame_size;
-                min_rx = roundup2(min_rx, 1024);
-                min_rx >>= 10;
-               if (tx_space < min_tx &&
-                   ((min_tx - tx_space) < pba)) {
-                       pba = pba - (min_tx - tx_space);
-                       /*
-                         * if short on rx space, rx wins
-                         * and must trump tx adjustment
-                        */
-                        if (pba < min_rx)
-                                pba = min_rx;
-               }
-               E1000_WRITE_REG(hw, E1000_PBA, pba);
-       }
-
-       INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
-
-       /*
-        * These parameters control the automatic generation (Tx) and
-        * response (Rx) to Ethernet PAUSE frames.
-        * - High water mark should allow for at least two frames to be
-        *   received after sending an XOFF.
-        * - Low water mark works best when it is very near the high water mark.
-        *   This allows the receiver to restart by sending XON when it has
-        *   drained a bit.
-        */
-       hwm = min(((pba << 10) * 9 / 10),
-           ((pba << 10) - 2 * adapter->max_frame_size));
-
-       if (hw->mac.type < e1000_82576) {
-               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
-               fc->low_water = fc->high_water - 8;
-       } else {
-               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
-               fc->low_water = fc->high_water - 16;
-       }
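-       /*
-        * Worked example: an 82575 with pba = 32 (KB) and a 1518-byte
-        * max frame gives hwm = min(32768 * 9 / 10, 32768 - 2 * 1518)
-        * = min(29491, 29732) = 29491; with 8-byte granularity the high
-        * water mark becomes 29488 and the low water mark 29480.
-        */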
-
-       fc->pause_time = IGB_FC_PAUSE_TIME;
-       fc->send_xon = TRUE;
-
-       /* Issue a global reset */
-       e1000_reset_hw(hw);
-       E1000_WRITE_REG(hw, E1000_WUC, 0);
-
-       if (e1000_init_hw(hw) < 0)
-               device_printf(dev, "Hardware Initialization Failed\n");
-
-       /* Setup DMA Coalescing */
-       if ((hw->mac.type == e1000_i350) &&
-           (adapter->dma_coalesce == TRUE)) {
-               u32 reg;
-
-               hwm = (pba - 4) << 10;
-               reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
-                   & E1000_DMACR_DMACTHR_MASK);
-
-               /* transition to L0s or L1 if available */
-               reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
-
-               /* timer = +-1000 usec in 32usec intervals */
-               reg |= (1000 >> 5);
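-               /* e.g. (1000 >> 5) == 31, i.e. roughly 31 * 32 usec = 992 usec */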
-               E1000_WRITE_REG(hw, E1000_DMACR, reg);
-
-               /* No lower threshold */
-               E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
-
-               /* set hwm to PBA -  2 * max frame size */
-               E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
-
-               /* Set the interval before transition */
-               reg = E1000_READ_REG(hw, E1000_DMCTLX);
-               reg |= 0x800000FF; /* 255 usec */
-               E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
-
-               /* free space in tx packet buffer to wake from DMA coal */
-               E1000_WRITE_REG(hw, E1000_DMCTXTH,
-                   (20480 - (2 * adapter->max_frame_size)) >> 6);
-
-               /* make low power state decision controlled by DMA coal */
-               reg = E1000_READ_REG(hw, E1000_PCIEMISC);
-               E1000_WRITE_REG(hw, E1000_PCIEMISC,
-                   reg | E1000_PCIEMISC_LX_DECISION);
-               device_printf(dev, "DMA Coalescing enabled\n");
-       }
-
-       E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
-       e1000_get_phy_info(hw);
-       e1000_check_for_link(hw);
-       return;
-}
-
-/*********************************************************************
- *
- *  Setup networking device structure and register an interface.
- *
- **********************************************************************/
-static int
-igb_setup_interface(device_t dev, struct adapter *adapter)
-{
-       struct ifnet   *ifp;
-
-       INIT_DEBUGOUT("igb_setup_interface: begin");
-
-       ifp = adapter->ifp = if_alloc(IFT_ETHER);
-       if (ifp == NULL) {
-               device_printf(dev, "can not allocate ifnet structure\n");
-               return (-1);
-       }
-       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-       ifp->if_mtu = ETHERMTU;
-       ifp->if_init =  igb_init;
-       ifp->if_softc = adapter;
-       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
-       ifp->if_ioctl = igb_ioctl;
-       ifp->if_start = igb_start;
-#if __FreeBSD_version >= 800000
-       ifp->if_transmit = igb_mq_start;
-       ifp->if_qflush = igb_qflush;
-#endif
-       IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
-       ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
-       IFQ_SET_READY(&ifp->if_snd);
-
-       ether_ifattach(ifp, adapter->hw.mac.addr);
-
-       ifp->if_capabilities = ifp->if_capenable = 0;
-
-       ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
-       ifp->if_capabilities |= IFCAP_TSO4;
-       ifp->if_capabilities |= IFCAP_JUMBO_MTU;
-       ifp->if_capenable = ifp->if_capabilities;
-
-       /* Don't enable LRO by default */
-       ifp->if_capabilities |= IFCAP_LRO;
-
-#ifdef DEVICE_POLLING
-       ifp->if_capabilities |= IFCAP_POLLING;
-#endif
-
-       /*
-        * Tell the upper layer(s) we
-        * support full VLAN capability.
-        */
-       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
-       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
-       ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
-
-       /*
-       ** Don't turn this on by default: if VLANs are
-       ** created on another pseudo device (e.g. lagg)
-       ** then VLAN events are not passed through, breaking
-       ** operation, but with HW FILTER off it works. If
-       ** using VLANs directly on this driver you can
-       ** enable this and get full hardware tag filtering.
-       */
-       ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
-       /*
-        * Specify the media types supported by this adapter and register
-        * callbacks to update media and link information
-        */
-       ifmedia_init(&adapter->media, IFM_IMASK,
-           igb_media_change, igb_media_status);
-       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
-           (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
-               ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
-                           0, NULL);
-               ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
-       } else {
-               ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
-               ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
-                           0, NULL);
-               ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
-                           0, NULL);
-               ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
-                           0, NULL);
-               if (adapter->hw.phy.type != e1000_phy_ife) {
-                       ifmedia_add(&adapter->media,
-                               IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
-                       ifmedia_add(&adapter->media,
-                               IFM_ETHER | IFM_1000_T, 0, NULL);
-               }
-       }
-       ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
-       ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
-       return (0);
-}
-
-
-/*
- * Manage DMA'able memory.
- */
-static void
-igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
-       if (error)
-               return;
-       *(bus_addr_t *) arg = segs[0].ds_addr;
-}
-
-static int
-igb_dma_malloc(struct adapter *adapter, bus_size_t size,
-        struct igb_dma_alloc *dma, int mapflags)
-{
-       int error;
-
-       error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
-                               IGB_DBA_ALIGN, 0,       /* alignment, bounds */
-                               BUS_SPACE_MAXADDR,      /* lowaddr */
-                               BUS_SPACE_MAXADDR,      /* highaddr */
-                               NULL, NULL,             /* filter, filterarg */
-                               size,                   /* maxsize */
-                               1,                      /* nsegments */
-                               size,                   /* maxsegsize */
-                               0,                      /* flags */
-                               NULL,                   /* lockfunc */
-                               NULL,                   /* lockarg */
-                               &dma->dma_tag);
-       if (error) {
-               device_printf(adapter->dev,
-                   "%s: bus_dma_tag_create failed: %d\n",
-                   __func__, error);
-               goto fail_0;
-       }
-
-       error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
-           BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
-       if (error) {
-               device_printf(adapter->dev,
-                   "%s: bus_dmamem_alloc(%ju) failed: %d\n",
-                   __func__, (uintmax_t)size, error);
-               goto fail_2;
-       }
-
-       dma->dma_paddr = 0;
-       error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
-           size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
-       if (error || dma->dma_paddr == 0) {
-               device_printf(adapter->dev,
-                   "%s: bus_dmamap_load failed: %d\n",
-                   __func__, error);
-               goto fail_3;
-       }
-
-       return (0);
-
-fail_3:
-       bus_dmamap_unload(dma->dma_tag, dma->dma_map);
-fail_2:
-       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-       bus_dma_tag_destroy(dma->dma_tag);
-fail_0:
-       dma->dma_map = NULL;
-       dma->dma_tag = NULL;
-
-       return (error);
-}
-
-static void
-igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma)
-{
-       if (dma->dma_tag == NULL)
-               return;
-       if (dma->dma_map != NULL) {
-               bus_dmamap_sync(dma->dma_tag, dma->dma_map,
-                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-               bus_dmamap_unload(dma->dma_tag, dma->dma_map);
-               bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-               dma->dma_map = NULL;
-       }
-       bus_dma_tag_destroy(dma->dma_tag);
-       dma->dma_tag = NULL;
-}
-
-
-/*********************************************************************
- *
- *  Allocate memory for the transmit and receive rings, and then
- *  the descriptors associated with each, called only once at attach.
- *
- **********************************************************************/
-static int
-igb_allocate_queues(struct adapter *adapter)
-{
-       device_t dev = adapter->dev;
-       struct igb_queue        *que = NULL;
-       struct tx_ring          *txr = NULL;
-       struct rx_ring          *rxr = NULL;
-       int rsize, tsize, error = E1000_SUCCESS;
-       int txconf = 0, rxconf = 0;
-
-       /* First allocate the top level queue structs */
-       if (!(adapter->queues =
-           (struct igb_queue *) malloc(sizeof(struct igb_queue) *
-           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
-               device_printf(dev, "Unable to allocate queue memory\n");
-               error = ENOMEM;
-               goto fail;
-       }
-
-       /* Next allocate the TX ring struct memory */
-       if (!(adapter->tx_rings =
-           (struct tx_ring *) malloc(sizeof(struct tx_ring) *
-           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
-               device_printf(dev, "Unable to allocate TX ring memory\n");
-               error = ENOMEM;
-               goto tx_fail;
-       }
-
-       /* Now allocate the RX */
-       if (!(adapter->rx_rings =
-           (struct rx_ring *) malloc(sizeof(struct rx_ring) *
-           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
-               device_printf(dev, "Unable to allocate RX ring memory\n");
-               error = ENOMEM;
-               goto rx_fail;
-       }
-
-       tsize = roundup2(adapter->num_tx_desc *
-           sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN);
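-       /*
-        * e.g. 1024 TX descriptors at 16 bytes each (union e1000_adv_tx_desc)
-        * is 16384 bytes, already a multiple of IGB_DBA_ALIGN.
-        */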
-       /*
-        * Now set up the TX queues, txconf is needed to handle the
-        * possibility that things fail midcourse and we need to
-        * undo memory gracefully
-        */ 
-       for (int i = 0; i < adapter->num_queues; i++, txconf++) {
-               /* Set up some basics */
-               txr = &adapter->tx_rings[i];
-               txr->adapter = adapter;
-               txr->me = i;
-
-               /* Initialize the TX lock */
-               snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
-                   device_get_nameunit(dev), txr->me);
-               mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
-
-               if (igb_dma_malloc(adapter, tsize,
-                       &txr->txdma, BUS_DMA_NOWAIT)) {
-                       device_printf(dev,
-                           "Unable to allocate TX Descriptor memory\n");
-                       error = ENOMEM;
-                       goto err_tx_desc;
-               }
-               txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
-               bzero((void *)txr->tx_base, tsize);
-
-               /* Now allocate transmit buffers for the ring */
-               if (igb_allocate_transmit_buffers(txr)) {
-                       device_printf(dev,
-                           "Critical Failure setting up transmit buffers\n");
-                       error = ENOMEM;
-                       goto err_tx_desc;
-               }
-#if __FreeBSD_version >= 800000
-               /* Allocate a buf ring */
-               txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF,
-                   M_WAITOK, &txr->tx_mtx);
-#endif
-       }
-
-       /*
-        * Next the RX queues...
-        */ 
-       rsize = roundup2(adapter->num_rx_desc *
-           sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
-       for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
-               rxr = &adapter->rx_rings[i];
-               rxr->adapter = adapter;
-               rxr->me = i;
-
-               /* Initialize the RX lock */
-               snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
-                   device_get_nameunit(dev), rxr->me);
-               mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
-
-               if (igb_dma_malloc(adapter, rsize,
-                       &rxr->rxdma, BUS_DMA_NOWAIT)) {
-                       device_printf(dev,
-                           "Unable to allocate RxDescriptor memory\n");
-                       error = ENOMEM;
-                       goto err_rx_desc;
-               }
-               rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr;
-               bzero((void *)rxr->rx_base, rsize);
-
-               /* Allocate receive buffers for the ring*/
-               if (igb_allocate_receive_buffers(rxr)) {
-                       device_printf(dev,
-                           "Critical Failure setting up receive buffers\n");
-                       error = ENOMEM;
-                       goto err_rx_desc;
-               }
-       }
-
-       /*
-       ** Finally set up the queue holding structs
-       */
-       for (int i = 0; i < adapter->num_queues; i++) {
-               que = &adapter->queues[i];
-               que->adapter = adapter;
-               que->txr = &adapter->tx_rings[i];
-               que->rxr = &adapter->rx_rings[i];
-       }
-
-       return (0);
-
-err_rx_desc:
-       for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
-               igb_dma_free(adapter, &rxr->rxdma);
-err_tx_desc:
-       for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
-               igb_dma_free(adapter, &txr->txdma);
-       free(adapter->rx_rings, M_DEVBUF);
-rx_fail:
-#if __FreeBSD_version >= 800000
-       buf_ring_free(txr->br, M_DEVBUF);
-#endif
-       free(adapter->tx_rings, M_DEVBUF);
-tx_fail:
-       free(adapter->queues, M_DEVBUF);
-fail:
-       return (error);
-}
-
-/*********************************************************************
- *
- *  Allocate memory for tx_buffer structures. The tx_buffer stores all
- *  the information needed to transmit a packet on the wire. This is
- *  called only once at attach; setup is done on every reset.
- *
- **********************************************************************/
-static int
-igb_allocate_transmit_buffers(struct tx_ring *txr)
-{
-       struct adapter *adapter = txr->adapter;
-       device_t dev = adapter->dev;
-       struct igb_tx_buffer *txbuf;
-       int error, i;
-
-       /*
-        * Setup DMA descriptor areas.
-        */
-       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
-                              1, 0,                    /* alignment, bounds */
-                              BUS_SPACE_MAXADDR,       /* lowaddr */
-                              BUS_SPACE_MAXADDR,       /* highaddr */
-                              NULL, NULL,              /* filter, filterarg */
-                              IGB_TSO_SIZE,            /* maxsize */
-                              IGB_MAX_SCATTER,         /* nsegments */
-                              PAGE_SIZE,               /* maxsegsize */
-                              0,                       /* flags */
-                              NULL,                    /* lockfunc */
-                              NULL,                    /* lockfuncarg */
-                              &txr->txtag))) {
-               device_printf(dev,"Unable to allocate TX DMA tag\n");
-               goto fail;
-       }
-
-       if (!(txr->tx_buffers =
-           (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) *
-           adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
-               device_printf(dev, "Unable to allocate tx_buffer memory\n");
-               error = ENOMEM;
-               goto fail;
-       }
-
-        /* Create the descriptor buffer dma maps */
-       txbuf = txr->tx_buffers;
-       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
-               error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
-               if (error != 0) {
-                       device_printf(dev, "Unable to create TX DMA map\n");
-                       goto fail;
-               }
-       }
-
-       return 0;
-fail:
-       /* We free all, it handles case where we are in the middle */
-       igb_free_transmit_structures(adapter);
-       return (error);
-}
-
-/*********************************************************************
- *
- *  Initialize a transmit ring.
- *
- **********************************************************************/
-static void
-igb_setup_transmit_ring(struct tx_ring *txr)
-{
-       struct adapter *adapter = txr->adapter;
-       struct igb_tx_buffer *txbuf;
-       int i;
-
-       /* Clear the old descriptor contents */
-       IGB_TX_LOCK(txr);
-       bzero((void *)txr->tx_base,
-             (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc);
-       /* Reset indices */
-       txr->next_avail_desc = 0;
-       txr->next_to_clean = 0;
-
-       /* Free any existing tx buffers. */
-        txbuf = txr->tx_buffers;
-       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
-               if (txbuf->m_head != NULL) {
-                       bus_dmamap_sync(txr->txtag, txbuf->map,
-                           BUS_DMASYNC_POSTWRITE);
-                       bus_dmamap_unload(txr->txtag, txbuf->map);
-                       m_freem(txbuf->m_head);
-                       txbuf->m_head = NULL;
-               }
-               /* clear the watch index */
-               txbuf->next_eop = -1;
-        }
-
-       /* Set number of descriptors available */
-       txr->tx_avail = adapter->num_tx_desc;
-
-       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
-           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-       IGB_TX_UNLOCK(txr);
-}
-
-/*********************************************************************
- *
- *  Initialize all transmit rings.
- *
- **********************************************************************/
-static void
-igb_setup_transmit_structures(struct adapter *adapter)
-{
-       struct tx_ring *txr = adapter->tx_rings;
-
-       for (int i = 0; i < adapter->num_queues; i++, txr++)
-               igb_setup_transmit_ring(txr);
-
-       return;
-}
-
-/*********************************************************************
- *
- *  Enable transmit unit.
- *
- **********************************************************************/
-static void
-igb_initialize_transmit_units(struct adapter *adapter)
-{
-       struct tx_ring  *txr = adapter->tx_rings;
-       struct e1000_hw *hw = &adapter->hw;
-       u32             tctl, txdctl;
-
-       INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
-       tctl = txdctl = 0;
-
-       /* Setup the Tx Descriptor Rings */
-       for (int i = 0; i < adapter->num_queues; i++, txr++) {
-               u64 bus_addr = txr->txdma.dma_paddr;
-
-               E1000_WRITE_REG(hw, E1000_TDLEN(i),
-                   adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
-               E1000_WRITE_REG(hw, E1000_TDBAH(i),
-                   (uint32_t)(bus_addr >> 32));
-               E1000_WRITE_REG(hw, E1000_TDBAL(i),
-                   (uint32_t)bus_addr);
-
-               /* Setup the HW Tx Head and Tail descriptor pointers */
-               E1000_WRITE_REG(hw, E1000_TDT(i), 0);
-               E1000_WRITE_REG(hw, E1000_TDH(i), 0);
-
-               HW_DEBUGOUT2("Base = %x, Length = %x\n",
-                   E1000_READ_REG(hw, E1000_TDBAL(i)),
-                   E1000_READ_REG(hw, E1000_TDLEN(i)));
-
-               txr->queue_status = IGB_QUEUE_IDLE;
-
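-               /* Build TXDCTL: prefetch, host and write-back thresholds, then enable the queue */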
-               txdctl |= IGB_TX_PTHRESH;
-               txdctl |= IGB_TX_HTHRESH << 8;
-               txdctl |= IGB_TX_WTHRESH << 16;
-               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-               E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
-       }
-
-       if (adapter->vf_ifp)
-               return;
-
-       e1000_config_collision_dist(hw);
-
-       /* Program the Transmit Control Register */
-       tctl = E1000_READ_REG(hw, E1000_TCTL);
-       tctl &= ~E1000_TCTL_CT;
-       tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
-                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
-
-       /* This write will effectively turn on the transmit unit. */
-       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-}
-
-/*********************************************************************
- *
- *  Free all transmit rings.
- *
- **********************************************************************/
-static void
-igb_free_transmit_structures(struct adapter *adapter)
-{
-       struct tx_ring *txr = adapter->tx_rings;
-
-       for (int i = 0; i < adapter->num_queues; i++, txr++) {
-               IGB_TX_LOCK(txr);
-               igb_free_transmit_buffers(txr);
-               igb_dma_free(adapter, &txr->txdma);
-               IGB_TX_UNLOCK(txr);
-               IGB_TX_LOCK_DESTROY(txr);
-       }
-       free(adapter->tx_rings, M_DEVBUF);
-}
-
-/*********************************************************************
- *
- *  Free transmit ring related data structures.
- *
- **********************************************************************/
-static void
-igb_free_transmit_buffers(struct tx_ring *txr)
-{
-       struct adapter *adapter = txr->adapter;
-       struct igb_tx_buffer *tx_buffer;
-       int             i;
-
-       INIT_DEBUGOUT("free_transmit_ring: begin");
-
-       if (txr->tx_buffers == NULL)
-               return;
-
-       tx_buffer = txr->tx_buffers;
-       for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
-               if (tx_buffer->m_head != NULL) {
-                       bus_dmamap_sync(txr->txtag, tx_buffer->map,
-                           BUS_DMASYNC_POSTWRITE);
-                       bus_dmamap_unload(txr->txtag,
-                           tx_buffer->map);
-                       m_freem(tx_buffer->m_head);
-                       tx_buffer->m_head = NULL;
-                       if (tx_buffer->map != NULL) {
-                               bus_dmamap_destroy(txr->txtag,
-                                   tx_buffer->map);
-                               tx_buffer->map = NULL;
-                       }
-               } else if (tx_buffer->map != NULL) {
-                       bus_dmamap_unload(txr->txtag,
-                           tx_buffer->map);
-                       bus_dmamap_destroy(txr->txtag,
-                           tx_buffer->map);
-                       tx_buffer->map = NULL;
-               }
-       }
-#if __FreeBSD_version >= 800000
-       if (txr->br != NULL)
-               buf_ring_free(txr->br, M_DEVBUF);
-#endif
-       if (txr->tx_buffers != NULL) {
-               free(txr->tx_buffers, M_DEVBUF);
-               txr->tx_buffers = NULL;
-       }
-       if (txr->txtag != NULL) {
-               bus_dma_tag_destroy(txr->txtag);
-               txr->txtag = NULL;
-       }
-       return;
-}
-
-/**********************************************************************
- *
- *  Setup work for hardware segmentation offload (TSO)
- *
- **********************************************************************/
-static boolean_t
-igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
-{
-       struct adapter *adapter = txr->adapter;
-       struct e1000_adv_tx_context_desc *TXD;
-       struct igb_tx_buffer        *tx_buffer;
-       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-       u32 mss_l4len_idx = 0;
-       u16 vtag = 0;
-       int ctxd, ehdrlen, ip_hlen, tcp_hlen;
-       struct ether_vlan_header *eh;
-       struct ip *ip;
-       struct tcphdr *th;
-
-
-       /*
-        * Determine where frame payload starts.
-        * Jump over vlan headers if already present
-        */
-       eh = mtod(mp, struct ether_vlan_header *);
-       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
-               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
-       else
-               ehdrlen = ETHER_HDR_LEN;
-
-       /* Ensure we have at least the IP+TCP header in the first mbuf. */
-       if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
-               return FALSE;
-
-       /* Only supports IPV4 for now */
-       ctxd = txr->next_avail_desc;
-       tx_buffer = &txr->tx_buffers[ctxd];
-       TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
-       ip = (struct ip *)(mp->m_data + ehdrlen);
-       if (ip->ip_p != IPPROTO_TCP)
-                return FALSE;   /* 0 */
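-       /*
-        * Zero the IP checksum and seed the TCP checksum with the
-        * pseudo-header sum; the hardware completes both as it segments.
-        */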
-       ip->ip_sum = 0;
-       ip_hlen = ip->ip_hl << 2;
-       th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
-       th->th_sum = in_pseudo(ip->ip_src.s_addr,
-           ip->ip_dst.s_addr, htons(IPPROTO_TCP));
-       tcp_hlen = th->th_off << 2;
-       /*
-        * Calculate header length, this is used
-        * in the transmit desc in igb_xmit
-        */
-       *hdrlen = ehdrlen + ip_hlen + tcp_hlen;
-
-       /* VLAN MACLEN IPLEN */
-       if (mp->m_flags & M_VLANTAG) {
-               vtag = htole16(mp->m_pkthdr.ether_vtag);
-               vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
-       }
-
-       vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
-       vlan_macip_lens |= ip_hlen;
-       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
-
-       /* ADV DTYPE TUCMD */
-       type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
-       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
-       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
-       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
-
-       /* MSS L4LEN IDX */
-       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
-       mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
-       /* 82575 needs the queue index added */
-       if (adapter->hw.mac.type == e1000_82575)
-               mss_l4len_idx |= txr->me << 4;
-       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
-       TXD->seqnum_seed = htole32(0);
-       tx_buffer->m_head = NULL;
-       tx_buffer->next_eop = -1;
-
-       if (++ctxd == adapter->num_tx_desc)
-               ctxd = 0;
-
-       txr->tx_avail--;
-       txr->next_avail_desc = ctxd;
-       return TRUE;
-}
-
-
-/*********************************************************************
- *
- *  Context Descriptor setup for VLAN or CSUM
- *
- **********************************************************************/
-
-static bool
-igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
-{
-       struct adapter *adapter = txr->adapter;
-       struct e1000_adv_tx_context_desc *TXD;
-       struct igb_tx_buffer        *tx_buffer;
-       u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
-       struct ether_vlan_header *eh;
-       struct ip *ip = NULL;
-       struct ip6_hdr *ip6;
-       int  ehdrlen, ctxd, ip_hlen = 0;
-       u16     etype, vtag = 0;
-       u8      ipproto = 0;
-       bool    offload = TRUE;
-
-       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
-               offload = FALSE;
-
-       vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
-       ctxd = txr->next_avail_desc;
-       tx_buffer = &txr->tx_buffers[ctxd];
-       TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
-       /*
-       ** In advanced descriptors the vlan tag must 
-       ** be placed into the context descriptor; thus
-       ** we need to be here just for that setup.
-       */
-       if (mp->m_flags & M_VLANTAG) {
-               vtag = htole16(mp->m_pkthdr.ether_vtag);
-               vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
-       } else if (offload == FALSE)
-               return FALSE;
-
-       /*
-        * Determine where frame payload starts.
-        * Jump over vlan headers if already present,
-        * helpful for QinQ too.
-        */
-       eh = mtod(mp, struct ether_vlan_header *);
-       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
-               etype = ntohs(eh->evl_proto);
-               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
-       } else {
-               etype = ntohs(eh->evl_encap_proto);
-               ehdrlen = ETHER_HDR_LEN;
-       }
-
-       /* Set the ether header length */
-       vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
-
-       switch (etype) {
-               case ETHERTYPE_IP:
-                       ip = (struct ip *)(mp->m_data + ehdrlen);
-                       ip_hlen = ip->ip_hl << 2;
-                       if (mp->m_len < ehdrlen + ip_hlen) {
-                               offload = FALSE;
-                               break;
-                       }
-                       ipproto = ip->ip_p;
-                       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
-                       break;
-               case ETHERTYPE_IPV6:
-                       ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
-                       ip_hlen = sizeof(struct ip6_hdr);
-                       ipproto = ip6->ip6_nxt;
-                       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
-                       break;
-               default:
-                       offload = FALSE;
-                       break;
-       }
-
-       vlan_macip_lens |= ip_hlen;
-       type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
-
-       switch (ipproto) {
-               case IPPROTO_TCP:
-                       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
-                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
-                       break;
-               case IPPROTO_UDP:
-                       if (mp->m_pkthdr.csum_flags & CSUM_UDP)
-                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
-                       break;
-#if __FreeBSD_version >= 800000
-               case IPPROTO_SCTP:
-                       if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
-                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
-                       break;
-#endif
-               default:
-                       offload = FALSE;
-                       break;
-       }
-
-       /* 82575 needs the queue index added */
-       if (adapter->hw.mac.type == e1000_82575)
-               mss_l4len_idx = txr->me << 4;
-
-       /* Now copy bits into descriptor */
-       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
-       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
-       TXD->seqnum_seed = htole32(0);
-       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
-       tx_buffer->m_head = NULL;
-       tx_buffer->next_eop = -1;
-
-       /* We've consumed the first desc, adjust counters */
-       if (++ctxd == adapter->num_tx_desc)
-               ctxd = 0;
-       txr->next_avail_desc = ctxd;
-       --txr->tx_avail;
-
-        return (offload);
-}
-
-
-/**********************************************************************
- *
- *  Examine each tx_buffer in the used queue. If the hardware is done
- *  processing the packet then free associated resources. The
- *  tx_buffer is put back on the free queue.
- *
- *  A TRUE return means there's work in the ring to clean; FALSE means it's empty.
- **********************************************************************/
-static bool
-igb_txeof(struct tx_ring *txr)
-{
-       struct adapter  *adapter = txr->adapter;
-        int first, last, done, processed;
-        struct igb_tx_buffer *tx_buffer;
-        struct e1000_tx_desc   *tx_desc, *eop_desc;
-       struct ifnet   *ifp = adapter->ifp;
-
-       IGB_TX_LOCK_ASSERT(txr);
-
-        if (txr->tx_avail == adapter->num_tx_desc) {
-               txr->queue_status = IGB_QUEUE_IDLE;
-                return FALSE;
-       }
-
-       processed = 0;
-        first = txr->next_to_clean;
-        tx_desc = &txr->tx_base[first];
-        tx_buffer = &txr->tx_buffers[first];
-       last = tx_buffer->next_eop;
-        eop_desc = &txr->tx_base[last];
-
-       /*
-        * What this does is get the index of the
-        * first descriptor AFTER the EOP of the 
-        * first packet, so that we can do the
-        * simple comparison in the inner while loop.
-        */
-       if (++last == adapter->num_tx_desc)
-               last = 0;
-       done = last;
-
-        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
-            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
-        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
-               /* We clean the range of the packet */
-               while (first != done) {
-                       tx_desc->upper.data = 0;
-                       tx_desc->lower.data = 0;
-                       tx_desc->buffer_addr = 0;
-                       ++txr->tx_avail;
-                       ++processed;
-
-                       if (tx_buffer->m_head) {
-                               txr->bytes +=
-                                   tx_buffer->m_head->m_pkthdr.len;
-                               bus_dmamap_sync(txr->txtag,
-                                   tx_buffer->map,
-                                   BUS_DMASYNC_POSTWRITE);
-                               bus_dmamap_unload(txr->txtag,
-                                   tx_buffer->map);
-
-                               m_freem(tx_buffer->m_head);
-                               tx_buffer->m_head = NULL;
-                       }
-                       tx_buffer->next_eop = -1;
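-                       /* Note forward progress for the watchdog check below */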
-                       txr->watchdog_time = ticks;
-
-                       if (++first == adapter->num_tx_desc)
-                               first = 0;
-
-                       tx_buffer = &txr->tx_buffers[first];
-                       tx_desc = &txr->tx_base[first];
-               }
-               ++txr->packets;
-               ++ifp->if_opackets;
-               /* See if we can continue to the next packet */
-               last = tx_buffer->next_eop;
-               if (last != -1) {
-                       eop_desc = &txr->tx_base[last];
-                       /* Get new done point */
-                       if (++last == adapter->num_tx_desc) last = 0;
-                       done = last;
-               } else
-                       break;
-        }
-        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
-            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
-        txr->next_to_clean = first;
-
-       /*
-       ** Watchdog calculation: we know there's
-       ** work outstanding or the first return
-       ** would have been taken, so no progress
-       ** for too long indicates a hang.
-       */
-       if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
-               txr->queue_status = IGB_QUEUE_HUNG;
-
-        /*
-         * If we have a minimum free, clear IFF_DRV_OACTIVE
-         * to tell the stack that it is OK to send packets.
-         */
-        if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) {                
-                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-               /* All clean, turn off the watchdog */
-                if (txr->tx_avail == adapter->num_tx_desc) {
-                       txr->queue_status = IGB_QUEUE_IDLE;
-                       return (FALSE);
-               }
-        }
-       return (TRUE);
-}
-
-/*********************************************************************
- *
- *  Refresh mbuf buffers for RX descriptor rings
- *   - now keeps its own state so discards due to resource
- *     exhaustion are unnecessary; if an mbuf cannot be obtained
- *     it just returns, keeping its placeholder, so it can simply
- *     be called again to retry.
- *
- **********************************************************************/
-static void
-igb_refresh_mbufs(struct rx_ring *rxr, int limit)
-{
-       struct adapter          *adapter = rxr->adapter;
-       bus_dma_segment_t       hseg[1];
-       bus_dma_segment_t       pseg[1];
-       struct igb_rx_buf       *rxbuf;
-       struct mbuf             *mh, *mp;
-       int                     i, j, nsegs, error;
-       bool                    refreshed = FALSE;
-
-       i = j = rxr->next_to_refresh;
-       /*
-       ** Get one descriptor beyond
-       ** our work mark to control
-       ** the loop.
-        */
-       if (++j == adapter->num_rx_desc)
-               j = 0;
-
-       while (j != limit) {
-               rxbuf = &rxr->rx_buffers[i];
-               /* No hdr mbuf used with header split off */
-               if (rxr->hdr_split == FALSE)
-                       goto no_split;
-               if (rxbuf->m_head == NULL) {
-                       mh = m_gethdr(M_DONTWAIT, MT_DATA);
-                       if (mh == NULL)
-                               goto update;
-               } else
-                       mh = rxbuf->m_head;
-
-               mh->m_pkthdr.len = mh->m_len = MHLEN;
-               mh->m_flags |= M_PKTHDR;
-               /* Get the memory mapping */
-               error = bus_dmamap_load_mbuf_sg(rxr->htag,
-                   rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
-               if (error != 0) {
-                       printf("Refresh mbufs: hdr dmamap load"
-                           " failure - %d\n", error);
-                       m_free(mh);
-                       rxbuf->m_head = NULL;
-                       goto update;
-               }
-               rxbuf->m_head = mh;
-               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-                   BUS_DMASYNC_PREREAD);
-               rxr->rx_base[i].read.hdr_addr =
-                   htole64(hseg[0].ds_addr);
-no_split:
-               if (rxbuf->m_pack == NULL) {
-                       mp = m_getjcl(M_DONTWAIT, MT_DATA,
-                           M_PKTHDR, adapter->rx_mbuf_sz);
-                       if (mp == NULL)
-                               goto update;
-               } else
-                       mp = rxbuf->m_pack;
-
-               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
-               /* Get the memory mapping */
-               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
-                   rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
-               if (error != 0) {
-                       printf("Refresh mbufs: payload dmamap load"
-                           " failure - %d\n", error);
-                       m_free(mp);
-                       rxbuf->m_pack = NULL;
-                       goto update;
-               }
-               rxbuf->m_pack = mp;
-               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
-                   BUS_DMASYNC_PREREAD);
-               rxr->rx_base[i].read.pkt_addr =
-                   htole64(pseg[0].ds_addr);
-               refreshed = TRUE; /* I feel wefreshed :) */
-
-               i = j; /* our next is precalculated */
-               rxr->next_to_refresh = i;
-               if (++j == adapter->num_rx_desc)
-                       j = 0;
-       }
-update:
-       if (refreshed) /* update tail */
-               E1000_WRITE_REG(&adapter->hw,
-                   E1000_RDT(rxr->me), rxr->next_to_refresh);
-       return;
-}
-
-
-/*********************************************************************
- *
- *  Allocate memory for rx_buffer structures. Since we use one
- *  rx_buffer per received packet, the maximum number of rx_buffer's
- *  that we'll need is equal to the number of receive descriptors
- *  that we've allocated.
- *
- **********************************************************************/
-static int
-igb_allocate_receive_buffers(struct rx_ring *rxr)
-{
-       struct  adapter         *adapter = rxr->adapter;
-       device_t                dev = adapter->dev;
-       struct igb_rx_buf       *rxbuf;
-       int                     i, bsize, error;
-
-       bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
-       if (!(rxr->rx_buffers =
-           (struct igb_rx_buf *) malloc(bsize,
-           M_DEVBUF, M_NOWAIT | M_ZERO))) {
-               device_printf(dev, "Unable to allocate rx_buffer memory\n");
-               error = ENOMEM;
-               goto fail;
-       }
-
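-       /* Two DMA tags: a small one for header mbufs and a jumbo-capable one for payload clusters */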
-       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
-                                  1, 0,                /* alignment, bounds */
-                                  BUS_SPACE_MAXADDR,   /* lowaddr */
-                                  BUS_SPACE_MAXADDR,   /* highaddr */
-                                  NULL, NULL,          /* filter, filterarg */
-                                  MSIZE,               /* maxsize */
-                                  1,                   /* nsegments */
-                                  MSIZE,               /* maxsegsize */
-                                  0,                   /* flags */
-                                  NULL,                /* lockfunc */
-                                  NULL,                /* lockfuncarg */
-                                  &rxr->htag))) {
-               device_printf(dev, "Unable to create RX DMA tag\n");
-               goto fail;
-       }
-
-       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
-                                  1, 0,                /* alignment, bounds */
-                                  BUS_SPACE_MAXADDR,   /* lowaddr */
-                                  BUS_SPACE_MAXADDR,   /* highaddr */
-                                  NULL, NULL,          /* filter, filterarg */
-                                  MJUM9BYTES,          /* maxsize */
-                                  1,                   /* nsegments */
-                                  MJUM9BYTES,          /* maxsegsize */
-                                  0,                   /* flags */
-                                  NULL,                /* lockfunc */
-                                  NULL,                /* lockfuncarg */
-                                  &rxr->ptag))) {
-               device_printf(dev, "Unable to create RX payload DMA tag\n");
-               goto fail;
-       }
-
-       for (i = 0; i < adapter->num_rx_desc; i++) {
-               rxbuf = &rxr->rx_buffers[i];
-               error = bus_dmamap_create(rxr->htag,
-                   BUS_DMA_NOWAIT, &rxbuf->hmap);
-               if (error) {
-                       device_printf(dev,
-                           "Unable to create RX head DMA maps\n");
-                       goto fail;
-               }
-               error = bus_dmamap_create(rxr->ptag,
-                   BUS_DMA_NOWAIT, &rxbuf->pmap);
-               if (error) {
-                       device_printf(dev,
-                           "Unable to create RX packet DMA maps\n");
-                       goto fail;
-               }
-       }
-
-       return (0);
-
-fail:
-       /* Frees all, but can handle partial completion */
-       igb_free_receive_structures(adapter);
-       return (error);
-}
-
-
-static void
-igb_free_receive_ring(struct rx_ring *rxr)
-{
-       struct  adapter         *adapter = rxr->adapter;
-       struct igb_rx_buf       *rxbuf;
-
-
-       for (int i = 0; i < adapter->num_rx_desc; i++) {
-               rxbuf = &rxr->rx_buffers[i];
-               if (rxbuf->m_head != NULL) {
-                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-                           BUS_DMASYNC_POSTREAD);
-                       bus_dmamap_unload(rxr->htag, rxbuf->hmap);
-                       rxbuf->m_head->m_flags |= M_PKTHDR;
-                       m_freem(rxbuf->m_head);
-               }
-               if (rxbuf->m_pack != NULL) {
-                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
-                           BUS_DMASYNC_POSTREAD);
-                       bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
-                       rxbuf->m_pack->m_flags |= M_PKTHDR;
-                       m_freem(rxbuf->m_pack);
-               }
-               rxbuf->m_head = NULL;
-               rxbuf->m_pack = NULL;
-       }
-}
-
-
-/*********************************************************************
- *
- *  Initialize a receive ring and its buffers.
- *
- **********************************************************************/
-static int
-igb_setup_receive_ring(struct rx_ring *rxr)
-{
-       struct  adapter         *adapter;
-       struct  ifnet           *ifp;
-       device_t                dev;
-       struct igb_rx_buf       *rxbuf;
-       bus_dma_segment_t       pseg[1], hseg[1];
-       struct lro_ctrl         *lro = &rxr->lro;
-       int                     rsize, nsegs, error = 0;
-
-       adapter = rxr->adapter;
-       dev = adapter->dev;
-       ifp = adapter->ifp;
-
-       /* Clear the ring contents */
-       IGB_RX_LOCK(rxr);
-       rsize = roundup2(adapter->num_rx_desc *
-           sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
-       bzero((void *)rxr->rx_base, rsize);
-
-       /*
-       ** Free current RX buffer structures and their mbufs
-       */
-       igb_free_receive_ring(rxr);
-
-       /* Configure for header split? */
-       if (igb_header_split)
-               rxr->hdr_split = TRUE;
-
-        /* Now replenish the ring mbufs */
-       for (int j = 0; j < adapter->num_rx_desc; ++j) {
-               struct mbuf     *mh, *mp;
-
-               rxbuf = &rxr->rx_buffers[j];
-               if (rxr->hdr_split == FALSE)
-                       goto skip_head;
-
-               /* First the header */
-               rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
-               if (rxbuf->m_head == NULL) {
-                       error = ENOBUFS;
-                        goto fail;
-               }
-               m_adj(rxbuf->m_head, ETHER_ALIGN);
-               mh = rxbuf->m_head;
-               mh->m_len = mh->m_pkthdr.len = MHLEN;
-               mh->m_flags |= M_PKTHDR;
-               /* Get the memory mapping */
-               error = bus_dmamap_load_mbuf_sg(rxr->htag,
-                   rxbuf->hmap, rxbuf->m_head, hseg,
-                   &nsegs, BUS_DMA_NOWAIT);
-               if (error != 0) /* Nothing elegant to do here */
-                        goto fail;
-               bus_dmamap_sync(rxr->htag,
-                   rxbuf->hmap, BUS_DMASYNC_PREREAD);
-               /* Update descriptor */
-               rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
-               /* Now the payload cluster */
-               rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
-                   M_PKTHDR, adapter->rx_mbuf_sz);
-               if (rxbuf->m_pack == NULL) {
-                       error = ENOBUFS;
-                        goto fail;
-               }
-               mp = rxbuf->m_pack;
-               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
-               /* Get the memory mapping */
-               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
-                   rxbuf->pmap, mp, pseg,
-                   &nsegs, BUS_DMA_NOWAIT);
-               if (error != 0)
-                        goto fail;
-               bus_dmamap_sync(rxr->ptag,
-                   rxbuf->pmap, BUS_DMASYNC_PREREAD);
-               /* Update descriptor */
-               rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
-        }
-
-       /* Setup our descriptor indices */
-       rxr->next_to_check = 0;
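-       /* next_to_refresh tracks the last descriptor handed back to hardware (the tail) */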
-       rxr->next_to_refresh = adapter->num_rx_desc - 1;
-       rxr->lro_enabled = FALSE;
-       rxr->rx_split_packets = 0;
-       rxr->rx_bytes = 0;
-
-       rxr->fmp = NULL;
-       rxr->lmp = NULL;
-       rxr->discard = FALSE;
-
-       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
-           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
-       /*
-       ** Now set up the LRO interface, we
-       ** also only do head split when LRO
-       ** is enabled, since so often they
-       ** are undesirable in similar setups.
-       */
-       if (ifp->if_capenable & IFCAP_LRO) {
-               error = tcp_lro_init(lro);
-               if (error) {
-                       device_printf(dev, "LRO Initialization failed!\n");
-                       goto fail;
-               }
-               INIT_DEBUGOUT("RX LRO Initialized\n");
-               rxr->lro_enabled = TRUE;
-               lro->ifp = adapter->ifp;
-       }
-
-       IGB_RX_UNLOCK(rxr);
-       return (0);
-
-fail:
-       igb_free_receive_ring(rxr);
-       IGB_RX_UNLOCK(rxr);
-       return (error);
-}
-
-
-/*********************************************************************
- *
- *  Initialize all receive rings.
- *
- **********************************************************************/
-static int
-igb_setup_receive_structures(struct adapter *adapter)
-{
-       struct rx_ring *rxr = adapter->rx_rings;
-       int i;
-
-       for (i = 0; i < adapter->num_queues; i++, rxr++)
-               if (igb_setup_receive_ring(rxr))
-                       goto fail;
-
-       return (0);
-fail:
-       /*
-        * Free RX buffers allocated so far; we only handle
-        * the rings that completed, since the failing case will have
-        * cleaned up for itself. 'i' is the endpoint.
-        */
-       for (int j = 0; j < i; ++j) {
-               rxr = &adapter->rx_rings[j];
-               IGB_RX_LOCK(rxr);
-               igb_free_receive_ring(rxr);
-               IGB_RX_UNLOCK(rxr);
-       }
-
-       return (ENOBUFS);
-}
-
-/*********************************************************************
- *
- *  Enable receive unit.
- *
- **********************************************************************/
-static void
-igb_initialize_receive_units(struct adapter *adapter)
-{
-       struct rx_ring  *rxr = adapter->rx_rings;
-       struct ifnet    *ifp = adapter->ifp;
-       struct e1000_hw *hw = &adapter->hw;
-       u32             rctl, rxcsum, psize, srrctl = 0;
-
-       INIT_DEBUGOUT("igb_initialize_receive_units: begin");
-
-       /*
-        * Make sure receives are disabled while setting
-        * up the descriptor ring
-        */
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
-
-       /*
-       ** Set up for header split
-       */
-       if (igb_header_split) {
-               /* Use a standard mbuf for the header */
-               srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-       } else
-               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-       /*
-       ** Set up for jumbo frames
-       */
-       if (ifp->if_mtu > ETHERMTU) {
-               rctl |= E1000_RCTL_LPE;
-               if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
-                       srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-                       rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
-               } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
-                       srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-                       rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
-               }
-               /* Set maximum packet len */
-               psize = adapter->max_frame_size;
-               /* are we on a vlan? */
-               if (adapter->ifp->if_vlantrunk != NULL)
-                       psize += VLAN_TAG_SIZE;
-               E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
-       } else {
-               rctl &= ~E1000_RCTL_LPE;
-               srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-               rctl |= E1000_RCTL_SZ_2048;
-       }
-
-       /* Setup the Base and Length of the Rx Descriptor Rings */
-       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
-               u64 bus_addr = rxr->rxdma.dma_paddr;
-               u32 rxdctl;
-
-               E1000_WRITE_REG(hw, E1000_RDLEN(i),
-                   adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
-               E1000_WRITE_REG(hw, E1000_RDBAH(i),
-                   (uint32_t)(bus_addr >> 32));
-               E1000_WRITE_REG(hw, E1000_RDBAL(i),
-                   (uint32_t)bus_addr);
-               E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
-               /* Enable this Queue */
-               rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
-               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-               rxdctl &= 0xFFF00000;
-               rxdctl |= IGB_RX_PTHRESH;
-               rxdctl |= IGB_RX_HTHRESH << 8;
-               rxdctl |= IGB_RX_WTHRESH << 16;
-               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
-       }
-
-       /*
-       ** Setup for RX MultiQueue
-       */
-       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
-       if (adapter->num_queues > 1) {
-               u32 random[10], mrqc, shift = 0;
-               union igb_reta {
-                       u32 dword;
-                       u8  bytes[4];
-               } reta;
-
-               arc4rand(&random, sizeof(random), 0);
-               if (adapter->hw.mac.type == e1000_82575)
-                       shift = 6;
-               /* Warning FM follows */
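-               /* Spread the 128 RETA entries round-robin across the queues */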
-               for (int i = 0; i < 128; i++) {
-                       reta.bytes[i & 3] =
-                           (i % adapter->num_queues) << shift;
-                       if ((i & 3) == 3)
-                               E1000_WRITE_REG(hw,
-                                   E1000_RETA(i >> 2), reta.dword);
-               }
-               /* Now fill in hash table */
-               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-               for (int i = 0; i < 10; i++)
-                       E1000_WRITE_REG_ARRAY(hw,
-                           E1000_RSSRK(0), i, random[i]);
-
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-                   E1000_MRQC_RSS_FIELD_IPV4_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-                   E1000_MRQC_RSS_FIELD_IPV6_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-                   E1000_MRQC_RSS_FIELD_IPV6_UDP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-                   E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-               E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
-
-               /*
-               ** NOTE: Receive Full-Packet Checksum Offload 
-               ** is mutually exclusive with Multiqueue. However
-               ** this is not the same as TCP/IP checksums which
-               ** still work.
-               */
-               rxcsum |= E1000_RXCSUM_PCSD;
-#if __FreeBSD_version >= 800000
-               /* For SCTP Offload */
-               if ((hw->mac.type == e1000_82576)
-                   && (ifp->if_capenable & IFCAP_RXCSUM))
-                       rxcsum |= E1000_RXCSUM_CRCOFL;
-#endif
-       } else {
-               /* Non RSS setup */
-               if (ifp->if_capenable & IFCAP_RXCSUM) {
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-#if __FreeBSD_version >= 800000
-                       if (adapter->hw.mac.type == e1000_82576)
-                               rxcsum |= E1000_RXCSUM_CRCOFL;
-#endif
-               } else
-                       rxcsum &= ~E1000_RXCSUM_TUOFL;
-       }
-       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
-
-       /* Setup the Receive Control Register */
-       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
-       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
-                  E1000_RCTL_RDMTS_HALF |
-                  (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-       /* Strip CRC bytes. */
-       rctl |= E1000_RCTL_SECRC;
-       /* Make sure VLAN Filters are off */
-       rctl &= ~E1000_RCTL_VFE;
-       /* Don't store bad packets */
-       rctl &= ~E1000_RCTL_SBP;
-
-       /* Enable Receives */
-       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-
-       /*
-        * Setup the HW Rx Head and Tail Descriptor Pointers
-        *   - needs to be after enable
-        */
-       for (int i = 0; i < adapter->num_queues; i++) {
-               rxr = &adapter->rx_rings[i];
-               E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
-               E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
-       }
-       return;
-}
-
-/*********************************************************************
- *
- *  Free receive rings.
- *
- **********************************************************************/
-static void
-igb_free_receive_structures(struct adapter *adapter)
-{
-       struct rx_ring *rxr = adapter->rx_rings;
-
-       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
-               struct lro_ctrl *lro = &rxr->lro;
-               igb_free_receive_buffers(rxr);
-               tcp_lro_free(lro);
-               igb_dma_free(adapter, &rxr->rxdma);
-       }
-
-       free(adapter->rx_rings, M_DEVBUF);
-}
-
-/*********************************************************************
- *
- *  Free receive ring data structures.
- *
- **********************************************************************/
-static void
-igb_free_receive_buffers(struct rx_ring *rxr)
-{
-       struct adapter          *adapter = rxr->adapter;
-       struct igb_rx_buf       *rxbuf;
-       int i;
-
-       INIT_DEBUGOUT("free_receive_structures: begin");
-
-       /* Cleanup any existing buffers */
-       if (rxr->rx_buffers != NULL) {
-               for (i = 0; i < adapter->num_rx_desc; i++) {
-                       rxbuf = &rxr->rx_buffers[i];
-                       if (rxbuf->m_head != NULL) {
-                               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-                                   BUS_DMASYNC_POSTREAD);
-                               bus_dmamap_unload(rxr->htag, rxbuf->hmap);
-                               rxbuf->m_head->m_flags |= M_PKTHDR;
-                               m_freem(rxbuf->m_head);
-                       }
-                       if (rxbuf->m_pack != NULL) {
-                               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
-                                   BUS_DMASYNC_POSTREAD);
-                               bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
-                               rxbuf->m_pack->m_flags |= M_PKTHDR;
-                               m_freem(rxbuf->m_pack);
-                       }
-                       rxbuf->m_head = NULL;
-                       rxbuf->m_pack = NULL;
-                       if (rxbuf->hmap != NULL) {
-                               bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
-                               rxbuf->hmap = NULL;
-                       }
-                       if (rxbuf->pmap != NULL) {
-                               bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
-                               rxbuf->pmap = NULL;
-                       }
-               }
-               if (rxr->rx_buffers != NULL) {
-                       free(rxr->rx_buffers, M_DEVBUF);
-                       rxr->rx_buffers = NULL;
-               }
-       }
-
-       if (rxr->htag != NULL) {
-               bus_dma_tag_destroy(rxr->htag);
-               rxr->htag = NULL;
-       }
-       if (rxr->ptag != NULL) {
-               bus_dma_tag_destroy(rxr->ptag);
-               rxr->ptag = NULL;
-       }
-}
-
-static __inline void
-igb_rx_discard(struct rx_ring *rxr, int i)
-{
-       struct igb_rx_buf       *rbuf;
-
-       rbuf = &rxr->rx_buffers[i];
-
-       /* Partially received? Free the chain */
-       if (rxr->fmp != NULL) {
-               rxr->fmp->m_flags |= M_PKTHDR;
-               m_freem(rxr->fmp);
-               rxr->fmp = NULL;
-               rxr->lmp = NULL;
-       }
-
-       /*
-       ** With advanced descriptors the writeback
-       ** clobbers the buffer addrs, so it's easier
-       ** to just free the existing mbufs and take
-       ** the normal refresh path to get new buffers
-       ** and mapping.
-       */
-       if (rbuf->m_head) {
-               m_free(rbuf->m_head);
-               rbuf->m_head = NULL;
-       }
-
-       if (rbuf->m_pack) {
-               m_free(rbuf->m_pack);
-               rbuf->m_pack = NULL;
-       }
-
-       return;
-}
-
-static __inline void
-igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
-{
-
-       /*
-        * At the moment LRO is only for IPv4/TCP packets, and the TCP checksum
-        * of the packet should be computed by hardware. It also should not have
-        * a VLAN tag in the Ethernet header.
-        */
-       if (rxr->lro_enabled &&
-           (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
-           (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
-           (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
-           (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
-           (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == 
-           (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
-               /*
-                * Send to the stack if:
-                *  - LRO not enabled, or
-                *  - no LRO resources, or
-                *  - lro enqueue fails
-                */
-               if (rxr->lro.lro_cnt != 0)
-                       if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
-                               return;
-       }
-       IGB_RX_UNLOCK(rxr);
-       (*ifp->if_input)(ifp, m);
-       IGB_RX_LOCK(rxr);
-}
-
-/*********************************************************************
- *
- *  This routine executes in interrupt context. It replenishes
- *  the mbufs in the descriptor and sends data which has been
- *  dma'ed into host memory to upper layer.
- *
- *  We loop at most count times if count is > 0, or until done if
- *  count < 0.
- *
- *  Return TRUE if more to clean, FALSE otherwise
- *********************************************************************/
-static bool
-igb_rxeof(struct igb_queue *que, int count, int *done)
-{
-       struct adapter          *adapter = que->adapter;
-       struct rx_ring          *rxr = que->rxr;
-       struct ifnet            *ifp = adapter->ifp;
-       struct lro_ctrl         *lro = &rxr->lro;
-       struct lro_entry        *queued;
-       int                     i, processed = 0, rxdone = 0;
-       u32                     ptype, staterr = 0;
-       union e1000_adv_rx_desc *cur;
-
-       IGB_RX_LOCK(rxr);
-       /* Sync the ring. */
-       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
-           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
-       /* Main clean loop */
-       for (i = rxr->next_to_check; count != 0;) {
-               struct mbuf             *sendmp, *mh, *mp;
-               struct igb_rx_buf       *rxbuf;
-               u16                     hlen, plen, hdr, vtag;
-               bool                    eop = FALSE;
-               cur = &rxr->rx_base[i];
-               staterr = le32toh(cur->wb.upper.status_error);
-               if ((staterr & E1000_RXD_STAT_DD) == 0)
-                       break;
-               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
-                       break;
-               count--;
-               sendmp = mh = mp = NULL;
-               cur->wb.upper.status_error = 0;
-               rxbuf = &rxr->rx_buffers[i];
-               plen = le16toh(cur->wb.upper.length);
-               ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
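-               /* i350 loopback packets carry the VLAN tag in network byte order */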
-               if ((adapter->hw.mac.type == e1000_i350) &&
-                   (staterr & E1000_RXDEXT_STATERR_LB))
-                       vtag = be16toh(cur->wb.upper.vlan);
-               else
-                       vtag = le16toh(cur->wb.upper.vlan);
-               hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
-               eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
-
-               /* Make sure all segments of a bad packet are discarded */
-               if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) ||
-                   (rxr->discard)) {
-                       ifp->if_ierrors++;
-                       ++rxr->rx_discarded;
-                       if (!eop) /* Catch subsequent segs */
-                               rxr->discard = TRUE;
-                       else
-                               rxr->discard = FALSE;
-                       igb_rx_discard(rxr, i);
-                       goto next_desc;
-               }
-
-               /*
-               ** The way the hardware is configured to
-               ** split, it will ONLY use the header buffer
-               ** when header split is enabled; otherwise we
-               ** get normal behavior, i.e., both header and
-               ** payload are DMA'd into the payload buffer.
-               **
-               ** The fmp test is to catch the case where a
-               ** packet spans multiple descriptors, in that
-               ** case only the first header is valid.
-               */
-               if (rxr->hdr_split && rxr->fmp == NULL) {
-                       hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
-                           E1000_RXDADV_HDRBUFLEN_SHIFT;
-                       if (hlen > IGB_HDR_BUF)
-                               hlen = IGB_HDR_BUF;
-                       mh = rxr->rx_buffers[i].m_head;
-                       mh->m_len = hlen;
-                       /* clear buf pointer for refresh */
-                       rxbuf->m_head = NULL;
-                       /*
-                       ** Get the payload length, this
-                       ** could be zero if it's a small
-                       ** packet.
-                       */
-                       if (plen > 0) {
-                               mp = rxr->rx_buffers[i].m_pack;
-                               mp->m_len = plen;
-                               mh->m_next = mp;
-                               /* clear buf pointer */
-                               rxbuf->m_pack = NULL;
-                               rxr->rx_split_packets++;
-                       }
-               } else {
-                       /*
-                       ** Either no header split, or a
-                       ** secondary piece of a fragmented
-                       ** split packet.
-                       */
-                       mh = rxr->rx_buffers[i].m_pack;
-                       mh->m_len = plen;
-                       /* clear buf info for refresh */
-                       rxbuf->m_pack = NULL;
-               }
-
-               ++processed; /* So we know when to refresh */
-
-               /* Initial frame - setup */
-               if (rxr->fmp == NULL) {
-                       mh->m_pkthdr.len = mh->m_len;
-                       /* Save the head of the chain */
-                       rxr->fmp = mh;
-                       rxr->lmp = mh;
-                       if (mp != NULL) {
-                               /* Add payload if split */
-                               mh->m_pkthdr.len += mp->m_len;
-                               rxr->lmp = mh->m_next;
-                       }
-               } else {
-                       /* Chain mbuf's together */
-                       rxr->lmp->m_next = mh;
-                       rxr->lmp = rxr->lmp->m_next;
-                       rxr->fmp->m_pkthdr.len += mh->m_len;
-               }
-
-               if (eop) {
-                       rxr->fmp->m_pkthdr.rcvif = ifp;
-                       ifp->if_ipackets++;
-                       rxr->rx_packets++;
-                       /* capture data for AIM */
-                       rxr->packets++;
-                       rxr->bytes += rxr->fmp->m_pkthdr.len;
-                       rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
-
-                       if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
-                               igb_rx_checksum(staterr, rxr->fmp, ptype);
-
-                       if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
-                           (staterr & E1000_RXD_STAT_VP) != 0) {
-                               rxr->fmp->m_pkthdr.ether_vtag = vtag;
-                               rxr->fmp->m_flags |= M_VLANTAG;
-                       }
-#if __FreeBSD_version >= 800000
-                       rxr->fmp->m_pkthdr.flowid = que->msix;
-                       rxr->fmp->m_flags |= M_FLOWID;
-#endif
-                       sendmp = rxr->fmp;
-                       /* Make sure to set M_PKTHDR. */
-                       sendmp->m_flags |= M_PKTHDR;
-                       rxr->fmp = NULL;
-                       rxr->lmp = NULL;
-               }
-
-next_desc:
-               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
-                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
-               /* Advance our pointers to the next descriptor. */
-               if (++i == adapter->num_rx_desc)
-                       i = 0;
-               /*
-               ** Send to the stack or LRO
-               */
-               if (sendmp != NULL) {
-                       rxr->next_to_check = i;
-                       igb_rx_input(rxr, ifp, sendmp, ptype);
-                       i = rxr->next_to_check;
-                       rxdone++;
-               }
-
-               /* Every 8 descriptors we go to refresh mbufs */
-               if (processed == 8) {
-                        igb_refresh_mbufs(rxr, i);
-                        processed = 0;
-               }
-       }
-
-       /* Catch any remainders */
-       if (igb_rx_unrefreshed(rxr))
-               igb_refresh_mbufs(rxr, i);
-
-       rxr->next_to_check = i;
-
-       /*
-        * Flush any outstanding LRO work
-        */
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
-
-       if (done != NULL)
-               *done = rxdone;
-
-       IGB_RX_UNLOCK(rxr);
-       return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
-}
-
-/*********************************************************************
- *
- *  Verify that the hardware indicated that the checksum is valid.
- *  Inform the stack about the status of the checksum so that the stack
- *  doesn't spend time verifying the checksum.
- *
- *********************************************************************/
-static void
-igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
-{
-       u16 status = (u16)staterr;
-       u8  errors = (u8) (staterr >> 24);
-       int sctp;
-
-       /* Ignore Checksum bit is set */
-       if (status & E1000_RXD_STAT_IXSM) {
-               mp->m_pkthdr.csum_flags = 0;
-               return;
-       }
-
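-       /* Remember whether this is SCTP so the L4 checksum flags can be set accordingly below */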
-       if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
-           (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
-               sctp = 1;
-       else
-               sctp = 0;
-       if (status & E1000_RXD_STAT_IPCS) {
-               /* Did it pass? */
-               if (!(errors & E1000_RXD_ERR_IPE)) {
-                       /* IP Checksum Good */
-                       mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
-                       mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-               } else
-                       mp->m_pkthdr.csum_flags = 0;
-       }
-
-       if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
-               u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
-               if (sctp) /* reassign */
-                       type = CSUM_SCTP_VALID;
-#endif
-               /* Did it pass? */
-               if (!(errors & E1000_RXD_ERR_TCPE)) {
-                       mp->m_pkthdr.csum_flags |= type;
-                       if (sctp == 0)
-                               mp->m_pkthdr.csum_data = htons(0xffff);
-               }
-       }
-       return;
-}
-
-/*
- * This routine is run via a vlan
- * config EVENT
- */
-static void
-igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
-       struct adapter  *adapter = ifp->if_softc;
-       u32             index, bit;
-
-       if (ifp->if_softc !=  arg)   /* Not our event */
-               return;
-
-       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
-                return;
-
-       IGB_CORE_LOCK(adapter);
-       index = (vtag >> 5) & 0x7F;
-       bit = vtag & 0x1F;
-       adapter->shadow_vfta[index] |= (1 << bit);
-       ++adapter->num_vlans;
-       /* Change hw filter setting */
-       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
-               igb_setup_vlan_hw_support(adapter);
-       IGB_CORE_UNLOCK(adapter);
-}
-
-/*
- * This routine is run via a vlan
- * unconfig EVENT
- */
-static void
-igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
-       struct adapter  *adapter = ifp->if_softc;
-       u32             index, bit;
-
-       if (ifp->if_softc !=  arg)
-               return;
-
-       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
-                return;
-
-       IGB_CORE_LOCK(adapter);
-       index = (vtag >> 5) & 0x7F;
-       bit = vtag & 0x1F;
-       adapter->shadow_vfta[index] &= ~(1 << bit);
-       --adapter->num_vlans;
-       /* Change hw filter setting */
-       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
-               igb_setup_vlan_hw_support(adapter);
-       IGB_CORE_UNLOCK(adapter);
-}
-
-static void
-igb_setup_vlan_hw_support(struct adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       struct ifnet    *ifp = adapter->ifp;
-       u32             reg;
-
-       if (adapter->vf_ifp) {
-               e1000_rlpml_set_vf(hw,
-                   adapter->max_frame_size + VLAN_TAG_SIZE);
-               return;
-       }
-
-       reg = E1000_READ_REG(hw, E1000_CTRL);
-       reg |= E1000_CTRL_VME;
-       E1000_WRITE_REG(hw, E1000_CTRL, reg);
-
-       /* Enable the Filter Table */
-       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
-               reg = E1000_READ_REG(hw, E1000_RCTL);
-               reg &= ~E1000_RCTL_CFIEN;
-               reg |= E1000_RCTL_VFE;
-               E1000_WRITE_REG(hw, E1000_RCTL, reg);
-       }
-
-       /* Update the frame size */
-       E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
-           adapter->max_frame_size + VLAN_TAG_SIZE);
-
-       /* Don't bother with table if no vlans */
-       if ((adapter->num_vlans == 0) ||
-           ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
-                return;
-       /*
-       ** A soft reset zeroes out the VFTA, so
-       ** we need to repopulate it now.
-       */
-       for (int i = 0; i < IGB_VFTA_SIZE; i++)
-                if (adapter->shadow_vfta[i] != 0) {
-                       if (adapter->vf_ifp)
-                               e1000_vfta_set_vf(hw,
-                                   adapter->shadow_vfta[i], TRUE);
-                       else
-                               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
-                                i, adapter->shadow_vfta[i]);
-               }
-}
-
-static void
-igb_enable_intr(struct adapter *adapter)
-{
-       /* With RSS set up, configure what to auto-clear */
-       if (adapter->msix_mem) {
-               u32 mask = (adapter->que_mask | adapter->link_mask);
-               E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
-               E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
-               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
-               E1000_WRITE_REG(&adapter->hw, E1000_IMS,
-                   E1000_IMS_LSC);
-       } else {
-               E1000_WRITE_REG(&adapter->hw, E1000_IMS,
-                   IMS_ENABLE_MASK);
-       }
-       E1000_WRITE_FLUSH(&adapter->hw);
-
-       return;
-}
-
-static void
-igb_disable_intr(struct adapter *adapter)
-{
-       if (adapter->msix_mem) {
-               E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
-               E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
-       } 
-       E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
-       E1000_WRITE_FLUSH(&adapter->hw);
-       return;
-}
-
-/*
- * Bit of a misnomer: what this really means is
- * to enable OS management of the system... aka
- * to disable special hardware management features 
- */
-static void
-igb_init_manageability(struct adapter *adapter)
-{
-       if (adapter->has_manage) {
-               int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
-               int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
-
-               /* disable hardware interception of ARP */
-               manc &= ~(E1000_MANC_ARP_EN);
-
-                /* enable receiving management packets to the host */
-               manc |= E1000_MANC_EN_MNG2HOST;
-               manc2h |= 1 << 5;  /* Mng Port 623 */
-               manc2h |= 1 << 6;  /* Mng Port 664 */
-               E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
-               E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
-       }
-}
-
-/*
- * Give control back to hardware management
- * controller if there is one.
- */
-static void
-igb_release_manageability(struct adapter *adapter)
-{
-       if (adapter->has_manage) {
-               int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
-
-               /* re-enable hardware interception of ARP */
-               manc |= E1000_MANC_ARP_EN;
-               manc &= ~E1000_MANC_EN_MNG2HOST;
-
-               E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
-       }
-}
-
-/*
- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. 
- *
- */
-static void
-igb_get_hw_control(struct adapter *adapter)
-{
-       u32 ctrl_ext;
-
-       if (adapter->vf_ifp)
-               return;
-
-       /* Let firmware know the driver has taken over */
-       ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
-       E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
-           ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/*
- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded.
- *
- */
-static void
-igb_release_hw_control(struct adapter *adapter)
-{
-       u32 ctrl_ext;
-
-       if (adapter->vf_ifp)
-               return;
-
-       /* Let firmware take over control of h/w */
-       ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
-       E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
-           ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-}
-
-static int
-igb_is_valid_ether_addr(uint8_t *addr)
-{
-       char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
-
-       if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
-               return (FALSE);
-       }
-
-       return (TRUE);
-}
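The check above reduces to two rules: the group/multicast bit in the first
octet must be clear and the address must not be all zeroes. A standalone
restatement, illustrative only (the sample addresses are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool
ether_addr_ok(const uint8_t addr[6])
{
        static const uint8_t zero[6];

        if (addr[0] & 1)                /* group/multicast bit set */
                return (false);
        if (memcmp(addr, zero, 6) == 0) /* all-zero address */
                return (false);
        return (true);
}

int
main(void)
{
        const uint8_t unicast[6]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        const uint8_t broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        printf("%d %d\n", ether_addr_ok(unicast), ether_addr_ok(broadcast));   /* 1 0 */
        return (0);
}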
-
-
-/*
- * Enable PCI Wake On Lan capability
- */
-static void
-igb_enable_wakeup(device_t dev)
-{
-       u16     cap, status;
-       u8      id;
-
-       /* First find the capabilities pointer */
-       cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
-       /* Read the PM Capabilities */
-       id = pci_read_config(dev, cap, 1);
-       if (id != PCIY_PMG)     /* Something wrong */
-               return;
-       /* OK, we have the power capabilities, so
-          now get the status register */
-       cap += PCIR_POWER_STATUS;
-       status = pci_read_config(dev, cap, 2);
-       status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
-       pci_write_config(dev, cap, status, 2);
-       return;
-}
-
-static void
-igb_led_func(void *arg, int onoff)
-{
-       struct adapter  *adapter = arg;
-
-       IGB_CORE_LOCK(adapter);
-       if (onoff) {
-               e1000_setup_led(&adapter->hw);
-               e1000_led_on(&adapter->hw);
-       } else {
-               e1000_led_off(&adapter->hw);
-               e1000_cleanup_led(&adapter->hw);
-       }
-       IGB_CORE_UNLOCK(adapter);
-}
-
-/**********************************************************************
- *
- *  Update the board statistics counters.
- *
- **********************************************************************/
-static void
-igb_update_stats_counters(struct adapter *adapter)
-{
-       struct ifnet            *ifp;
-        struct e1000_hw                *hw = &adapter->hw;
-       struct e1000_hw_stats   *stats;
-
-       /* 
-       ** The virtual function adapter has only a
-       ** small controlled set of stats, so do only
-       ** those and return.
-       */
-       if (adapter->vf_ifp) {
-               igb_update_vf_stats_counters(adapter);
-               return;
-       }
-
-       stats = (struct e1000_hw_stats  *)adapter->stats;
-
-       if(adapter->hw.phy.media_type == e1000_media_type_copper ||
-          (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
-               stats->symerrs +=
-                   E1000_READ_REG(hw,E1000_SYMERRS);
-               stats->sec += E1000_READ_REG(hw, E1000_SEC);
-       }
-
-       stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
-       stats->mpc += E1000_READ_REG(hw, E1000_MPC);
-       stats->scc += E1000_READ_REG(hw, E1000_SCC);
-       stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
-
-       stats->mcc += E1000_READ_REG(hw, E1000_MCC);
-       stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
-       stats->colc += E1000_READ_REG(hw, E1000_COLC);
-       stats->dc += E1000_READ_REG(hw, E1000_DC);
-       stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
-       stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
-       stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
-       /*
-       ** For watchdog management we need to know if we have been
-       ** paused during the last interval, so capture that here.
-       */ 
-        adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
-        stats->xoffrxc += adapter->pause_frames;
-       stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
-       stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
-       stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
-       stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
-       stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
-       stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
-       stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
-       stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
-       stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
-       stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
-       stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
-       stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
-
-       /* For the 64-bit byte counters the low dword must be read first. */
-       /* Both registers clear on the read of the high dword */
-
-       stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
-           ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32);
-       stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
-           ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32);
-
-       stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
-       stats->ruc += E1000_READ_REG(hw, E1000_RUC);
-       stats->rfc += E1000_READ_REG(hw, E1000_RFC);
-       stats->roc += E1000_READ_REG(hw, E1000_ROC);
-       stats->rjc += E1000_READ_REG(hw, E1000_RJC);
-
-       stats->tor += E1000_READ_REG(hw, E1000_TORH);
-       stats->tot += E1000_READ_REG(hw, E1000_TOTH);
-
-       stats->tpr += E1000_READ_REG(hw, E1000_TPR);
-       stats->tpt += E1000_READ_REG(hw, E1000_TPT);
-       stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
-       stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
-       stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
-       stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
-       stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
-       stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
-       stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
-       stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
-
-       /* Interrupt Counts */
-
-       stats->iac += E1000_READ_REG(hw, E1000_IAC);
-       stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
-       stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
-       stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
-       stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
-       stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
-       stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
-       stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
-       stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
-
-       /* Host to Card Statistics */
-
-       stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
-       stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
-       stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
-       stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
-       stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
-       stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
-       stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
-       stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
-           ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32));
-       stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
-           ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
-       stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
-       stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
-       stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
-
-       stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
-       stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
-       stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
-       stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
-       stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
-       stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
-
-       ifp = adapter->ifp;
-       ifp->if_collisions = stats->colc;
-
-       /* Rx Errors */
-       ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc +
-           stats->crcerrs + stats->algnerrc +
-           stats->ruc + stats->roc + stats->mpc + stats->cexterr;
-
-       /* Tx Errors */
-       ifp->if_oerrors = stats->ecol +
-           stats->latecol + adapter->watchdog_events;
-
-       /* Driver specific counters */
-       adapter->device_control = E1000_READ_REG(hw, E1000_CTRL);
-       adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL);
-       adapter->int_mask = E1000_READ_REG(hw, E1000_IMS);
-       adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
-       adapter->packet_buf_alloc_tx =
-           ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
-       adapter->packet_buf_alloc_rx =
-           (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
-}
-
-
-/**********************************************************************
- *
- *  Initialize the VF board statistics counters.
- *
- **********************************************************************/
-static void
-igb_vf_init_stats(struct adapter *adapter)
-{
-        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_vf_stats   *stats;
-
-       stats = (struct e1000_vf_stats  *)adapter->stats;
-       if (stats == NULL)
-               return;
-        stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
-        stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
-        stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
-        stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
-        stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
-}
-/**********************************************************************
- *
- *  Update the VF board statistics counters.
- *
- **********************************************************************/
-static void
-igb_update_vf_stats_counters(struct adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       struct e1000_vf_stats   *stats;
-
-       if (adapter->link_speed == 0)
-               return;
-
-       stats = (struct e1000_vf_stats  *)adapter->stats;
-
-       UPDATE_VF_REG(E1000_VFGPRC,
-           stats->last_gprc, stats->gprc);
-       UPDATE_VF_REG(E1000_VFGORC,
-           stats->last_gorc, stats->gorc);
-       UPDATE_VF_REG(E1000_VFGPTC,
-           stats->last_gptc, stats->gptc);
-       UPDATE_VF_REG(E1000_VFGOTC,
-           stats->last_gotc, stats->gotc);
-       UPDATE_VF_REG(E1000_VFMPRC,
-           stats->last_mprc, stats->mprc);
-}
-
-/* Export a single 32-bit register via a read-only sysctl. */
-static int
-igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
-{
-       struct adapter *adapter;
-       u_int val;
-
-       adapter = oidp->oid_arg1;
-       val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
-       return (sysctl_handle_int(oidp, &val, 0, req));
-}
-
-/*
-**  Tuneable interrupt rate handler
-*/
-static int
-igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
-{
-       struct igb_queue        *que = ((struct igb_queue *)oidp->oid_arg1);
-       int                     error;
-       u32                     reg, usec, rate;
-                        
-       reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix));
-       usec = ((reg & 0x7FFC) >> 2);
-       if (usec > 0)
-               rate = 1000000 / usec;
-       else
-               rate = 0;
-       error = sysctl_handle_int(oidp, &rate, 0, req);
-       if (error || !req->newptr)
-               return error;
-       return 0;
-}
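As a worked example of the arithmetic above: with the driver default of 8000
interrupts per second, IGB_DEFAULT_ITR (defined in if_igb.h below) works out
to (1000000 / 8000) << 2 = 500, and the decode above recovers 125 microseconds
and an 8000/s rate from such a register value. A self-contained sketch of the
round trip, illustrative only:

#include <stdio.h>

#define INTS_PER_SEC    8000    /* mirrors IGB_INTS_PER_SEC */

int
main(void)
{
        /* Encode: same arithmetic as the IGB_DEFAULT_ITR macro. */
        unsigned reg = (1000000 / INTS_PER_SEC) << 2;   /* 125 << 2 = 500 */

        /* Decode: same arithmetic as the sysctl handler. */
        unsigned usec = (reg & 0x7FFC) >> 2;            /* 125 us */
        unsigned rate = usec ? 1000000 / usec : 0;      /* 8000 ints/s */

        printf("reg=%u usec=%u rate=%u\n", reg, usec, rate);
        return (0);
}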
-
-/*
- * Add sysctl variables, one per statistic, to the system.
- */
-static void
-igb_add_hw_stats(struct adapter *adapter)
-{
-       device_t dev = adapter->dev;
-
-       struct tx_ring *txr = adapter->tx_rings;
-       struct rx_ring *rxr = adapter->rx_rings;
-
-       struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
-       struct sysctl_oid *tree = device_get_sysctl_tree(dev);
-       struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
-       struct e1000_hw_stats *stats = adapter->stats;
-
-       struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node;
-       struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list;
-
-#define QUEUE_NAME_LEN 32
-       char namebuf[QUEUE_NAME_LEN];
-
-       /* Driver Statistics */
-       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq", 
-                       CTLFLAG_RD, &adapter->link_irq, 0,
-                       "Link MSIX IRQ Handled");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
-                       CTLFLAG_RD, &adapter->dropped_pkts,
-                       "Driver dropped packets");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
-                       CTLFLAG_RD, &adapter->no_tx_dma_setup,
-                       "Driver tx dma failure in xmit");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
-                       CTLFLAG_RD, &adapter->rx_overruns,
-                       "RX overruns");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
-                       CTLFLAG_RD, &adapter->watchdog_events,
-                       "Watchdog timeouts");
-
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", 
-                       CTLFLAG_RD, &adapter->device_control,
-                       "Device Control Register");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", 
-                       CTLFLAG_RD, &adapter->rx_control,
-                       "Receiver Control Register");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", 
-                       CTLFLAG_RD, &adapter->int_mask,
-                       "Interrupt Mask");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", 
-                       CTLFLAG_RD, &adapter->eint_mask,
-                       "Extended Interrupt Mask");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", 
-                       CTLFLAG_RD, &adapter->packet_buf_alloc_tx,
-                       "Transmit Buffer Packet Allocation");
-       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", 
-                       CTLFLAG_RD, &adapter->packet_buf_alloc_rx,
-                       "Receive Buffer Packet Allocation");
-       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
-                       CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
-                       "Flow Control High Watermark");
-       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
-                       CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
-                       "Flow Control Low Watermark");
-
-       for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
-               struct lro_ctrl *lro = &rxr->lro;
-
-               snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
-               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
-                                           CTLFLAG_RD, NULL, "Queue Name");
-               queue_list = SYSCTL_CHILDREN(queue_node);
-
-               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 
-                               CTLFLAG_RD, &adapter->queues[i],
-                               sizeof(&adapter->queues[i]),
-                               igb_sysctl_interrupt_rate_handler,
-                               "IU", "Interrupt Rate");
-
-               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
-                               CTLFLAG_RD, adapter, E1000_TDH(txr->me),
-                               igb_sysctl_reg_handler, "IU",
-                               "Transmit Descriptor Head");
-               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
-                               CTLFLAG_RD, adapter, E1000_TDT(txr->me),
-                               igb_sysctl_reg_handler, "IU",
-                               "Transmit Descriptor Tail");
-               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 
-                               CTLFLAG_RD, &txr->no_desc_avail,
-                               "Queue No Descriptor Available");
-               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
-                               CTLFLAG_RD, &txr->tx_packets,
-                               "Queue Packets Transmitted");
-
-               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
-                               CTLFLAG_RD, adapter, E1000_RDH(rxr->me),
-                               igb_sysctl_reg_handler, "IU",
-                               "Receive Descriptor Head");
-               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
-                               CTLFLAG_RD, adapter, E1000_RDT(rxr->me),
-                               igb_sysctl_reg_handler, "IU",
-                               "Receive Descriptor Tail");
-               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
-                               CTLFLAG_RD, &rxr->rx_packets,
-                               "Queue Packets Received");
-               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
-                               CTLFLAG_RD, &rxr->rx_bytes,
-                               "Queue Bytes Received");
-               SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
-                               CTLFLAG_RD, &lro->lro_queued, 0,
-                               "LRO Queued");
-               SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
-                               CTLFLAG_RD, &lro->lro_flushed, 0,
-                               "LRO Flushed");
-       }
-
-       /* MAC stats get their own sub node */
-
-       stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
-                                   CTLFLAG_RD, NULL, "MAC Statistics");
-       stat_list = SYSCTL_CHILDREN(stat_node);
-
-       /*
-       ** VF adapter has a very limited set of stats
-       ** since it's not managing the metal, so to speak.
-       */
-       if (adapter->vf_ifp) {
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
-                       CTLFLAG_RD, &stats->gprc,
-                       "Good Packets Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
-                       CTLFLAG_RD, &stats->gptc,
-                       "Good Packets Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
-                       CTLFLAG_RD, &stats->gorc, 
-                       "Good Octets Received"); 
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
-                       CTLFLAG_RD, &stats->gotc, 
-                       "Good Octets Transmitted"); 
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
-                       CTLFLAG_RD, &stats->mprc,
-                       "Multicast Packets Received");
-               return;
-       }
-
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", 
-                       CTLFLAG_RD, &stats->ecol,
-                       "Excessive collisions");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", 
-                       CTLFLAG_RD, &stats->scc,
-                       "Single collisions");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 
-                       CTLFLAG_RD, &stats->mcc,
-                       "Multiple collisions");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", 
-                       CTLFLAG_RD, &stats->latecol,
-                       "Late collisions");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", 
-                       CTLFLAG_RD, &stats->colc,
-                       "Collision Count");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
-                       CTLFLAG_RD, &stats->symerrs,
-                       "Symbol Errors");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
-                       CTLFLAG_RD, &stats->sec,
-                       "Sequence Errors");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
-                       CTLFLAG_RD, &stats->dc,
-                       "Defer Count");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
-                       CTLFLAG_RD, &stats->mpc,
-                       "Missed Packets");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
-                       CTLFLAG_RD, &stats->rnbc,
-                       "Receive No Buffers");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
-                       CTLFLAG_RD, &stats->ruc,
-                       "Receive Undersize");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
-                       CTLFLAG_RD, &stats->rfc,
-                       "Fragmented Packets Received ");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
-                       CTLFLAG_RD, &stats->roc,
-                       "Oversized Packets Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
-                       CTLFLAG_RD, &stats->rjc,
-                       "Received Jabber");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
-                       CTLFLAG_RD, &stats->rxerrc,
-                       "Receive Errors");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
-                       CTLFLAG_RD, &stats->crcerrs,
-                       "CRC errors");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
-                       CTLFLAG_RD, &stats->algnerrc,
-                       "Alignment Errors");
-       /* On 82575 these are collision counts */
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
-                       CTLFLAG_RD, &stats->cexterr,
-                       "Collision/Carrier extension errors");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
-                       CTLFLAG_RD, &stats->xonrxc,
-                       "XON Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
-                       CTLFLAG_RD, &stats->xontxc,
-                       "XON Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
-                       CTLFLAG_RD, &stats->xoffrxc,
-                       "XOFF Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
-                       CTLFLAG_RD, &stats->xofftxc,
-                       "XOFF Transmitted");
-       /* Packet Reception Stats */
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
-                       CTLFLAG_RD, &stats->tpr,
-                       "Total Packets Received ");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
-                       CTLFLAG_RD, &stats->gprc,
-                       "Good Packets Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
-                       CTLFLAG_RD, &stats->bprc,
-                       "Broadcast Packets Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
-                       CTLFLAG_RD, &stats->mprc,
-                       "Multicast Packets Received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
-                       CTLFLAG_RD, &stats->prc64,
-                       "64 byte frames received ");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
-                       CTLFLAG_RD, &stats->prc127,
-                       "65-127 byte frames received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
-                       CTLFLAG_RD, &stats->prc255,
-                       "128-255 byte frames received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
-                       CTLFLAG_RD, &stats->prc511,
-                       "256-511 byte frames received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
-                       CTLFLAG_RD, &stats->prc1023,
-                       "512-1023 byte frames received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
-                       CTLFLAG_RD, &stats->prc1522,
-                       "1024-1522 byte frames received");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
-                       CTLFLAG_RD, &stats->gorc, 
-                       "Good Octets Received"); 
-
-       /* Packet Transmission Stats */
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
-                       CTLFLAG_RD, &stats->gotc, 
-                       "Good Octets Transmitted"); 
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
-                       CTLFLAG_RD, &stats->tpt,
-                       "Total Packets Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
-                       CTLFLAG_RD, &stats->gptc,
-                       "Good Packets Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
-                       CTLFLAG_RD, &stats->bptc,
-                       "Broadcast Packets Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
-                       CTLFLAG_RD, &stats->mptc,
-                       "Multicast Packets Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
-                       CTLFLAG_RD, &stats->ptc64,
-                       "64 byte frames transmitted ");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
-                       CTLFLAG_RD, &stats->ptc127,
-                       "65-127 byte frames transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
-                       CTLFLAG_RD, &stats->ptc255,
-                       "128-255 byte frames transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
-                       CTLFLAG_RD, &stats->ptc511,
-                       "256-511 byte frames transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
-                       CTLFLAG_RD, &stats->ptc1023,
-                       "512-1023 byte frames transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
-                       CTLFLAG_RD, &stats->ptc1522,
-                       "1024-1522 byte frames transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
-                       CTLFLAG_RD, &stats->tsctc,
-                       "TSO Contexts Transmitted");
-       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
-                       CTLFLAG_RD, &stats->tsctfc,
-                       "TSO Contexts Failed");
-
-
-       /* Interrupt Stats */
-
-       int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", 
-                                   CTLFLAG_RD, NULL, "Interrupt Statistics");
-       int_list = SYSCTL_CHILDREN(int_node);
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts",
-                       CTLFLAG_RD, &stats->iac,
-                       "Interrupt Assertion Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
-                       CTLFLAG_RD, &stats->icrxptc,
-                       "Interrupt Cause Rx Pkt Timer Expire Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
-                       CTLFLAG_RD, &stats->icrxatc,
-                       "Interrupt Cause Rx Abs Timer Expire Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
-                       CTLFLAG_RD, &stats->ictxptc,
-                       "Interrupt Cause Tx Pkt Timer Expire Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
-                       CTLFLAG_RD, &stats->ictxatc,
-                       "Interrupt Cause Tx Abs Timer Expire Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
-                       CTLFLAG_RD, &stats->ictxqec,
-                       "Interrupt Cause Tx Queue Empty Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
-                       CTLFLAG_RD, &stats->ictxqmtc,
-                       "Interrupt Cause Tx Queue Min Thresh Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
-                       CTLFLAG_RD, &stats->icrxdmtc,
-                       "Interrupt Cause Rx Desc Min Thresh Count");
-
-       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun",
-                       CTLFLAG_RD, &stats->icrxoc,
-                       "Interrupt Cause Receiver Overrun Count");
-
-       /* Host to Card Stats */
-
-       host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", 
-                                   CTLFLAG_RD, NULL, 
-                                   "Host to Card Statistics");
-
-       host_list = SYSCTL_CHILDREN(host_node);
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
-                       CTLFLAG_RD, &stats->cbtmpc,
-                       "Circuit Breaker Tx Packet Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
-                       CTLFLAG_RD, &stats->htdpmc,
-                       "Host Transmit Discarded Packets");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt",
-                       CTLFLAG_RD, &stats->rpthc,
-                       "Rx Packets To Host");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
-                       CTLFLAG_RD, &stats->cbrmpc,
-                       "Circuit Breaker Rx Packet Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
-                       CTLFLAG_RD, &stats->cbrdpc,
-                       "Circuit Breaker Rx Dropped Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
-                       CTLFLAG_RD, &stats->hgptc,
-                       "Host Good Packets Tx Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
-                       CTLFLAG_RD, &stats->htcbdpc,
-                       "Host Tx Circuit Breaker Dropped Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
-                       CTLFLAG_RD, &stats->hgorc,
-                       "Host Good Octets Received Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
-                       CTLFLAG_RD, &stats->hgotc,
-                       "Host Good Octets Transmit Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors",
-                       CTLFLAG_RD, &stats->lenerrs,
-                       "Length Errors");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
-                       CTLFLAG_RD, &stats->scvpc,
-                       "SerDes/SGMII Code Violation Pkt Count");
-
-       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
-                       CTLFLAG_RD, &stats->hrmpc,
-                       "Header Redirection Missed Packet Count");
-}
-
-
-/**********************************************************************
- *
- *  This routine provides a way to dump out the adapter eeprom,
- *  often a useful debug/service tool. This only dumps the first
- *  32 words; the stuff that matters is in that extent.
- *
- **********************************************************************/
-static int
-igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
-{
-       struct adapter *adapter;
-       int error;
-       int result;
-
-       result = -1;
-       error = sysctl_handle_int(oidp, &result, 0, req);
-
-       if (error || !req->newptr)
-               return (error);
-
-       /*
-        * This value will cause a hex dump of the
-        * first 32 16-bit words of the EEPROM to
-        * the screen.
-        */
-       if (result == 1) {
-               adapter = (struct adapter *)arg1;
-               igb_print_nvm_info(adapter);
-        }
-
-       return (error);
-}
-
-static void
-igb_print_nvm_info(struct adapter *adapter)
-{
-       u16     eeprom_data;
-       int     i, j, row = 0;
-
-       /* It's a bit crude, but it gets the job done */
-       printf("\nInterface EEPROM Dump:\n");
-       printf("Offset\n0x0000  ");
-       for (i = 0, j = 0; i < 32; i++, j++) {
-               if (j == 8) { /* Make the offset block */
-                       j = 0; ++row;
-                       printf("\n0x00%x0  ",row);
-               }
-               e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
-               printf("%04x ", eeprom_data);
-       }
-       printf("\n");
-}
-
-static void
-igb_set_sysctl_value(struct adapter *adapter, const char *name,
-       const char *description, int *limit, int value)
-{
-       *limit = value;
-       SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
-           SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
-           OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
-}
-
-/*
-** Set flow control using sysctl:
-** Flow control values:
-**     0 - off
-**     1 - rx pause
-**     2 - tx pause
-**     3 - full
-*/
-static int
-igb_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
-       int error;
-       struct adapter *adapter;
-
-       error = sysctl_handle_int(oidp, &igb_fc_setting, 0, req);
-
-       if (error)
-               return (error);
-
-       adapter = (struct adapter *) arg1;
-       switch (igb_fc_setting) {
-               case e1000_fc_rx_pause:
-               case e1000_fc_tx_pause:
-               case e1000_fc_full:
-                       adapter->hw.fc.requested_mode = igb_fc_setting;
-                       break;
-               case e1000_fc_none:
-               default:
-                       adapter->hw.fc.requested_mode = e1000_fc_none;
-       }
-
-       adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
-       e1000_force_mac_fc(&adapter->hw);
-       return error;
-}
diff --git a/lib/librte_pmd_igb/igb/if_igb.h b/lib/librte_pmd_igb/igb/if_igb.h
deleted file mode 100644 (file)
index 9a0bb47..0000000
+++ /dev/null
@@ -1,541 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2001-2011, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _IGB_H_DEFINED_
-#define _IGB_H_DEFINED_
-
-/* Tunables */
-
-/*
- * IGB_TXD: Maximum number of Transmit Descriptors
- *
- *   This value is the number of transmit descriptors allocated by the driver.
- *   Increasing this value allows the driver to queue more transmits. Each
- *   descriptor is 16 bytes.
- *   Since TDLEN should be a multiple of 128 bytes, the number of transmit
- *   descriptors should meet the following condition.
- *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
- */
-#define IGB_MIN_TXD            256
-#define IGB_DEFAULT_TXD                1024
-#define IGB_MAX_TXD            4096
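Since each descriptor is 16 bytes, the 128-byte TDLEN condition above is
equivalent to requiring the ring size to be a multiple of 8, which the
MIN/DEFAULT/MAX values all satisfy. A throwaway check, for illustration only:

#include <stdio.h>

#define TX_DESC_SIZE    16      /* sizeof(struct e1000_tx_desc), per the comment above */

int
main(void)
{
        const int sizes[] = { 256, 1024, 4096, 100 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%4d descriptors: %s\n", sizes[i],
                    (sizes[i] * TX_DESC_SIZE) % 128 == 0 ? "ok" : "not a multiple of 8");
        return (0);
}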
-
-/*
- * IGB_RXD: Maximum number of Receive Descriptors
- *
- *   This value is the number of receive descriptors allocated by the driver.
- *   Increasing this value allows the driver to buffer more incoming packets.
- *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
- *   descriptor. The maximum MTU size is 16110.
- *   Since RDLEN should be a multiple of 128 bytes, the number of receive
- *   descriptors should meet the following condition.
- *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
- */
-#define IGB_MIN_RXD            256
-#define IGB_DEFAULT_RXD                1024
-#define IGB_MAX_RXD            4096
-
-/*
- * IGB_TIDV - Transmit Interrupt Delay Value
- * Valid Range: 0-65535 (0=off)
- * Default Value: 64
- *   This value delays the generation of transmit interrupts in units of
- *   1.024 microseconds. Transmit interrupt reduction can improve CPU
- *   efficiency if properly tuned for specific network traffic. If the
- *   system is reporting dropped transmits, this value may be set too high
- *   causing the driver to run out of available transmit descriptors.
- */
-#define IGB_TIDV                         64
-
-/*
- * IGB_TADV - Transmit Absolute Interrupt Delay Value
- * Valid Range: 0-65535 (0=off)
- * Default Value: 64
- *   This value, in units of 1.024 microseconds, limits the delay in which a
- *   transmit interrupt is generated. Useful only if IGB_TIDV is non-zero,
- *   this value ensures that an interrupt is generated after the initial
- *   packet is sent on the wire within the set amount of time.  Proper tuning,
- *   along with IGB_TIDV, may improve traffic throughput in specific
- *   network conditions.
- */
-#define IGB_TADV                         64
-
-/*
- * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer)
- * Valid Range: 0-65535 (0=off)
- * Default Value: 0
- *   This value delays the generation of receive interrupts in units of 1.024
- *   microseconds.  Receive interrupt reduction can improve CPU efficiency if
- *   properly tuned for specific network traffic. Increasing this value adds
- *   extra latency to frame reception and can end up decreasing the throughput
- *   of TCP traffic. If the system is reporting dropped receives, this value
- *   may be set too high, causing the driver to run out of available receive
- *   descriptors.
- *
- *   CAUTION: When setting IGB_RDTR to a value other than 0, adapters
- *            may hang (stop transmitting) under certain network conditions.
- *            If this occurs a WATCHDOG message is logged in the system
- *            event log. In addition, the controller is automatically reset,
- *            restoring the network connection. To eliminate the potential
- *            for the hang ensure that IGB_RDTR is set to 0.
- */
-#define IGB_RDTR                         0
-
-/*
- * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
- * Valid Range: 0-65535 (0=off)
- * Default Value: 64
- *   This value, in units of 1.024 microseconds, limits the delay in which a
- *   receive interrupt is generated. Useful only if IGB_RDTR is non-zero,
- *   this value ensures that an interrupt is generated after the initial
- *   packet is received within the set amount of time.  Proper tuning,
- *   along with IGB_RDTR, may improve traffic throughput in specific network
- *   conditions.
- */
-#define IGB_RADV                         64
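The delay tunables above are all expressed in 1.024 microsecond units, so the
value 64 used for IGB_TIDV/IGB_TADV/IGB_RADV corresponds to roughly 65.5
microseconds. A trivial conversion sketch, purely for illustration:

#include <stdio.h>

int
main(void)
{
        const int values[] = { 0, 64, 256 };    /* example delay values */

        for (unsigned i = 0; i < sizeof(values) / sizeof(values[0]); i++)
                printf("value %3d -> %8.3f microseconds\n",
                    values[i], values[i] * 1.024);
        return (0);
}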
-
-/*
- * This parameter controls the duration of the transmit watchdog timer.
- */
-#define IGB_WATCHDOG                   (10 * hz)
-
-/*
- * This parameter controls when the driver calls the routine to reclaim
- * transmit descriptors. Cleaning earlier seems a win.
- */
-#define IGB_TX_CLEANUP_THRESHOLD       (adapter->num_tx_desc / 2)
-
-/*
- * This parameter controls whether or not autonegotiation is enabled.
- *              0 - Disable autonegotiation
- *              1 - Enable  autonegotiation
- */
-#define DO_AUTO_NEG                     1
-
-/*
- * This parameter controls whether or not the driver will wait for
- * autonegotiation to complete.
- *              1 - Wait for autonegotiation to complete
- *              0 - Don't wait for autonegotiation to complete
- */
-#define WAIT_FOR_AUTO_NEG_DEFAULT       0
-
-/* Tunables -- End */
-
-#define AUTONEG_ADV_DEFAULT    (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
-                               ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
-                               ADVERTISE_1000_FULL)
-
-#define AUTO_ALL_MODES         0
-
-/* PHY master/slave setting */
-#define IGB_MASTER_SLAVE               e1000_ms_hw_default
-
-/*
- * Miscellaneous constants
- */
-#define IGB_VENDOR_ID                  0x8086
-
-#define IGB_JUMBO_PBA                  0x00000028
-#define IGB_DEFAULT_PBA                        0x00000030
-#define IGB_SMARTSPEED_DOWNSHIFT       3
-#define IGB_SMARTSPEED_MAX             15
-#define IGB_MAX_LOOP                   10
-
-#define IGB_RX_PTHRESH                 (hw->mac.type <= e1000_82576 ? 16 : 8)
-#define IGB_RX_HTHRESH                 8
-#define IGB_RX_WTHRESH                 1
-
-#define IGB_TX_PTHRESH                 8
-#define IGB_TX_HTHRESH                 1
-#define IGB_TX_WTHRESH                 ((hw->mac.type != e1000_82575 && \
-                                          adapter->msix_mem) ? 1 : 16)
-
-#define MAX_NUM_MULTICAST_ADDRESSES     128
-#define PCI_ANY_ID                      (~0U)
-#define ETHER_ALIGN                     2
-#define IGB_TX_BUFFER_SIZE             ((uint32_t) 1514)
-#define IGB_FC_PAUSE_TIME              0x0680
-#define IGB_EEPROM_APME                        0x400
-#define IGB_QUEUE_IDLE                 0
-#define IGB_QUEUE_WORKING              1
-#define IGB_QUEUE_HUNG                 2
-
-/*
- * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be a
- * multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary. This
- * also optimizes the cache line size effect; H/W supports cache line sizes up to 128.
- */
-#define IGB_DBA_ALIGN                  128
-
-#define SPEED_MODE_BIT (1<<21)         /* On PCI-E MACs only */
-
-/* PCI Config defines */
-#define IGB_MSIX_BAR           3
-
-/* Defines for printing debug information */
-#define DEBUG_INIT  0
-#define DEBUG_IOCTL 0
-#define DEBUG_HW    0
-
-#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
-#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
-#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
-#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
-#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
-#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
-#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
-#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
-#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)
-
-#define IGB_MAX_SCATTER                64
-#define IGB_VFTA_SIZE          128
-#define IGB_BR_SIZE            4096    /* ring buf size */
-#define IGB_TSO_SIZE           (65535 + sizeof(struct ether_vlan_header))
-#define IGB_TSO_SEG_SIZE       4096    /* Max dma segment size */
-#define IGB_HDR_BUF            128
-#define IGB_PKTTYPE_MASK       0x0000FFF0
-#define ETH_ZLEN               60
-#define ETH_ADDR_LEN           6
-
-/* Offload bits in mbuf flag */
-#if __FreeBSD_version >= 800000
-#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
-#else
-#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP)
-#endif
-
-/* Define the starting Interrupt rate per Queue */
-#define IGB_INTS_PER_SEC        8000
-#define IGB_DEFAULT_ITR         ((1000000/IGB_INTS_PER_SEC) << 2)
-
-#define IGB_LINK_ITR            2000
-
-/* Precision Time Sync (IEEE 1588) defines */
-#define ETHERTYPE_IEEE1588     0x88F7
-#define PICOSECS_PER_TICK      20833
-#define TSYNC_PORT             319 /* UDP port for the protocol */
-
-/*
- * Bus dma allocation structure used by
- * e1000_dma_malloc and e1000_dma_free.
- */
-struct igb_dma_alloc {
-        bus_addr_t              dma_paddr;
-        caddr_t                 dma_vaddr;
-        bus_dma_tag_t           dma_tag;
-        bus_dmamap_t            dma_map;
-        bus_dma_segment_t       dma_seg;
-        int                     dma_nseg;
-};
-
-
-/*
-** Driver queue struct: this is the interrupt container
-**  for the associated tx and rx ring.
-*/
-struct igb_queue {
-       struct adapter          *adapter;
-       u32                     msix;           /* This queue's MSIX vector */
-       u32                     eims;           /* This queue's EIMS bit */
-       u32                     eitr_setting;
-       struct resource         *res;
-       void                    *tag;
-       struct tx_ring          *txr;
-       struct rx_ring          *rxr;
-       struct task             que_task;
-       struct taskqueue        *tq;
-       u64                     irqs;
-};
-
-/*
- * Transmit ring: one per queue
- */
-struct tx_ring {
-       struct adapter          *adapter;
-       u32                     me;
-       struct mtx              tx_mtx;
-       char                    mtx_name[16];
-       struct igb_dma_alloc    txdma;
-       struct e1000_tx_desc    *tx_base;
-       u32                     next_avail_desc;
-       u32                     next_to_clean;
-       volatile u16            tx_avail;
-       struct igb_tx_buffer    *tx_buffers;
-#if __FreeBSD_version >= 800000
-       struct buf_ring         *br;
-#endif
-       bus_dma_tag_t           txtag;
-
-       u32                     bytes;
-       u32                     packets;
-
-       int                     queue_status;
-       int                     watchdog_time;
-       int                     tdt;
-       int                     tdh;
-       u64                     no_desc_avail;
-       u64                     tx_packets;
-};
-
-/*
- * Receive ring: one per queue
- */
-struct rx_ring {
-       struct adapter          *adapter;
-       u32                     me;
-       struct igb_dma_alloc    rxdma;
-       union e1000_adv_rx_desc *rx_base;
-       struct lro_ctrl         lro;
-       bool                    lro_enabled;
-       bool                    hdr_split;
-       bool                    discard;
-       struct mtx              rx_mtx;
-       char                    mtx_name[16];
-       u32                     next_to_refresh;
-       u32                     next_to_check;
-       struct igb_rx_buf       *rx_buffers;
-       bus_dma_tag_t           htag;           /* dma tag for rx head */
-       bus_dma_tag_t           ptag;           /* dma tag for rx packet */
-       /*
-        * First/last mbuf pointers, for
-        * collecting multisegment RX packets.
-        */
-       struct mbuf            *fmp;
-       struct mbuf            *lmp;
-
-       u32                     bytes;
-       u32                     packets;
-       int                     rdt;
-       int                     rdh;
-
-       /* Soft stats */
-       u64                     rx_split_packets;
-       u64                     rx_discarded;
-       u64                     rx_packets;
-       u64                     rx_bytes;
-};
-
-struct adapter {
-       struct ifnet    *ifp;
-       struct e1000_hw hw;
-
-       struct e1000_osdep osdep;
-       struct device   *dev;
-       struct cdev     *led_dev;
-
-       struct resource *pci_mem;
-       struct resource *msix_mem;
-       struct resource *res;
-       void            *tag;
-       u32             que_mask;
-
-       int             linkvec;
-       int             link_mask;
-       struct task     link_task;
-       int             link_irq;
-
-       struct ifmedia  media;
-       struct callout  timer;
-       int             msix;   /* total vectors allocated */
-       int             if_flags;
-       int             max_frame_size;
-       int             min_frame_size;
-       int             pause_frames;
-       struct mtx      core_mtx;
-       int             igb_insert_vlan_header;
-        u16            num_queues;
-       u16             vf_ifp;  /* a VF interface */
-
-       eventhandler_tag vlan_attach;
-       eventhandler_tag vlan_detach;
-       u32             num_vlans;
-
-       /* Management and WOL features */
-       int             wol;
-       int             has_manage;
-
-       /*
-       ** Shadow VFTA table; this is needed because
-       ** the real vlan filter table gets cleared during
-       ** a soft reset and the driver needs to be able
-       ** to repopulate it.
-       */
-       u32             shadow_vfta[IGB_VFTA_SIZE];
-
-       /* Info about the interface */
-       u8              link_active;
-       u16             link_speed;
-       u16             link_duplex;
-       u32             smartspeed;
-       u32             dma_coalesce;
-
-       /* Interface queues */
-       struct igb_queue        *queues;
-
-       /*
-        * Transmit rings
-        */
-       struct tx_ring          *tx_rings;
-        u16                    num_tx_desc;
-
-       /* Multicast array pointer */
-       u8                      *mta;
-
-       /* 
-        * Receive rings
-        */
-       struct rx_ring          *rx_rings;
-       bool                    rx_hdr_split;
-        u16                    num_rx_desc;
-       int                     rx_process_limit;
-       u32                     rx_mbuf_sz;
-       u32                     rx_mask;
-
-       /* Misc stats maintained by the driver */
-       unsigned long   dropped_pkts;
-       unsigned long   mbuf_defrag_failed;
-       unsigned long   mbuf_header_failed;
-       unsigned long   mbuf_packet_failed;
-       unsigned long   no_tx_map_avail;
-        unsigned long  no_tx_dma_setup;
-       unsigned long   watchdog_events;
-       unsigned long   rx_overruns;
-       unsigned long   device_control;
-       unsigned long   rx_control;
-       unsigned long   int_mask;
-       unsigned long   eint_mask;
-       unsigned long   packet_buf_alloc_rx;
-       unsigned long   packet_buf_alloc_tx;
-
-       boolean_t       in_detach;
-
-#ifdef IGB_IEEE1588
-       /* IEEE 1588 precision time support */
-       struct cyclecounter     cycles;
-       struct nettimer         clock;
-       struct nettime_compare  compare;
-       struct hwtstamp_ctrl    hwtstamp;
-#endif
-
-       void                    *stats;
-};
-
-/* ******************************************************************************
- * vendor_info_array
- *
- * This array contains the list of Subvendor/Subdevice IDs on which the driver
- * should load.
- *
- * ******************************************************************************/
-typedef struct _igb_vendor_info_t {
-       unsigned int vendor_id;
-       unsigned int device_id;
-       unsigned int subvendor_id;
-       unsigned int subdevice_id;
-       unsigned int index;
-} igb_vendor_info_t;
-
-
-struct igb_tx_buffer {
-       int             next_eop;  /* Index of the desc to watch */
-        struct mbuf    *m_head;
-        bus_dmamap_t    map;         /* bus_dma map for packet */
-};
-
-struct igb_rx_buf {
-        struct mbuf    *m_head;
-        struct mbuf    *m_pack;
-       bus_dmamap_t    hmap;   /* bus_dma map for header */
-       bus_dmamap_t    pmap;   /* bus_dma map for packet */
-};
-
-/*
-** Find the number of unrefreshed RX descriptors
-*/
-static inline u16
-igb_rx_unrefreshed(struct rx_ring *rxr)
-{
-       struct adapter  *adapter = rxr->adapter;
-       if (rxr->next_to_check > rxr->next_to_refresh)
-               return (rxr->next_to_check - rxr->next_to_refresh - 1);
-       else
-               return ((adapter->num_rx_desc + rxr->next_to_check) -
-                   rxr->next_to_refresh - 1);
-}
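The "- 1" above appears to keep one descriptor slot in reserve, a common ring
convention for telling a full ring from an empty one. A standalone restatement
of the wraparound arithmetic with a tiny 8-entry ring, illustrative only:

#include <stdio.h>

/* Same arithmetic as igb_rx_unrefreshed() above, with explicit parameters. */
static unsigned
unrefreshed(unsigned ring_size, unsigned next_to_check, unsigned next_to_refresh)
{
        if (next_to_check > next_to_refresh)
                return (next_to_check - next_to_refresh - 1);
        return ((ring_size + next_to_check) - next_to_refresh - 1);
}

int
main(void)
{
        printf("%u\n", unrefreshed(8, 6, 2));   /* no wrap: 3 */
        printf("%u\n", unrefreshed(8, 2, 6));   /* wrapped:  3 */
        return (0);
}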
-
-#define        IGB_CORE_LOCK_INIT(_sc, _name) \
-       mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
-#define        IGB_CORE_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->core_mtx)
-#define        IGB_CORE_LOCK(_sc)              mtx_lock(&(_sc)->core_mtx)
-#define        IGB_CORE_UNLOCK(_sc)            mtx_unlock(&(_sc)->core_mtx)
-#define        IGB_CORE_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->core_mtx, MA_OWNED)
-
-#define        IGB_TX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->tx_mtx)
-#define        IGB_TX_LOCK(_sc)                mtx_lock(&(_sc)->tx_mtx)
-#define        IGB_TX_UNLOCK(_sc)              mtx_unlock(&(_sc)->tx_mtx)
-#define        IGB_TX_TRYLOCK(_sc)             mtx_trylock(&(_sc)->tx_mtx)
-#define        IGB_TX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
-
-#define        IGB_RX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->rx_mtx)
-#define        IGB_RX_LOCK(_sc)                mtx_lock(&(_sc)->rx_mtx)
-#define        IGB_RX_UNLOCK(_sc)              mtx_unlock(&(_sc)->rx_mtx)
-#define        IGB_RX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->rx_mtx, MA_OWNED)
-
-#define UPDATE_VF_REG(reg, last, cur)          \
-{                                              \
-       u32 new = E1000_READ_REG(hw, reg);      \
-       if (new < last)                         \
-               cur += 0x100000000LL;           \
-       last = new;                             \
-       cur &= 0xFFFFFFFF00000000LL;            \
-       cur |= new;                             \
-}
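The macro above widens a free-running 32-bit hardware counter into a 64-bit
software counter by bumping the high word whenever the raw value moves
backwards. A standalone restatement of that logic without the register read,
illustrative only:

#include <stdint.h>
#include <stdio.h>

static void
update_vf_counter(uint32_t new_val, uint32_t *last, uint64_t *cur)
{
        if (new_val < *last)                    /* 32-bit counter wrapped */
                *cur += 0x100000000ULL;
        *last = new_val;
        *cur &= 0xFFFFFFFF00000000ULL;          /* keep the accumulated wraps */
        *cur |= new_val;                        /* splice in the new low word */
}

int
main(void)
{
        uint32_t last = 0;
        uint64_t cur = 0;

        update_vf_counter(0xFFFFFFF0u, &last, &cur);    /* near the 32-bit limit */
        update_vf_counter(0x00000010u, &last, &cur);    /* counter wrapped once */
        printf("0x%llx\n", (unsigned long long)cur);    /* 0x100000010 */
        return (0);
}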
-
-#if __FreeBSD_version < 800504
-static __inline int
-drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
-{
-#ifdef ALTQ
-       if (ALTQ_IS_ENABLED(&ifp->if_snd))
-               return (1);
-#endif
-       return (!buf_ring_empty(br));
-}
-#endif
-
-#endif /* _IGB_H_DEFINED_ */
-
-
index fbb5788..68ef9e7 100644 (file)
@@ -65,8 +65,8 @@ LDLIBS += -lrte_kni
 endif
 endif
 
-ifeq ($(CONFIG_RTE_LIBRTE_IGB_PMD),y)
-LDLIBS += -lrte_pmd_igb
+ifeq ($(CONFIG_RTE_LIBRTE_E1000_PMD),y)
+LDLIBS += -lrte_pmd_e1000
 endif
 
 ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
index 733aff0..c45d4c0 100644 (file)
@@ -79,6 +79,8 @@ ifeq ($(BUILDING_RTE_SDK),)
 include $(RTE_SDK)/mk/rte.extvars.mk
 endif
 
+CONFIG_RTE_LIBRTE_E1000_PMD = $(CONFIG_RTE_LIBRTE_IGB_PMD)
+
 ifeq ($(RTE_ARCH),)
 $(error RTE_ARCH is not defined)
 endif