SRCS-y += ethtool/ixgbe/kcompat.c
SRCS-y += ethtool/igb/e1000_82575.c
+SRCS-y += ethtool/igb/e1000_i210.c
SRCS-y += ethtool/igb/e1000_api.c
SRCS-y += ethtool/igb/e1000_mac.c
SRCS-y += ethtool/igb/e1000_manage.c
SRCS-y += ethtool/igb/e1000_nvm.c
SRCS-y += ethtool/igb/e1000_phy.c
SRCS-y += ethtool/igb/igb_ethtool.c
+SRCS-y += ethtool/igb/igb_hwmon.c
SRCS-y += ethtool/igb/igb_main.c
+SRCS-y += ethtool/igb/igb_debugfs.c
SRCS-y += ethtool/igb/igb_param.c
SRCS-y += ethtool/igb/igb_procfs.c
-SRCS-y += ethtool/igb/igb_sysfs.c
SRCS-y += ethtool/igb/igb_vmdq.c
+#SRCS-y += ethtool/igb/igb_ptp.c
#SRCS-y += ethtool/igb/kcompat.c
SRCS-y += kni_misc.c
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
* 82575GB Gigabit Network Connection
* 82576 Gigabit Network Connection
* 82576 Quad Port Gigabit Mezzanine Adapter
+ * 82580 Gigabit Network Connection
+ * I350 Gigabit Network Connection
*/
#include "e1000_api.h"
+#include "e1000_i210.h"
static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
static void e1000_release_nvm_82575(struct e1000_hw *hw);
static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
break;
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
reg = E1000_READ_REG(hw, E1000_MDICNFG);
ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
break;
switch (hw->mac.type) {
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
phy->ops.read_reg = e1000_read_phy_reg_82580;
phy->ops.write_reg = e1000_write_phy_reg_82580;
break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
+ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
+ break;
default:
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.write_reg = e1000_write_phy_reg_igp;
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1340M_E_PHY_ID:
phy->id == M88E1340M_E_PHY_ID)
phy->ops.get_cable_length =
e1000_get_cable_length_m88_gen2;
+ else if (phy->id == M88E1543_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
else
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
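+ /* In the auto copper/SGMII and auto copper/BASE-X modes the PHY
+ * can switch media at run time, so use the media-swap aware
+ * link check for them.
+ */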
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ e1000_check_for_link_media_swap;
+ }
break;
case IGP03E1000_E_PHY_ID:
case IGP04E1000_E_PHY_ID:
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
break;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
default:
ret_val = -E1000_ERR_PHY;
goto out;
size = 15;
nvm->word_size = 1 << size;
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
- break;
- }
-
- nvm->type = e1000_nvm_eeprom_spi;
+ if (hw->mac.type < e1000_i210) {
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
+ }
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
- if (nvm->word_size == (1 << 15))
- nvm->page_size = 128;
+ nvm->type = e1000_nvm_eeprom_spi;
+ } else {
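+ /* i210 and newer store the NVM in hardware-managed flash (iNVM
+ * on flashless SKUs), so the SPI EEPROM parameters above do not
+ * apply.
+ */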
+ nvm->type = e1000_nvm_flash_hw;
+ }
/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_82575;
nvm->ops.update = e1000_update_nvm_checksum_82580;
break;
case e1000_i350:
+ case e1000_i354:
nvm->ops.validate = e1000_validate_nvm_checksum_i350;
nvm->ops.update = e1000_update_nvm_checksum_i350;
break;
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
- if (mac->type == e1000_i350) {
+ if (mac->type == e1000_i350 || mac->type == e1000_i354)
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
- /* Enable EEE default settings for i350 */
+
+ /* Enable EEE default settings for EEE supported devices */
+ if (mac->type >= e1000_i350)
dev_spec->eee_disable = false;
- }
+
+ /* Allow a single clear of the SW semaphore on I210 and newer */
+ if (mac->type >= e1000_i210)
+ dev_spec->clear_semaphore_once = true;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = true;
mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_82575;
- /* receive address register setting */
- mac->ops.rar_set = e1000_rar_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
/* configure collision distance */
mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
- if (hw->mac.type == e1000_i350) {
+ if (mac->type == e1000_i350 || mac->type == e1000_i354) {
/* writing VFTA */
mac->ops.write_vfta = e1000_write_vfta_i350;
/* clearing VFTA */
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
}
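+ /* 82580 and newer allow MDI auto-crossover even when speed/duplex
+ * is forced, so any MDI setting is accepted for them.
+ */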
+ if (hw->mac.type >= e1000_82580)
+ mac->ops.validate_mdi_setting =
+ e1000_validate_mdi_setting_crossover_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
/* acquire SW_FW sync */
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
+ }
/* set lan id for port to determine which phy lock to use */
hw->mac.ops.set_lan_id(hw);
DEBUGFUNC("e1000_get_phy_id_82575");
+ /* i354 devices can have a PHY that needs an extra read for id */
+ if (hw->mac.type == e1000_i354)
+ e1000_get_phy_id(hw);
+
/*
* For SGMII PHYs, we try the list of possible addresses until
* we find one that works. For non-SGMII PHYs
break;
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
mdic = E1000_READ_REG(hw, E1000_MDICNFG);
mdic &= E1000_MDICNFG_PHY_MASK;
phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
- u16 data;
+ u32 data;
DEBUGFUNC("e1000_set_d0_lplu_state_82580");
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
- u16 data;
+ u32 data;
DEBUGFUNC("e1000_set_d3_lplu_state_82580");
DEBUGOUT("MNG configuration cycle has not completed.\n");
/* If EEPROM is not marked present, init the PHY manually */
- if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
(hw->phy.type == e1000_phy_igp_3))
e1000_phy_init_script_igp3(hw);
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
} else {
ret_val = e1000_check_for_copper_link_generic(hw);
}
return ret_val;
}
+/**
+ * e1000_check_for_link_media_swap - Check which M88E1112 interface has link
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ DEBUGFUNC("e1000_check_for_link_media_swap");
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
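+ /* no media change detected; run the standard link check */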
+ ret_val = e1000_check_for_link_82575(hw);
+ }
+
+ return ret_val;
+}
+
/**
* e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
* @hw: pointer to the HW structure
{
struct e1000_mac_info *mac = &hw->mac;
u32 pcs;
+ u32 status;
DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
- /* Set up defaults for the return values of this function */
- mac->serdes_has_link = false;
- *speed = 0;
- *duplex = 0;
-
/*
* Read the PCS Status register for link state. For non-copper mode,
* the status register is not accurate. The PCS status register is
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
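+ /* 2.5G applies when the SKU strap is set and not overridden */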
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("2500 Mbs, ");
+ DEBUGOUT("Full Duplex\n");
+ }
+ }
+
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
}
return E1000_SUCCESS;
}
/* If EEPROM is not present, run manual init scripts */
- if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
e1000_reset_init_script_82575(hw);
/* Clear any pending interrupt events. */
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
- s32 ret_val;
+ s32 ret_val;
+ u32 phpm_reg;
DEBUGFUNC("e1000_setup_copper_link_82575");
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
+ }
+
ret_val = e1000_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
}
}
switch (hw->phy.type) {
+ case e1000_phy_i210:
case e1000_phy_m88:
- if (hw->phy.id == I347AT4_E_PHY_ID ||
- hw->phy.id == M88E1112_E_PHY_ID ||
- hw->phy.id == M88E1340M_E_PHY_ID)
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case I210_I_PHY_ID:
ret_val = e1000_copper_link_setup_m88_gen2(hw);
- else
+ break;
+ default:
ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ }
break;
case e1000_phy_igp_3:
ret_val = e1000_copper_link_setup_igp(hw);
**/
static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
- u32 ctrl_ext, ctrl_reg, reg;
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
- /*
- * We force flow control to prevent the CTRL register values from being
- * overwritten by the autonegotiated flow control values
- */
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
if (pcs_autoneg) {
/* Set PCS register for autoneg */
reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+
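+ /* Both PAUSE and ASM_DIR are advertised for full and Rx-only
+ * pause; ASM_DIR alone advertises Tx-only pause (IEEE 802.3
+ * clause 37).
+ */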
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
+
DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
- if (!e1000_sgmii_active_82575(hw))
+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
e1000_force_mac_fc_generic(hw);
return ret_val;
**/
static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
{
- u32 lan_id = 0;
- s32 ret_val = E1000_ERR_CONFIG;
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val = E1000_SUCCESS;
u32 ctrl_ext = 0;
- u32 current_link_mode = 0;
- u16 init_ctrl_wd_3 = 0;
- u8 init_ctrl_wd_3_offset = 0;
- u8 init_ctrl_wd_3_bit_offset = 0;
+ u32 link_mode = 0;
/* Set internal phy as default */
dev_spec->sgmii_active = false;
dev_spec->module_plugged = false;
- /*
- * Check if NVM access method is attached already.
- * If it is then Init Control Word #3 is considered
- * otherwise runtime CSR register content is taken.
- */
-
/* Get CSR setting */
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- /* Get link mode setting */
- if ((hw->nvm.ops.read) && (hw->nvm.ops.read != e1000_null_read_nvm)) {
- /* Take link mode from EEPROM */
-
- /*
- * Get LAN port ID to derive its
- * adequate Init Control Word #3
- */
- lan_id = ((E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_LAN_ID_MASK) >> E1000_STATUS_LAN_ID_OFFSET);
- /*
- * Derive Init Control Word #3 offset
- * and mask to pick up link mode setting.
- */
- if (hw->mac.type < e1000_82580) {
- init_ctrl_wd_3_offset = lan_id ?
- NVM_INIT_CONTROL3_PORT_A : NVM_INIT_CONTROL3_PORT_B;
- init_ctrl_wd_3_bit_offset = NVM_WORD24_LNK_MODE_OFFSET;
- } else {
- init_ctrl_wd_3_offset =
- NVM_82580_LAN_FUNC_OFFSET(lan_id) +
- NVM_INIT_CONTROL3_PORT_A;
- init_ctrl_wd_3_bit_offset =
- NVM_WORD24_82580_LNK_MODE_OFFSET;
- }
- /* Read Init Control Word #3*/
- hw->nvm.ops.read(hw, init_ctrl_wd_3_offset, 1, &init_ctrl_wd_3);
- current_link_mode = init_ctrl_wd_3;
- /*
- * Switch to CSR for all but internal PHY.
- */
- if ((init_ctrl_wd_3 << (E1000_CTRL_EXT_LINK_MODE_OFFSET -
- init_ctrl_wd_3_bit_offset)) !=
- E1000_CTRL_EXT_LINK_MODE_GMII) {
- current_link_mode = ctrl_ext;
- init_ctrl_wd_3_bit_offset =
- E1000_CTRL_EXT_LINK_MODE_OFFSET;
- }
- } else {
- /* Take link mode from CSR */
- current_link_mode = ctrl_ext;
- init_ctrl_wd_3_bit_offset = E1000_CTRL_EXT_LINK_MODE_OFFSET;
- }
-
- /*
- * Align link mode bits to
- * their CTRL_EXT location.
- */
- current_link_mode <<= (E1000_CTRL_EXT_LINK_MODE_OFFSET -
- init_ctrl_wd_3_bit_offset);
- current_link_mode &= E1000_CTRL_EXT_LINK_MODE_MASK;
-
- switch (current_link_mode) {
+ /* extract link mode setting */
+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+ switch (link_mode) {
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
hw->phy.media_type = e1000_media_type_internal_serdes;
- current_link_mode = E1000_CTRL_EXT_LINK_MODE_1000BASE_KX;
break;
case E1000_CTRL_EXT_LINK_MODE_GMII:
hw->phy.media_type = e1000_media_type_copper;
- current_link_mode = E1000_CTRL_EXT_LINK_MODE_GMII;
break;
case E1000_CTRL_EXT_LINK_MODE_SGMII:
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
/* Get phy control interface type set (MDIO vs. I2C)*/
if (e1000_sgmii_uses_mdio_82575(hw)) {
hw->phy.media_type = e1000_media_type_copper;
dev_spec->sgmii_active = true;
- current_link_mode = E1000_CTRL_EXT_LINK_MODE_SGMII;
- } else {
- ret_val = e1000_set_sfp_media_type_82575(hw);
- if (ret_val != E1000_SUCCESS)
- goto out;
- if (hw->phy.media_type ==
- e1000_media_type_internal_serdes) {
- current_link_mode =
- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
- } else if (hw->phy.media_type ==
- e1000_media_type_copper) {
- current_link_mode =
- E1000_CTRL_EXT_LINK_MODE_SGMII;
+ break;
+ }
+ /* fall through for I2C based SGMII */
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ /* read media type from SFP EEPROM */
+ ret_val = e1000_set_sfp_media_type_82575(hw);
+ if ((ret_val != E1000_SUCCESS) ||
+ (hw->phy.media_type == e1000_media_type_unknown)) {
+ /*
+ * If media type was not identified then return media
+ * type defined by the CTRL_EXT settings.
+ */
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
}
+
+ break;
}
- break;
- default:
- DEBUGOUT("Link mode mask doesn't fit bit field size\n");
- goto out;
- }
- /*
- * Do not change current link mode setting
- * if media type is fibre or has not been
- * recognized.
- */
- if ((hw->phy.media_type != e1000_media_type_unknown) &&
- (hw->phy.media_type != e1000_media_type_fiber)) {
- /* Update link mode */
+
+ /* do not change link mode for 100BaseFX */
+ if (dev_spec->eth_flags.e100_base_fx)
+ break;
+
+ /* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext |
- current_link_mode);
- }
- ret_val = E1000_SUCCESS;
-out:
- /*
- * If media type was not identified then return media type
- * defined by the CTRL_EXT settings.
- */
- if (hw->phy.media_type == e1000_media_type_unknown) {
- if (current_link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII)
- hw->phy.media_type = e1000_media_type_copper;
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
- hw->phy.media_type = e1000_media_type_internal_serdes;
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ break;
}
return ret_val;
s32 ret_val = E1000_ERR_CONFIG;
u32 ctrl_ext = 0;
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
- struct sfp_e1000_flags eth_flags = {0};
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
u8 tranceiver_type = 0;
+ s32 timeout = 3;
- /* Turn I2C interface ON */
+ /* Turn I2C interface ON and power on sfp cage */
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+ E1000_WRITE_FLUSH(hw);
+
/* Read SFP module data */
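+ /* The module may still be powering up, so retry the first read
+ * a few times.
+ */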
- ret_val = e1000_read_sfp_data_byte(hw,
+ while (timeout) {
+ ret_val = e1000_read_sfp_data_byte(hw,
E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
&tranceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
if (ret_val != E1000_SUCCESS)
goto out;
+
ret_val = e1000_read_sfp_data_byte(hw,
E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
- (u8 *)ð_flags);
+ (u8 *)eth_flags);
if (ret_val != E1000_SUCCESS)
goto out;
- /*
- * Check if there is some SFP
- * module plugged and powered
- */
+
+ /* Check if there is some SFP module plugged and powered */
if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
(tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
dev_spec->module_plugged = true;
- if (eth_flags.e1000_base_lx || eth_flags.e1000_base_sx) {
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags.e1000_base_t) {
+ } else if (eth_flags->e1000_base_t) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_copper;
} else {
- hw->phy.media_type = e1000_media_type_unknown;
- DEBUGOUT("PHY module has not been recognized\n");
- goto out;
+ hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
+ goto out;
}
} else {
hw->phy.media_type = e1000_media_type_unknown;
**/
void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
- u32 dtxswc;
+ u32 reg_val, reg_offset;
switch (hw->mac.type) {
case e1000_82576:
- dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
- if (enable) {
- dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- /* The PF can spoof - it has to in order to
- * support emulation mode NICs */
- dtxswc ^= (1 << pf | 1 << (pf +
- E1000_DTXSWC_VLAN_SPOOF_SHIFT));
- } else {
- dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- }
- E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+ reg_offset = E1000_DTXSWC;
break;
case e1000_i350:
- dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
- if (enable) {
- dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- /* The PF can spoof - it has to in order to
- * support emulation mode NICs
- */
- dtxswc ^= (1 << pf | 1 << (pf +
- E1000_DTXSWC_VLAN_SPOOF_SHIFT));
- } else {
- dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- }
- E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
- default:
+ case e1000_i354:
+ reg_offset = E1000_TXSWC;
break;
+ default:
+ return;
}
+
+ reg_val = E1000_READ_REG(hw, reg_offset);
+ if (enable) {
+ reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs
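+ * (the XOR below clears only the PF's own MAC and VLAN
+ * anti-spoof bits)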
+ */
+ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ E1000_WRITE_REG(hw, reg_offset, reg_val);
}
/**
E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
break;
case e1000_i350:
+ case e1000_i354:
dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
if (enable)
dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
hw->dev_spec._82575.global_device_reset = false;
+ /* 82580 does not reliably do global_device_reset due to hw errata */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
/* Get current control state. */
ctrl = E1000_READ_REG(hw, E1000_CTRL);
DEBUGOUT("Auto Read Done did not complete\n");
}
- /* If EEPROM is not present, run manual init scripts */
- if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
- e1000_reset_init_script_82575(hw);
-
/* clear global device reset status bit */
E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
goto out;
}
- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
/* set compatibility bit to validate checksums appropriately */
nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
return ret_val;
}
+/**
+ * __e1000_access_emi_reg - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @address: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_access_emi_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_emi_reg");
+
+ return __e1000_access_emi_reg(hw, addr, data, true);
+}
+
/**
* e1000_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
+ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+
ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
return ret_val;
}
+/**
+ * e1000_set_eee_i354 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ **/
+s32 e1000_set_eee_i354(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_eee_i354");
+
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ (phy->id != M88E1543_E_PHY_ID))
+ goto out;
+
+ if (!hw->dev_spec._82575.eee_disable) {
+ /* Switch to PHY page 18. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ /* Turn on EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED;
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ } else {
+ /* Turn off EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED);
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_eee_status_i354 - Get EEE status
+ * @hw: pointer to the HW structure
+ * @status: EEE status
+ *
+ * Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ * been received.
+ **/
+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_eee_status_i354");
+
+ /* Check if EEE is supported on this device. */
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ (phy->id != M88E1543_E_PHY_ID))
+ goto out;
+
+ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+ E1000_PCS_STATUS_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+ return ret_val;
+}
+
/* Due to a hw errata, if the host tries to configure the VFTA register
* while performing queries from the BMC or DMA, then the VFTA in some
* cases won't be written.
u32 retry = 1;
u16 swfw_mask = 0;
- bool nack = 1;
+ bool nack = true;
DEBUGFUNC("e1000_read_i2c_byte_generic");
u32 i = 0;
u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
u32 timeout = 10;
- bool ack = 1;
+ bool ack = true;
DEBUGFUNC("e1000_get_i2c_ack");
return E1000_ERR_I2C;
ack = e1000_get_i2c_data(&i2cctl);
- if (ack == 1) {
+ if (ack) {
DEBUGOUT("I2C ack was not received.\n");
status = E1000_ERR_I2C;
}
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_set_eee_i350(struct e1000_hw *);
+s32 e1000_set_eee_i354(struct e1000_hw *);
+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
#define E1000_EMC_INTERNAL_DATA 0x00
#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
case E1000_DEV_ID_I350_DA4:
mac->type = e1000_i350;
break;
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
case E1000_DEV_ID_I354_SGMII:
case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
mac->type = e1000_i354;
break;
-
default:
/* Should never have loaded on this device */
ret_val = -E1000_ERR_MAC_INIT;
case e1000_82576:
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
e1000_init_function_pointers_82575(hw);
break;
+ case e1000_i210:
+ case e1000_i211:
+ e1000_init_function_pointers_i210(hw);
+ break;
default:
DEBUGOUT("Hardware not supported\n");
ret_val = -E1000_ERR_CONFIG;
s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
u16 offset, u8 *sum)
{
- if (hw->mac.ops.mng_host_if_write)
- return hw->mac.ops.mng_host_if_write(hw, buffer, length,
- offset, sum);
-
- return E1000_NOT_IMPLEMENTED;
+ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
}
/**
s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
struct e1000_host_mng_command_header *hdr)
{
- if (hw->mac.ops.mng_write_cmd_header)
- return hw->mac.ops.mng_write_cmd_header(hw, hdr);
-
- return E1000_NOT_IMPLEMENTED;
+ return e1000_mng_write_cmd_header_generic(hw, hdr);
}
/**
**/
s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
- if (hw->mac.ops.mng_enable_host_if)
- return hw->mac.ops.mng_enable_host_if(hw);
-
- return E1000_NOT_IMPLEMENTED;
-}
-
-/**
- * e1000_wait_autoneg - Waits for autonegotiation completion
- * @hw: pointer to the HW structure
- *
- * Waits for autoneg to complete. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
- **/
-s32 e1000_wait_autoneg(struct e1000_hw *hw)
-{
- if (hw->mac.ops.wait_autoneg)
- return hw->mac.ops.wait_autoneg(hw);
-
- return E1000_SUCCESS;
+ return e1000_mng_enable_host_if_generic(hw);
}
/**
return E1000_SUCCESS;
}
+
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
s32 e1000_set_mac_type(struct e1000_hw *hw);
s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
s32 e1000_init_mac_params(struct e1000_hw *hw);
s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_wait_autoneg(struct e1000_hw *hw);
s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
bool e1000_check_mng_mode(struct e1000_hw *hw);
(((length) > min_frame_size) && \
((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+#ifndef E1000_MAX
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
+#ifndef E1000_DIVIDE_ROUND_UP
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
+#endif
+#endif /* _E1000_API_H_ */
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
-#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */
-#define E1000_WUC_PPROXYE 0x00000010 /* Protocol Proxy Enable */
-#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */
-#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */
-#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* all wakeup filters mask */
-#define E1000_WUFC_FLX_OFFSET 16 /* Flexible Filters bits offset */
-#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* 4 flexible filters mask */
-/*
- * For 82576 to utilize Extended filter masks in addition to
- * existing (filter) masks
- */
-#define E1000_WUFC_EXT_FLX_FILTERS 0x00300000 /* Ext. FLX filter mask */
/* Wake Up Status */
#define E1000_WUS_LNKC E1000_WUFC_LNKC
#define E1000_WUS_EX E1000_WUFC_EX
#define E1000_WUS_MC E1000_WUFC_MC
#define E1000_WUS_BC E1000_WUFC_BC
-#define E1000_WUS_ARP E1000_WUFC_ARP
-#define E1000_WUS_IPV4 E1000_WUFC_IPV4
-#define E1000_WUS_IPV6 E1000_WUFC_IPV6
-#define E1000_WUS_FLX0 E1000_WUFC_FLX0
-#define E1000_WUS_FLX1 E1000_WUFC_FLX1
-#define E1000_WUS_FLX2 E1000_WUFC_FLX2
-#define E1000_WUS_FLX3 E1000_WUFC_FLX3
-#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS
-
-/* Wake Up Packet Length */
-#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
-
-/* Four Flexible Filters are supported */
-#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
-/* Two Extended Flexible Filters are supported (82576) */
-#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
-#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
-#define E1000_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
-
-/* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
-
-#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
-#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
-#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
/* Extended Device Control */
-#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
-#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
-#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
-#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
-#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
-/* Reserved (bits 4,5) in >= 82575 */
#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
-#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* SW Definable Pin 5 data */
-#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
-/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
-#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
-#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
-#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
-#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
/* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
/* Offset of the link mode field in Ctrl Ext register */
#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
-#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/
#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_IRCA 0x00000001
-#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
-#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
-#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
-#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
-#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
-#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
-/* IAME enable bit (27) was removed in >= 82575 */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
-/* packet buffer parity error detection enabled */
-#define E1000_CRTL_EXT_PB_PAREN 0x01000000
-/* descriptor FIFO parity error detection enable */
-#define E1000_CTRL_EXT_DF_PAREN 0x02000000
-#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
-#define E1000_I2CCMD_REG_ADDR 0x00FF0000
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
-#define E1000_I2CCMD_PHY_ADDR 0x07000000
#define E1000_I2CCMD_OPCODE_READ 0x08000000
#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
-#define E1000_I2CCMD_RESET 0x10000000
#define E1000_I2CCMD_READY 0x20000000
-#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000
#define E1000_I2CCMD_ERROR 0x80000000
#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
-#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define E1000_RXD_SPC_PRI_SHIFT 13
-#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
-#define E1000_RXD_SPC_CFI_SHIFT 12
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
-#define E1000_MRQC_ENABLE_MASK 0x00000007
-#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
-#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
-#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
-#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
/* Management Control */
#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
-#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
-#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
-#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
-#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
-#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
-/* Enable Neighbor Discovery Filtering */
-#define E1000_MANC_NEIGHBOR_EN 0x00004000
-#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
-#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
-#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Enable MAC address filtering */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
-/* Enable IP address filtering */
-#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000
-#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Ena checksum filtering */
-#define E1000_MANC_BR_EN 0x01000000 /* Ena broadcast filtering */
-#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
-#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
-#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
-#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
-#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
-#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
-#define E1000_MANC_MPROXYE 0x40000000 /* Mngment Proxy Enable */
-#define E1000_MANC_EN_BMC2OS 0x10000000 /* OS2BMC is enabld or not */
-
-#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
-#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
-#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
-#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
-#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* Rx desc min thresh size */
-#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* Rx desc min thresh size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
-#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
-#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
-#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
-#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
-#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
-/*
- * Use byte values for the following shift parameters
+/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
#define E1000_SWFW_PHY3_SM 0x40
#define E1000_SWFW_SW_MNG_SM 0x400
-/* FACTPS Definitions */
-#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
-#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
-#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
-#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
-/* Defined polarity of Dock/Undock indication in SDP[0] */
-#define E1000_CTRL_D_UD_POLARITY 0x00004000
-/* Reset both PHY ports, through PHYRST_N pin */
-#define E1000_CTRL_FORCE_PHY_RESET 0x00008000
-/* enable link status from external LINK_0 and LINK_1 pins */
-#define E1000_CTRL_EXT_LINK_EN 0x00010000
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
-#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
-#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
-#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
-/*
- * Bit definitions for the Management Data IO (MDIO) and Management Data
- * Clock (MDC) pins in the Device Control Register.
- */
-#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
-#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
-#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
-#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
-#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
-#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
-#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
-#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_10 0
#define E1000_PCS_LCTL_FDV_FULL 8
#define E1000_PCS_LCTL_FSD 0x10
#define E1000_PCS_LCTL_FORCE_LINK 0x20
-#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40
#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LCTL_AN_ENABLE 0x10000
#define E1000_PCS_LCTL_AN_RESTART 0x20000
#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
-#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000
-#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000
-#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000
-#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000
-#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000
#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
#define E1000_PCS_LSTS_LINK_OK 1
-#define E1000_PCS_LSTS_SPEED_10 0
#define E1000_PCS_LSTS_SPEED_100 2
#define E1000_PCS_LSTS_SPEED_1000 4
#define E1000_PCS_LSTS_DUPLEX_FULL 8
#define E1000_PCS_LSTS_SYNK_OK 0x10
#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
-#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000
-#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000
-#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000
-#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
-#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
-#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
-#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_MASK 0x000000C0
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
-#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
-/* Change in Dock/Undock state clear on write '0'. */
-#define E1000_STATUS_DOCK_CI 0x00000800
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
-#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
-#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
-#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
-#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
-#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
-#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disbld */
-#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
-#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
-#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
-/* BMC external code execution disabled */
-#define E1000_STATUS_BMC_LITE 0x01000000
-#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
-#define E1000_STATUS_FUSE_8 0x04000000
-#define E1000_STATUS_FUSE_9 0x08000000
-#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disbld on port 0 */
-#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disbld on port 1 */
-
-/* Constants used to interpret the masked PCI-X bus speed. */
-#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */
-#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */
-#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/
+#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
+#define SPEED_2500 2500
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
-#define PHY_FORCE_TIME 20
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
ADVERTISE_100_FULL)
#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
-#define E1000_ALL_FULL_DUPLEX ( \
- ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
-#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
-#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
-#define E1000_LEDCTL_LED1_MODE_SHIFT 8
-#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000
-#define E1000_LEDCTL_LED1_IVRT 0x00004000
-#define E1000_LEDCTL_LED1_BLINK 0x00008000
-#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
-#define E1000_LEDCTL_LED2_MODE_SHIFT 16
-#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
-#define E1000_LEDCTL_LED2_IVRT 0x00400000
-#define E1000_LEDCTL_LED2_BLINK 0x00800000
-#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
-#define E1000_LEDCTL_LED3_MODE_SHIFT 24
-#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
-#define E1000_LEDCTL_LED3_IVRT 0x40000000
-#define E1000_LEDCTL_LED3_BLINK 0x80000000
-
-#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
-#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
-#define E1000_LEDCTL_MODE_LINK_UP 0x2
-#define E1000_LEDCTL_MODE_ACTIVITY 0x3
-#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
-#define E1000_LEDCTL_MODE_LINK_10 0x5
-#define E1000_LEDCTL_MODE_LINK_100 0x6
-#define E1000_LEDCTL_MODE_LINK_1000 0x7
-#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
-#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
-#define E1000_LEDCTL_MODE_COLLISION 0xA
-#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
-#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
-#define E1000_LEDCTL_MODE_PAUSED 0xD
+
#define E1000_LEDCTL_MODE_LED_ON 0xE
#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Transmit Descriptor bit definitions */
#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
-#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-/* Extended desc bits for Linksec and timesync */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
-#define E1000_TCTL_RST 0x00000001 /* software reset */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
-#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
-#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
/* Transmit Arbitration Count */
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
/* Receive Checksum Control */
-#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
-#define E1000_RFCTL_ISCSI_DIS 0x00000001
-#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
-#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
#define E1000_RFCTL_NFSW_DIS 0x00000040
#define E1000_RFCTL_NFSR_DIS 0x00000080
-#define E1000_RFCTL_NFS_VER_MASK 0x00000300
-#define E1000_RFCTL_NFS_VER_SHIFT 8
-#define E1000_RFCTL_IPV6_DIS 0x00000400
-#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
#define E1000_RFCTL_ACK_DIS 0x00001000
-#define E1000_RFCTL_ACKD_DIS 0x00002000
-#define E1000_RFCTL_IPFRSP_DIS 0x00004000
#define E1000_RFCTL_EXTEN 0x00008000
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
#define DEFAULT_82543_TIPG_IPGT_COPPER 8
#define E1000_TIPG_IPGT_MASK 0x000003FF
-#define E1000_TIPG_IPGR1_MASK 0x000FFC00
-#define E1000_TIPG_IPGR2_MASK 0x3FF00000
#define DEFAULT_82543_TIPG_IPGR1 8
#define E1000_TIPG_IPGR1_SHIFT 10
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
-#define E1000_PHY_CTRL_SPD_EN 0x00000001
#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
/* PBA constants */
-#define E1000_PBA_6K 0x0006 /* 6KB */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_10K 0x000A /* 10KB */
#define E1000_PBA_12K 0x000C /* 12KB */
#define E1000_PBA_48K 0x0030 /* 48KB */
#define E1000_PBA_64K 0x0040 /* 64KB */
-#define E1000_PBA_RXA_MASK 0xFFFF;
+#define E1000_PBA_RXA_MASK 0xFFFF
#define E1000_PBS_16K E1000_PBA_16K
-#define E1000_PBS_24K E1000_PBA_24K
#define IFS_MAX 80
#define IFS_MIN 40
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
-#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
-#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
#define E1000_ICR_TXD_LOW 0x00008000
-#define E1000_ICR_SRPD 0x00010000
-#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
#define E1000_ICR_MNG 0x00040000 /* Manageability event */
-#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
-#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
-#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
-#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
-#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
-#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
-/* FW changed the status of DISSW bit in the FWSM */
-#define E1000_ICR_DSW 0x00000020
-/* LAN connected device generates an interrupt */
-#define E1000_ICR_PHYINT 0x00001000
#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
-#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
#define E1000_ICR_FER 0x00400000 /* Fatal Error */
#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
-#define E1000_ITR_MASK 0x000FFFFF /* ITR value bitfield */
-#define E1000_ITR_MULT 256 /* ITR mulitplier in nsec */
-
/* Extended Interrupt Cause Read */
#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
-/*
- * This defines the bits that are set in the Interrupt Mask
- * Set/Read Register. Each bit is documented below:
- * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
- * o RXSEQ = Receive Sequence Error
- */
-#define POLL_IMS_ENABLE_MASK ( \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ)
-
-/*
- * This defines the bits that are set in the Interrupt Mask
+/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
-#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
-#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
-#define E1000_IMS_SRPD E1000_ICR_SRPD
-#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
-/* Q0 Rx desc FIFO parity error */
-#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0
-/* Q0 Tx desc FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0
-/* host arb read buffer parity error */
-#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR
-/* packet buffer parity error */
-#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR
-/* Q1 Rx desc FIFO parity error */
-#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1
-/* Q1 Tx desc FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1
-#define E1000_IMS_DSW E1000_ICR_DSW
-#define E1000_IMS_PHYINT E1000_ICR_PHYINT
#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
-#define E1000_IMS_EPRST E1000_ICR_EPRST
#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/
#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
/* Interrupt Cause Set */
-#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */
-#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
-#define E1000_ICS_RXO E1000_ICR_RXO /* Rx overrun */
-#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
-#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */
-#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
-#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
-#define E1000_ICS_SRPD E1000_ICR_SRPD
-#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
-/* Q0 Rx desc FIFO parity error */
-#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0
-/* Q0 Tx desc FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0
-/* host arb read buffer parity error */
-#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR
-/* packet buffer parity error */
-#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR
-/* Q1 Rx desc FIFO parity error */
-#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1
-/* Q1 Tx desc FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1
-#define E1000_ICS_DSW E1000_ICR_DSW
-#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
-#define E1000_ICS_PHYINT E1000_ICR_PHYINT
-#define E1000_ICS_EPRST E1000_ICR_EPRST
/* Extended Interrupt Cause Set */
#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define E1000_EITR_INTERVAL 0x00007FFC
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
-#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
/* Enable the counting of descriptors still to be processed. */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-/* Receive Address */
-/*
+/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
-#define E1000_RAH_POOL_MASK 0x03FC0000
-#define E1000_RAH_POOL_SHIFT 18
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
/* Flow Control */
#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
-#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* Transmit Configuration Word */
#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
-#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
-#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
-#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
-#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
-#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
-#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
-#define E1000_RXCW_CC 0x10000000 /* Receive config change */
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
-#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
/* TUPLE Filtering Configuration */
#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
-
+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_EEE_ADV_DEV_I354 7
+#define E1000_EEE_ADV_ADDR_I354 60
+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1
+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
/* mPHY address control and data registers */
#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
#define E1000_EECD_DI 0x00000004 /* NVM Data In */
#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
-#define E1000_EECD_FWE_MASK 0x00000030
-#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
-#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
-#define E1000_EECD_FWE_SHIFT 4
#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
#define E1000_EECD_PRES 0x00000100 /* NVM Present */
#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
/* NVM Addressing bits based on type 0=small, 1=large */
#define E1000_EECD_ADDR_BITS 0x00000400
-#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
-#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
-#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
-#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
-#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
-#define E1000_EECD_SECVAL_SHIFT 22
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
+#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
+#define E1000_FLUDONE_ATTEMPTS 20000
+#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX 0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset */
+#define E1000_I210_FW_VER_OFFSET 328
-#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */
-#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */
#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define E1000_NVM_RW_REG_START 1 /* Start operation */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_VERSION 0x0005
-#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
-#define NVM_PHY_CLASS_WORD 0x0007
+#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
+#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
+/* FW version defines */
+/* Offset of "Loader patch ptr" in Firmware Header */
+#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
+/* Patch generation hour & minutes */
+#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
+/* Patch generation month & day */
+#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
+/* Patch generation year */
+#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
+/* Patch major & minor numbers */
+#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
+
+#define NVM_MAC_ADDR 0x0000
+#define NVM_SUB_DEV_ID 0x000B
+#define NVM_SUB_VEN_ID 0x000C
+#define NVM_DEV_ID 0x000D
+#define NVM_VEN_ID 0x000E
+#define NVM_INIT_CTRL_2 0x000F
+#define NVM_INIT_CTRL_4 0x0013
+#define NVM_LED_1_CFG 0x001C
+#define NVM_LED_0_2_CFG 0x001F
+
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_DATA_INDEX_MASK 0x0300
#define NVM_ETS_DATA_INDEX_SHIFT 8
#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
-#define NVM_INIT_CONTROL1_REG 0x000A
#define NVM_INIT_CONTROL2_REG 0x000F
-#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_3GIO_3 0x001A
#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_CFG 0x0012
-#define NVM_FLASH_VERSION 0x0032
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
#define NVM_COMPATIBILITY_REG_3 0x0003
#define NVM_WORD0F_PAUSE_MASK 0x3000
#define NVM_WORD0F_PAUSE 0x1000
#define NVM_WORD0F_ASM_DIR 0x2000
-#define NVM_WORD0F_ANE 0x0800
-#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0
-#define NVM_WORD0F_LPLU 0x0001
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
-#define NVM_MAC_ADDR_OFFSET 0
+/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_RESERVED_WORD 0xFFFF
-#define NVM_PHY_CLASS_A 0x8000
-#define NVM_SERDES_AMPLITUDE_MASK 0x000F
-#define NVM_SIZE_MASK 0x1C00
-#define NVM_SIZE_SHIFT 10
#define NVM_WORD_SIZE_BASE_SHIFT 6
-#define NVM_SWDPIO_EXT_SHIFT 4
/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
-#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */
#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
-#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */
/* SPI NVM Status Register */
#define NVM_STATUS_RDY_SPI 0x01
-#define NVM_STATUS_WEN_SPI 0x02
-#define NVM_STATUS_BP0_SPI 0x04
-#define NVM_STATUS_BP1_SPI 0x08
-#define NVM_STATUS_WPEN_SPI 0x80
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
-/* Bit definitions for valid PHY IDs. */
-/*
+/* Bit definitions for valid PHY IDs.
* I = Integrated
* E = External
*/
#define M88E1000_I_PHY_ID 0x01410C30
#define M88E1011_I_PHY_ID 0x01410C20
#define IGP01E1000_I_PHY_ID 0x02A80380
-#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1543_E_PHY_ID 0x01410EA0
#define M88E1112_E_PHY_ID 0x01410C90
#define I347AT4_E_PHY_ID 0x01410DC0
#define M88E1340M_E_PHY_ID 0x01410DF0
#define IFE_C_E_PHY_ID 0x02A80310
#define I82580_I_PHY_ID 0x015403A0
#define I350_I_PHY_ID 0x015403B0
+#define I210_I_PHY_ID 0x01410C00
#define IGP04E1000_E_PHY_ID 0x02A80391
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
-#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Reg */
-#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Reg */
#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
-#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
-#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
-#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
/* M88E1000 PHY Specific Control Register */
-#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
-#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
-/* 1=CLK125 low, 0=CLK125 toggling */
-#define M88E1000_PSCR_CLK125_DISABLE 0x0010
/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/*
- * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
- * 0=Normal 10BASE-T Rx Threshold
- */
-#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
-/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
-#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
-#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
/* M88E1000 PHY Specific Status Register */
-#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
-/*
- * 0 = <50M
+/* 0 = <50M
* 1 = 50-80M
* 2 = 80-110M
* 3 = 110-140M
#define M88E1000_PSSR_CABLE_LENGTH 0x0380
#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
-#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
-#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
-#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
-#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
-/* M88E1000 Extended PHY Specific Control Register */
-#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
-/*
- * 1 = Lost lock detect enabled.
- * Will assert lost lock and bring
- * link down if idle not seen
- * within 1ms in 1000BASE-T
- */
-#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
-#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
-
-/* M88E1111 Specific Registers */
-#define M88E1111_PHY_PAGE_SELECT1 0x16 /* for registers 0-28 */
-#define M88E1111_PHY_PAGE_SELECT2 0x1D /* for registers 30-31 */
-
-/* M88E1111 page select register mask */
-#define M88E1111_PHY_PAGE_SELECT_MASK1 0xFF
-#define M88E1111_PHY_PAGE_SELECT_MASK2 0x3F
/* Intel I347AT4 Registers */
-
#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
#define I347AT4_PAGE_SELECT 0x16
/* I347AT4 Extended PHY Specific Control Register */
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
-/*
- * Bits...
+/* Bits...
* 15-5: page
* 4-0: register offset
*/
/* GG82563 Specific Registers */
#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
-#define GG82563_PHY_SPEC_STATUS GG82563_REG(0, 17) /* PHY Spec Status */
-#define GG82563_PHY_INT_ENABLE GG82563_REG(0, 18) /* Interrupt Ena */
-#define GG82563_PHY_SPEC_STATUS_2 GG82563_REG(0, 19) /* PHY Spec Stat2 */
-#define GG82563_PHY_RX_ERR_CNTR GG82563_REG(0, 21) /* Rx Err Counter */
#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
-/* Test Clock Control (use reg. 29 to select) */
-#define GG82563_PHY_TEST_CLK_CTRL GG82563_REG(0, 30)
/* MAC Specific Control Register */
#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
-#define GG82563_PHY_MAC_SPEC_CTRL_2 GG82563_REG(2, 26) /* MAC Spec Ctrl 2 */
#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
/* Page 193 - Port Control Registers */
/* Kumeran Mode Control */
#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
-#define GG82563_PHY_PORT_RESET GG82563_REG(193, 17) /* Port Reset */
-#define GG82563_PHY_REVISION_ID GG82563_REG(193, 18) /* Revision ID */
-#define GG82563_PHY_DEVICE_ID GG82563_REG(193, 19) /* Device ID */
#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
-/* Rate Adaptation Control */
-#define GG82563_PHY_RATE_ADAPT_CTRL GG82563_REG(193, 25)
/* Page 194 - KMRN Registers */
-/* FIFO's Control/Status */
-#define GG82563_PHY_KMRN_FIFO_CTRL_STAT GG82563_REG(194, 16)
-#define GG82563_PHY_KMRN_CTRL GG82563_REG(194, 17) /* Control */
#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
-#define GG82563_PHY_KMRN_DIAGNOSTIC GG82563_REG(194, 19) /* Diagnostic */
-#define GG82563_PHY_ACK_TIMEOUTS GG82563_REG(194, 20) /* Ack Timeouts */
-#define GG82563_PHY_ADV_ABILITY GG82563_REG(194, 21) /* Adver Ability */
-/* Link Partner Advertised Ability */
-#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY GG82563_REG(194, 23)
-#define GG82563_PHY_ADV_NEXT_PAGE GG82563_REG(194, 24) /* Adver Next Pg */
-/* Link Partner Advertised Next page */
-#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE GG82563_REG(194, 25)
-#define GG82563_PHY_KMRN_MISC GG82563_REG(194, 26) /* Misc. */
/* MDI Control */
-#define E1000_MDIC_DATA_MASK 0x0000FFFF
#define E1000_MDIC_REG_MASK 0x001F0000
#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_MASK 0x03E00000
#define E1000_MDIC_OP_WRITE 0x04000000
#define E1000_MDIC_OP_READ 0x08000000
#define E1000_MDIC_READY 0x10000000
-#define E1000_MDIC_INT_EN 0x20000000
#define E1000_MDIC_ERROR 0x40000000
#define E1000_MDIC_DEST 0x80000000
/* Lx power decision based on DMA coal */
#define E1000_PCIEMISC_LX_DECISION 0x00000080
-/* Proxy Filer Control */
+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
+#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+
+/* Proxy Filter Control */
#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
-#define E1000_PROXYFC_NS 0x00000200 /* IPv4 NBRHD Solicitation */
+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
/* Proxy Status */
#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
/* Lan ID bit field offset in status register */
#define E1000_STATUS_LAN_ID_OFFSET 2
#define E1000_VFTA_ENTRIES 128
+#ifndef E1000_UNUSEDARG
+#define E1000_UNUSEDARG
+#endif /* E1000_UNUSEDARG */
#endif /* _E1000_DEFINES_H_ */
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define E1000_DEV_ID_I350_SERDES 0x1523
#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_DEV_ID_I350_DA4 0x1546
-#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
-#define E1000_DEV_ID_I354_SGMII 0x1F41
-#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
+#define E1000_DEV_ID_I210_COPPER 0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
+#define E1000_DEV_ID_I210_COPPER_IT 0x1535
+#define E1000_DEV_ID_I210_FIBER 0x1536
+#define E1000_DEV_ID_I210_SERDES 0x1537
+#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
+#define E1000_DEV_ID_I211_COPPER 0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+#define E1000_DEV_ID_I354_SGMII 0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
e1000_82580,
e1000_i350,
e1000_i354,
+ e1000_i210,
+ e1000_i211,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
+ e1000_nvm_invm,
e1000_nvm_flash_sw
};
e1000_phy_ife,
e1000_phy_82580,
e1000_phy_vf,
+ e1000_phy_i210,
};
enum e1000_bus_type {
};
#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
} middle;
struct {
__le16 header_status;
- __le16 length[3]; /* length of buffers 1-3 */
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
} upper;
__le64 reserved;
} wb; /* writeback */
#include "e1000_manage.h"
#include "e1000_mbx.h"
+/* Function pointers for the MAC. */
struct e1000_mac_operations {
- /* Function pointers for the MAC. */
s32 (*init_params)(struct e1000_hw *);
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
- bool (*check_mng_mode)(struct e1000_hw *hw);
s32 (*cleanup_led)(struct e1000_hw *);
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*validate_mdi_setting)(struct e1000_hw *);
- s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
- s32 (*mng_write_cmd_header)(struct e1000_hw *hw,
- struct e1000_host_mng_command_header*);
- s32 (*mng_enable_host_if)(struct e1000_hw *);
- s32 (*wait_autoneg)(struct e1000_hw *);
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
};
-/*
- * When to use various PHY register access functions:
+/* When to use various PHY register access functions:
*
* Func Caller
* Function Does Does When to use
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
+/* Function pointers for the NVM. */
struct e1000_nvm_operations {
s32 (*init_params)(struct e1000_hw *);
s32 (*acquire)(struct e1000_hw *);
bool global_device_reset;
bool eee_disable;
bool module_plugged;
+ bool clear_semaphore_once;
u32 mtu;
+ struct sfp_e1000_flags eth_flags;
+ u8 media_port;
+ bool media_changed;
};
struct e1000_dev_spec_vf {
};
#include "e1000_82575.h"
+#include "e1000_i210.h"
/* These functions must be implemented by drivers */
s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
--- /dev/null
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
+static void e1000_release_nvm_i210(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+
+/**
+ * e1000_acquire_nvm_i210 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_i210");
+
+ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_i210 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_i210(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_i210");
+
+ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_i210");
+
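+	/*
+	 * SW_FW_SYNC is itself protected by the HW semaphore: take the
+	 * semaphore, test the SW/FW mask bits, and release it again
+	 * between retries.
+	 */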
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_i210(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to release
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_i210");
+
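+	/* The HW semaphore is required before SW_FW_SYNC can be cleared */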
+ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_i210");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._82575.clear_semaphore_once) {
+ hw->dev_spec._82575.clear_semaphore_once = false;
+ e1000_put_hw_semaphore_generic(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads 16 bit words from the Shadow Ram using the EERD register.
+ * Uses the necessary synchronization semaphores.
+ **/
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_read_nvm_srrd_i210");
+
+	/* We cannot hold the synchronization semaphores for too long,
+	 * because of the forceful takeover procedure. However, it is more
+	 * efficient to read in bursts than to synchronize access for each
+	 * word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
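+		/* Read at most E1000_EERD_EEWR_MAX_COUNT (512) words per burst */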
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_read_nvm_eerd(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ **/
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_write_nvm_srwr_i210");
+
+	/* We cannot hold the synchronization semaphores for too long,
+	 * because of the forceful takeover procedure. However, it is more
+	 * efficient to write in bursts than to synchronize access for each
+	 * word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * Shadow Ram will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, k, eewr = 0;
+ u32 attempts = 100000;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_nvm_srwr");
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
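+		/*
+		 * Build the EEWR command: word address in the address field,
+		 * data at bit E1000_NVM_RW_REG_DATA, and the START bit set;
+		 * hardware reports completion via the DONE bit polled below.
+		 */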
+ eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+ (data[i] << E1000_NVM_RW_REG_DATA) |
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (E1000_NVM_RW_REG_DONE &
+ E1000_READ_REG(hw, E1000_SRWR)) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads a 16-bit word from the OTP. Returns an error when the word is not
+ * stored in OTP.
+ **/
+static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ DEBUGFUNC("e1000_read_invm_word_i210");
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
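+		/* Skip the payload dwords of autoload structures */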
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ DEBUGOUT2("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/**
+ * e1000_read_invm_i210 - Read invm wrapper function for I210/I211
+ * @hw: pointer to the HW structure
+ * @offset: the word offset (aka eeprom offset) to read
+ * @words: number of words to read (unused)
+ * @data: pointer to the data read
+ *
+ * Wrapper function to return data formerly found in the NVM.
+ **/
+static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 E1000_UNUSEDARG words, u16 *data)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_invm_i210");
+
+ /* Only the MAC addr is required to be present in the iNVM */
+ switch (offset) {
+ case NVM_MAC_ADDR:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
+ if (ret_val != E1000_SUCCESS)
+ DEBUGOUT("MAC Addr not found in iNVM\n");
+ break;
+ case NVM_INIT_CTRL_2:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_INIT_CTRL_4:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_1_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_0_2_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_SUB_DEV_ID:
+ *data = hw->subsystem_device_id;
+ break;
+ case NVM_SUB_VEN_ID:
+ *data = hw->subsystem_vendor_id;
+ break;
+ case NVM_DEV_ID:
+ *data = hw->device_id;
+ break;
+ case NVM_VEN_ID:
+ *data = hw->vendor_id;
+ break;
+ default:
+ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
+ *data = NVM_RESERVED_WORD;
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ DEBUGFUNC("e1000_read_invm_version");
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
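+	/* Version records are scanned from the last iNVM block backwards */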
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+		/* Check if we have image type in a later location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i210");
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+
+		/*
+		 * Temporarily replace the semaphore-grabbing read function
+		 * with the one that skips the semaphore, since we already
+		 * hold it here.
+		 */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = e1000_read_nvm_eerd;
+
+ status = e1000_validate_nvm_checksum_generic(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM, and finally commits the EEPROM data to the flash.
+ **/
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i210");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails.
+ */
+ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ /*
+ * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
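+		/* Set the checksum word so that words 0x00-0x3F sum to
+		 * NVM_SUM (0xBABA).
+		 */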
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val != E1000_SUCCESS) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = e1000_update_flash_i210(hw);
+ } else {
+ ret_val = E1000_ERR_SWFW_SYNC;
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ DEBUGFUNC("e1000_get_flash_presence_i210");
+
+ eec = E1000_READ_REG(hw, E1000_EECD);
+
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_flash_i210 - Commit EEPROM to the flash
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_update_flash_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 flup;
+
+ DEBUGFUNC("e1000_update_flash_i210");
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == -E1000_ERR_NVM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
+ E1000_WRITE_REG(hw, E1000_EECD, flup);
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == E1000_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+out:
+ return ret_val;
+}
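+
+/* Note: setting E1000_EECD_FLUPD_I210 asks the device to copy the updated
+ * Shadow RAM contents to flash; FLUDONE is polled both before (to avoid
+ * racing an earlier update) and after (to confirm completion).
+ */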
+
+/**
+ * e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ DEBUGFUNC("e1000_pool_flash_update_done_i210");
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = E1000_READ_REG(hw, E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the i210/i211 NVM parameters and function pointers.
+ **/
+static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_i210");
+
+ ret_val = e1000_init_nvm_params_82575(hw);
+ nvm->ops.acquire = e1000_acquire_nvm_i210;
+ nvm->ops.release = e1000_release_nvm_i210;
+ nvm->ops.valid_led_default = e1000_valid_led_default_i210;
+ if (e1000_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = e1000_read_nvm_srrd_i210;
+ nvm->ops.write = e1000_write_nvm_srwr_i210;
+ nvm->ops.validate = e1000_validate_nvm_checksum_i210;
+ nvm->ops.update = e1000_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = e1000_read_invm_i210;
+ nvm->ops.write = e1000_null_write_nvm;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_i210 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_i210(struct e1000_hw *hw)
+{
+ e1000_init_function_pointers_82575(hw);
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+
+ return;
+}
+
+/**
+ * e1000_valid_led_default_i210 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the returned LED configuration read from NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_i210");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_I210_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT_I210;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_access_xmdio_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the MMD access control register back to 0 */
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
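+
+/* The sequence above follows what appears to be the standard IEEE 802.3
+ * Annex 22D ("Clause 45 over Clause 22") indirect MMD access: write the MMD
+ * device address to the access control register, write the target register
+ * address to the address/data register, switch the control register to data
+ * mode, then read or write the data register.
+ */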
+
+/**
+ * e1000_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to the value read from the XMDIO address
+ **/
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * e1000_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
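+
+/* Usage sketch; the MMD device address and register below are hypothetical,
+ * chosen for illustration only:
+ *
+ * u16 val;
+ * if (e1000_read_xmdio_reg(hw, 0x0000, 3, &val) == E1000_SUCCESS)
+ * e1000_write_xmdio_reg(hw, 0x0000, 3, val);
+ */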
--- /dev/null
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
+s32 e1000_update_flash_i210(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
+
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+ (u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+ (u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+ (u16)(((invm_dword) & 0xFFFF0000) >> 16)
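+
+/* Per the accessor macros above, each iNVM dword is laid out as bits 2:0 =
+ * record type, bits 15:9 = word address, and bits 31:16 = data. For example,
+ * the dword 0x12340201 decodes to type 0x1, word address 0x01, data 0x1234.
+ */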
+
+enum E1000_INVM_STRUCTURE_TYPE {
+ E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
+ E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
+ E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
+ E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
+ E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
+ E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
+};
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
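+
+/* Worked example: a version field of 0x152 decodes to major
+ * (0x152 & 0x3F0) >> 4 = 0x15 and minor 0x152 & 0xF = 0x2.
+ */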
+
+#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for I211 devices */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+#endif
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
/**
* e1000_init_mac_ops_generic - Initialize MAC function pointers
mac->ops.setup_link = e1000_null_ops_generic;
mac->ops.get_link_up_info = e1000_null_link_info;
mac->ops.check_for_link = e1000_null_ops_generic;
- mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
/* Management */
mac->ops.check_mng_mode = e1000_null_mng_mode;
- mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
- mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
- mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
/* VLAN, MC, etc. */
mac->ops.update_mc_addr_list = e1000_null_update_mc;
mac->ops.clear_vfta = e1000_null_mac_generic;
* e1000_null_ops_generic - No-op function, returns 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_ops_generic(struct e1000_hw *hw)
+s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
DEBUGFUNC("e1000_null_ops_generic");
return E1000_SUCCESS;
* e1000_null_mac_generic - No-op function, return void
* @hw: pointer to the HW structure
**/
-void e1000_null_mac_generic(struct e1000_hw *hw)
+void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
DEBUGFUNC("e1000_null_mac_generic");
return;
* e1000_null_link_info - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d)
+s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
{
DEBUGFUNC("e1000_null_link_info");
return E1000_SUCCESS;
* e1000_null_mng_mode - No-op function, return false
* @hw: pointer to the HW structure
**/
-bool e1000_null_mng_mode(struct e1000_hw *hw)
+bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
{
DEBUGFUNC("e1000_null_mng_mode");
return false;
* e1000_null_update_mc - No-op function, return void
* @hw: pointer to the HW structure
**/
-void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a)
+void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
{
DEBUGFUNC("e1000_null_update_mc");
return;
* e1000_null_write_vfta - No-op function, return void
* @hw: pointer to the HW structure
**/
-void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b)
+void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
{
DEBUGFUNC("e1000_null_write_vfta");
return;
* e1000_null_rar_set - No-op function, return void
* @hw: pointer to the HW structure
**/
-void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a)
+void e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
{
DEBUGFUNC("e1000_null_rar_set");
return;
struct e1000_bus_info *bus = &hw->bus;
u32 reg;
- /*
- * The status register reports the correct function number
+ /* The status register reports the correct function number
* for the device regardless of function swap state.
*/
reg = E1000_READ_REG(hw, E1000_STATUS);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
u32 i;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val;
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ADDR_LEN];
return ret_val;
- /*
- * Alternate MAC address is handled by the option ROM for 82580
+ /* Alternate MAC address is handled by the option ROM for 82580
* and newer. SW support not required.
*/
if (hw->mac.type >= e1000_82580)
return E1000_SUCCESS;
}
- /*
- * We have a valid alternate MAC address, and we want to treat it the
+ /* We have a valid alternate MAC address, and we want to treat it the
* same as the normal permanent MAC address stored by the HW into the
* RAR. Do this by mapping this address into RAR0.
*/
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
DEBUGFUNC("e1000_rar_set_generic");
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
if (rar_low || rar_high)
rar_high |= E1000_RAH_AV;
- /*
- * Some bridges will combine consecutive 32-bit writes into
+ /* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
- /*
- * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
- /*
- * The portion of the address that is used for the hash table
+ /* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of
DEBUGFUNC("e1000_check_for_copper_link");
- /*
- * We only want to go out to the PHY registers to see if Auto-Neg
+ /* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
if (!mac->get_link_status)
return E1000_SUCCESS;
- /*
- * First we want to see if the MII Status Register reports
+ /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
mac->get_link_status = false;
- /*
- * Check if there was DownShift, must be checked
+ /* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000_check_downshift_generic(hw);
- /*
- * If we are forcing speed/duplex, then we simply return since
+ /* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
return -E1000_ERR_CONFIG;
- /*
- * Auto-Neg is enabled. Auto Speed Detection takes care
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
mac->ops.config_collision_dist(hw);
- /*
- * Configure Flow Control now that Auto-Neg has completed.
+ /* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
status = E1000_READ_REG(hw, E1000_STATUS);
rxcw = E1000_READ_REG(hw, E1000_RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), the cable is plugged in (we have signal),
* and our link partner is not trying to auto-negotiate with us (we
* are receiving idles or data), we need to force link up. We also
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
status = E1000_READ_REG(hw, E1000_STATUS);
rxcw = E1000_READ_REG(hw, E1000_RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
mac->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
DEBUGFUNC("e1000_set_default_fc_generic");
- /*
- * Read and store word 0x0F of the EEPROM. This word contains bits
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
* disabling auto-negotiation, and the direction of the
return ret_val;
}
- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
hw->fc.requested_mode = e1000_fc_none;
else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
NVM_WORD0F_ASM_DIR)
DEBUGFUNC("e1000_setup_link_generic");
- /*
- * In the case of the phy reset being blocked, we already have a link.
+ /* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return E1000_SUCCESS;
- /*
- * If requested flow control is set to default, set flow control
+ /* If requested flow control is set to default, set flow control
* based on the EEPROM flow control settings.
*/
if (hw->fc.requested_mode == e1000_fc_default) {
return ret_val;
}
- /*
- * Save off the requested flow control mode for use later. Depending
+ /* Save off the requested flow control mode for use later. Depending
* on the link partner's capabilities, we may or may not use this mode.
*/
hw->fc.current_mode = hw->fc.requested_mode;
if (ret_val)
return ret_val;
- /*
- * Initialize the flow control address, type, and PAUSE timer
+ /* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
DEBUGFUNC("e1000_commit_fc_settings_generic");
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the device accordingly. If auto-negotiation is enabled, then
* software will have to set the "PAUSE" bits to the correct value in
* the Transmit Config Word Register (TXCW) and re-start auto-
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case e1000_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is disabled
+ /* Rx Flow control is enabled and Tx Flow control is disabled
* by a software over-ride. Since there really isn't a way to
* advertise that we are capable of Rx Pause ONLY, we will
* advertise that we support both symmetric and asymmetric Rx
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case e1000_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is disabled,
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
* by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case e1000_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
- /*
- * If we have a signal (the cable is plugged in, or assumed true for
+ /* If we have a signal (the cable is plugged in, or assumed true for
* serdes media) then poll for a "Link-Up" indication in the Device
* Status Register. Time-out if a link isn't seen in 500 milliseconds
* (Auto-negotiation should complete in less than 500
if (i == FIBER_LINK_UP_LIMIT) {
DEBUGOUT("Never got a valid link from auto-neg!!!\n");
mac->autoneg_failed = true;
- /*
- * AutoNeg failed to achieve a link, so we'll call
+ /* AutoNeg failed to achieve a link, so we'll call
* mac->check_for_link. This routine will force the
* link up if we detect a signal. This will allow us to
* communicate with non-autonegotiating link partners.
if (ret_val)
return ret_val;
- /*
- * Since auto-negotiation is enabled, take the link out of reset (the
+ /* Since auto-negotiation is enabled, take the link out of reset (the
* link will be in reset, because we previously reset the chip). This
* will restart auto-negotiation. If auto-negotiation is successful
* then the link-up status bit will be set and the flow control enable
E1000_WRITE_FLUSH(hw);
msec_delay(1);
- /*
- * For these adapters, the SW definable pin 1 is set when the optics
+ /* For these adapters, the SW definable pin 1 is set when the optics
* detect a signal. If we have a signal, then poll for a "Link-Up"
* indication.
*/
DEBUGFUNC("e1000_set_fc_watermarks_generic");
- /*
- * Set the flow control receive threshold registers. Normally,
+ /* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (hw->fc.current_mode & e1000_fc_tx_pause) {
- /*
- * We need to set up the Receive Threshold high and low water
+ /* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
ctrl = E1000_READ_REG(hw, E1000_CTRL);
- /*
- * Because we didn't get link via the internal auto-negotiation
+ /* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit and
* receive flow control.
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = E1000_SUCCESS;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
DEBUGFUNC("e1000_config_fc_after_link_up_generic");
- /*
- * Check for the case where we have fiber media and auto-neg failed
+ /* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
return ret_val;
}
- /*
- * Check for the case where we have copper media and auto-neg is
+ /* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
- /*
- * Read the MII Status Register and check to see if AutoNeg
+ /* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
return ret_val;
}
- /*
- * The AutoNeg process has completed, so we now need to
+ /* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto_Negotiation Base
* Page Ability Register (Address 5) to determine how
if (ret_val)
return ret_val;
- /*
- * Two bits in the Auto Negotiation Advertisement Register
+ /* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
+ /* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
}
}
- /*
- * For receiving PAUSE frames ONLY.
+ /* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
hw->fc.current_mode = e1000_fc_tx_pause;
DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
}
- /*
- * For transmitting PAUSE frames ONLY.
+ /* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
hw->fc.current_mode = e1000_fc_rx_pause;
DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
} else {
- /*
- * Per the IEEE spec, at this point flow control
+ /* Per the IEEE spec, at this point flow control
* should be disabled.
*/
hw->fc.current_mode = e1000_fc_none;
DEBUGOUT("Flow Control = NONE.\n");
}
- /*
- * Now we need to do one last check... If we auto-
+ /* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
if (duplex == HALF_DUPLEX)
hw->fc.current_mode = e1000_fc_none;
- /*
- * Now we call a subroutine to actually force the MAC
+ /* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = e1000_force_mac_fc_generic(hw);
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ DEBUGOUT("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
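+
+ /* Note: the E1000_TXCW_PAUSE and E1000_TXCW_ASM_DIR masks are reused
+ * for the PCS_ANADV/PCS_LPAB checks above on the assumption that the
+ * pause and asymmetric-direction bits occupy the same positions in
+ * those registers as in TXCW.
+ */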
+
return E1000_SUCCESS;
}
* Sets the speed and duplex to gigabit full duplex (the only possible option)
* for fiber/serdes links.
**/
-s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
u16 *speed, u16 *duplex)
{
DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
- /*
- * set the blink bit for each LED that's "on" (0x0E)
- * in ledctl_mode2
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
*/
ledctl_blink = hw->mac.ledctl_mode2;
- for (i = 0; i < 4; i++)
- if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
- E1000_LEDCTL_MODE_LED_ON)
- ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
- (i * 8));
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
}
E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
return E1000_SUCCESS;
}
+/**
+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
+ * operation.
+ **/
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
+
+ return E1000_SUCCESS;
+}
+
/**
* e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register
* @hw: pointer to the HW structure
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
-/*
- * Functions that should not be called directly from drivers but can be used
- * by other files in this 'shared code'
- */
void e1000_init_mac_ops_generic(struct e1000_hw *hw);
void e1000_null_mac_generic(struct e1000_hw *hw);
s32 e1000_null_ops_generic(struct e1000_hw *hw);
s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
s32 e1000_setup_led_generic(struct e1000_hw *hw);
s32 e1000_setup_link_generic(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
u32 offset, u8 data);
void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
-void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
-void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
void e1000_reset_adaptive_generic(struct e1000_hw *hw);
void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/* Check that the host interface is enabled. */
hicr = E1000_READ_REG(hw, E1000_HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
+ if (!(hicr & E1000_HICR_EN)) {
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
return hw->mac.tx_pkt_filtering;
}
- /*
- * If we can't read from the host interface for whatever
+ /* If we can't read from the host interface for whatever
* reason, disable filtering.
*/
- ret_val = hw->mac.ops.mng_enable_host_if(hw);
+ ret_val = e1000_mng_enable_host_if_generic(hw);
if (ret_val != E1000_SUCCESS) {
hw->mac.tx_pkt_filtering = false;
return hw->mac.tx_pkt_filtering;
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
E1000_MNG_DHCP_COOKIE_LENGTH);
- /*
- * If either the checksums or signature don't match, then
+ /* If either the checksums or signature don't match, then
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
/* Calculate length in DWORDs */
length >>= 2;
- /*
- * The device driver writes the relevant command block into the
+ /* The device driver writes the relevant command block into the
* ram area.
*/
for (i = 0; i < length; i++) {
hdr.checksum = 0;
/* Enable the host interface */
- ret_val = hw->mac.ops.mng_enable_host_if(hw);
+ ret_val = e1000_mng_enable_host_if_generic(hw);
if (ret_val)
return ret_val;
/* Populate the host interface with the contents of "buffer". */
- ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
- sizeof(hdr), &(hdr.checksum));
+ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
if (ret_val)
return ret_val;
/* Write the manageability command header */
- ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
+ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
if (ret_val)
return ret_val;
return true;
} else if ((manc & E1000_MANC_SMBUS_EN) &&
!(manc & E1000_MANC_ASF_EN)) {
- return true;
+ return true;
}
return false;
/* Check that the host interface is enabled. */
hicr = E1000_READ_REG(hw, E1000_HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
+ if (!(hicr & E1000_HICR_EN)) {
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
/* Calculate length in DWORDs */
length >>= 2;
- /*
- * The device driver writes the relevant command block
+ /* The device driver writes the relevant command block
* into the ram area.
*/
for (i = 0; i < length; i++)
return E1000_SUCCESS;
}
+/**
+ * e1000_load_firmware - Write proxy FW code buffer to host interface
+ * and execute it
+ * @hw: pointer to the HW structure
+ * @buffer: contains a firmware to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Returns E1000_SUCCESS on success, -E1000_ERR_CONFIG if FW loading is not
+ * enabled in HW, -E1000_ERR_INVALID_ARGUMENT for a bad buffer length, else
+ * -E1000_ERR_HOST_INTERFACE_COMMAND on reset/start failure or timeout.
+ **/
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, hibba, fwsm, icr, i;
+
+ DEBUGFUNC("e1000_load_firmware");
+
+ if (hw->mac.type < e1000_i210) {
+ DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
+ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ /* Clear notification from ROM-FW by reading ICR register */
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+
+ /* Reset ROM-FW */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ hicr |= E1000_HICR_FW_RESET_ENABLE;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ hicr |= E1000_HICR_FW_RESET;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Wait till MAC notifies about its readiness after ROM-FW reset */
+ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+ if (icr & E1000_ICR_MNG)
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == (E1000_HI_COMMAND_TIMEOUT * 2)) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Wait till MAC is ready to accept new FW code */
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if ((fwsm & E1000_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
+ E1000_FWSM_HI_EN_ONLY_MODE))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant FW code block
+ * into the ram area in DWORDs via 1kB ram addressing window.
+ */
+ for (i = 0; i < length; i++) {
+ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
+ /* Point to correct 1kB ram window */
+ hibba = E1000_HI_FW_BASE_ADDRESS +
+ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
+ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
+
+ E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
+ *((u32 *)buffer + i));
+ }
+
+ /* Setting this bit tells the ARC that a new FW is ready to execute. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for successful FW start. */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("New FW did not start within timeout period.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
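+
+/* Worked example of the windowing above: with E1000_HI_FW_BLOCK_DWORD_LENGTH
+ * of 256 dwords (1kB), dword i of the buffer goes to dword slot i % 256 of
+ * the window based at E1000_HI_FW_BASE_ADDRESS + 1024 * (i / 256); e.g.
+ * dword 300 lands in slot 44 of the window based at 0x10400.
+ */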
+
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
u8 e1000_calculate_checksum(u8 *buffer, u32 length);
s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
enum e1000_mng_mode {
e1000_mng_mode_none = 0,
#define E1000_FWSM_MODE_MASK 0xE
#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000
+#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
#define E1000_MNG_IAMT_MODE 0x3
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
+#define E1000_HI_FW_BASE_ADDRESS 0x10000
+#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
+#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
+#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
#define E1000_HICR_EN 0x01 /* Enable bit - RO */
/* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_C 0x02
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
* e1000_null_mbx_check_for_flag - No-op function, return 0
* @hw: pointer to the HW structure
**/
-static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id)
+static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG mbx_id)
{
DEBUGFUNC("e1000_null_mbx_check_flag");
* e1000_null_mbx_transact - No-op function, return 0
* @hw: pointer to the HW structure
**/
-static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG *msg,
+ u16 E1000_UNUSEDARG size,
+ u16 E1000_UNUSEDARG mbx_id)
{
DEBUGFUNC("e1000_null_mbx_rw_msg");
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
+ case e1000_i354:
mbx->timeout = 0;
mbx->usec_delay = 0;
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
* e1000_null_nvm_read - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
+s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
{
DEBUGFUNC("e1000_null_read_nvm");
return E1000_SUCCESS;
* e1000_null_nvm_generic - No-op function, return void
* @hw: pointer to the HW structure
**/
-void e1000_null_nvm_generic(struct e1000_hw *hw)
+void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
DEBUGFUNC("e1000_null_nvm_generic");
return;
* e1000_null_led_default - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data)
+s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *data)
{
DEBUGFUNC("e1000_null_led_default");
return E1000_SUCCESS;
* e1000_null_write_nvm - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
+s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
{
DEBUGFUNC("e1000_null_write_nvm");
return E1000_SUCCESS;
E1000_WRITE_FLUSH(hw);
usec_delay(1);
- /*
- * Read "Status Register" repeatedly until the LSB is cleared.
+ /* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
DEBUGFUNC("e1000_read_nvm_spi");
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
- /*
- * Read the data. SPI NVMs increment the address with each byte
+ /* Read the data. SPI NVMs increment the address with each byte
* read and will roll over if reading beyond the end. This allows
* us to read the whole NVM from any offset
*/
DEBUGFUNC("e1000_read_nvm_eerd");
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
DEBUGFUNC("e1000_write_nvm_spi");
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
return -E1000_ERR_NVM;
}
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = e1000_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
e1000_standby_nvm(hw);
e1000_standby_nvm(hw);
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
+ /* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
break;
}
}
+ msec_delay(10);
+ nvm->ops.release(hw);
}
- msec_delay(10);
-release:
- nvm->ops.release(hw);
-
return ret_val;
}
return ret_val;
}
- /*
- * if nvm_data is not ptr guard the PBA must be in legacy format which
+ /* if nvm_data is not ptr guard the PBA must be in legacy format which
* means pba_ptr is actually our second data word for the PBA number
* and we can decode it into an ascii string
*/
if (nvm_data != NVM_PBA_PTR_GUARD) {
DEBUGOUT("NVM PBA number is not stored as string\n");
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
+ /* make sure the caller's buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
DEBUGOUT("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
/* if data is not ptr guard the PBA must be in legacy format */
if (nvm_data != NVM_PBA_PTR_GUARD) {
- *pba_num_size = 11;
+ *pba_num_size = E1000_PBANUM_LENGTH;
return E1000_SUCCESS;
}
return -E1000_ERR_NVM_PBA_SECTION;
}
- /*
- * Convert from length in u16 values to u8 chars, add 1 for NULL,
+ /* Convert from length in u16 values to u8 chars, add 1 for NULL,
* and subtract 2 because length field is included in length.
*/
*pba_num_size = ((u32)length * 2) - 1;
return E1000_SUCCESS;
}
+
/**
* e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
E1000_WRITE_FLUSH(hw);
}
+/**
+ * e1000_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
+ *
+ * unsupported/not present features return 0 in version structure
+ **/
+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ /* basic eeprom version numbers, bits used vary by part and by tool
+ * used to create the nvm images */
+ /* Check which data format we have */
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ switch (hw->mac.type) {
+ case e1000_i211:
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
+ break;
+ case e1000_i210:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
+ case e1000_i350:
+ case e1000_i354:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert minor value to hex before assigning to output struct
+ * Val to be converted will not be higher than 99, per tool output
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
+ }
+ return;
+}
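+
+/* Worked example of the minor-version conversion above, assuming
+ * NVM_HEX_CONV is 16 and NVM_HEX_TENS is 10 (their values are not shown
+ * here): a stored minor of 0x25 yields q = 2, hval = 20, rem = 5, so
+ * eep_minor is reported as decimal 25, i.e. the stored hex digits are
+ * read as decimal digits.
+ */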
+
+
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
+
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+ u16 eep_build;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+
+
void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
void e1000_null_nvm_generic(struct e1000_hw *hw);
u16 *data);
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_get_fw_version(struct e1000_hw *hw,
+ struct e1000_fw_version *fw_vers);
#define E1000_STM_OPCODE 0xDB00
+
#endif
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#endif
#define usec_delay(x) udelay(x)
+#define usec_delay_irq(x) udelay(x)
#ifndef msec_delay
#define msec_delay(x) do { \
/* Don't mdelay in interrupt context! */ \
#endif
+#ifdef DEBUG
+#define DEBUGOUT(S) printk(KERN_DEBUG S)
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A)
+#else
#define DEBUGOUT(S)
#define DEBUGOUT1(S, A...)
+#endif
+#ifdef DEBUG_FUNC
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#else
#define DEBUGFUNC(F)
+#endif
#define DEBUGOUT2 DEBUGOUT1
#define DEBUGOUT3 DEBUGOUT2
#define DEBUGOUT7 DEBUGOUT3
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include "e1000_api.h"
-static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
* e1000_null_set_page - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_set_page(struct e1000_hw *hw, u16 data)
+s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG data)
{
DEBUGFUNC("e1000_null_set_page");
return E1000_SUCCESS;
* e1000_null_read_reg - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
{
DEBUGFUNC("e1000_null_read_reg");
return E1000_SUCCESS;
* e1000_null_phy_generic - No-op function, return void
* @hw: pointer to the HW structure
**/
-void e1000_null_phy_generic(struct e1000_hw *hw)
+void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
DEBUGFUNC("e1000_null_phy_generic");
return;
* e1000_null_lplu_state - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active)
+s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
+ bool E1000_UNUSEDARG active)
{
DEBUGFUNC("e1000_null_lplu_state");
return E1000_SUCCESS;
* e1000_null_write_reg - No-op function, return 0
* @hw: pointer to the HW structure
**/
-s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data)
+s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
{
DEBUGFUNC("e1000_null_write_reg");
return E1000_SUCCESS;
* @data: data value read
*
**/
-s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG *data)
{
DEBUGFUNC("e1000_read_i2c_byte_null");
return E1000_SUCCESS;
* @data: data value to write
*
**/
-s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG data)
{
DEBUGFUNC("e1000_write_i2c_byte_null");
return E1000_SUCCESS;
return -E1000_ERR_PARAM;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usec_delay(50);
+ usec_delay_irq(50);
mdic = E1000_READ_REG(hw, E1000_MDIC);
if (mdic & E1000_MDIC_READY)
break;
DEBUGOUT("MDI Error\n");
return -E1000_ERR_PHY;
}
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
*data = (u16) mdic;
return E1000_SUCCESS;
return -E1000_ERR_PARAM;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usec_delay(50);
+ usec_delay_irq(50);
mdic = E1000_READ_REG(hw, E1000_MDIC);
if (mdic & E1000_MDIC_READY)
break;
DEBUGOUT("MDI Error\n");
return -E1000_ERR_PHY;
}
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
return E1000_SUCCESS;
}
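/* Both polls above now cross-check the register number echoed back in the
 * MDIC dword against the offset that was requested; a mismatch means the
 * completion we saw belongs to some other MDIO transaction. A hypothetical
 * helper showing just the field extraction, using the same
 * E1000_MDIC_REG_* definitions as the checks above:
 */
static inline u16 example_mdic_echoed_offset(u32 mdic)
{
	return (u16)((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
}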
DEBUGFUNC("e1000_read_phy_reg_i2c");
- /*
- * Set up Op-code, Phy Address, and register address in the I2CCMD
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
/* Swap the data bytes for the I2C interface */
phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
- /*
- * Set up Op-code, Phy Address, and register address in the I2CCMD
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
return -E1000_ERR_PHY;
}
- /*
- * Set up Op-code, EEPROM Address,in the I2CCMD
+ /* Set up Op-code, EEPROM Address, in the I2CCMD
* register. The MAC will take care of interfacing with the
* EEPROM to retrieve the desired data.
*/
DEBUGOUT("I2CCMD command address exceeds upper limit\n");
return -E1000_ERR_PHY;
}
- /*
- * The programming interface is 16 bits wide
+ /* The programming interface is 16 bits wide
* so we need to read the whole word first
* then update appropriate byte lane and write
* the updated word back.
*/
- /*
- * Set up Op-code, EEPROM Address,in the I2CCMD
+ /* Set up Op-code, EEPROM Address, in the I2CCMD
* register. The MAC will take care of interfacing
* with an EEPROM to write the data given.
*/
E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
usec_delay(50);
- /*
- * Poll the ready bit to see if lastly
+ /* Poll the ready bit to see if the last
* launched I2C operation completed
*/
i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
/* Check if this is READ or WRITE phase */
if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
E1000_I2CCMD_OPCODE_READ) {
- /*
- * Write the selected byte
+ /* Write the selected byte
* lane and update whole word
*/
data_local = i2ccmd & 0xFF00;
if (ret_val)
return ret_val;
+ /* Set MDI/MDIX mode */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /* Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
return e1000_set_master_slave_mode(hw);
}
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction == 1)
+ if (phy->disable_polarity_correction)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
return ret_val;
if (phy->revision < E1000_REVISION_4) {
- /*
- * Force TX_CLK in the Extended PHY Specific Control Register
+ /* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
if (ret_val)
return ret_val;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction == 1)
+ if (phy->disable_polarity_correction)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
/* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
return ret_val;
}
+ ret_val = e1000_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
+
return E1000_SUCCESS;
}
return ret_val;
}
- /*
- * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
* timeout issues when LFS is enabled.
*/
msec_delay(100);
/* set auto-master slave resolution settings */
if (hw->mac.autoneg) {
- /*
- * when autonegotiation advertisement is only 1000Mbps then we
+ /* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default.
*/
return ret_val;
}
- /*
- * Need to parse both autoneg_advertised and fc and set up
+ /* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
- /*
- * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
*/
switch (hw->fc.current_mode) {
case e1000_fc_none:
- /*
- * Flow control (Rx & Tx) is completely disabled by a
+ /* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_rx_pause:
- /*
- * Rx Flow control is enabled, and Tx Flow control is
+ /* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
+ /* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case e1000_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
DEBUGFUNC("e1000_copper_link_autoneg");
- /*
- * Perform some bounds checking on the autoneg advertisement
+ /* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
- /*
- * If autoneg_advertised is zero, we assume it was not defaulted
+ /* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
- if (phy->autoneg_advertised == 0)
+ if (!phy->autoneg_advertised)
phy->autoneg_advertised = phy->autoneg_mask;
DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
}
DEBUGOUT("Restarting Auto-Neg\n");
- /*
- * Restart auto-negotiation by setting the Auto Neg Enable bit and
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
if (ret_val)
return ret_val;
- /*
- * Does the user want to wait for Auto-Neg to complete here, or
+ /* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->autoneg_wait_to_complete) {
- ret_val = hw->mac.ops.wait_autoneg(hw);
+ ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
DEBUGOUT("Error while waiting for autoneg to complete\n");
return ret_val;
DEBUGFUNC("e1000_setup_copper_link_generic");
if (hw->mac.autoneg) {
- /*
- * Setup autoneg and flow control advertisement and perform
+ /* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
- /*
- * PHY will be set to 10H, 10F, 100H or 100F
+ /* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
DEBUGOUT("Forcing Speed and Duplex\n");
}
}
- /*
- * Check link status. Wait up to 100 microseconds for link to become
+ /* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
if (ret_val)
return ret_val;
- /*
- * Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /* Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
case I347AT4_E_PHY_ID:
case M88E1340M_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case I210_I_PHY_ID:
reset_dsp = false;
break;
default:
if (!reset_dsp) {
DEBUGOUT("Link taking longer than expected.\n");
} else {
- /*
- * We didn't get link.
+ /* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = phy->ops.write_reg(hw,
hw->phy.id == M88E1340M_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID)
return E1000_SUCCESS;
+ if (hw->phy.id == I210_I_PHY_ID)
+ return E1000_SUCCESS;
+ if (hw->phy.id == M88E1543_E_PHY_ID)
+ return E1000_SUCCESS;
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
- /*
- * Resetting the phy means we need to re-force TX_CLK in the
+ /* Resetting the phy means we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock from
* the reset value of 2.5MHz.
*/
if (ret_val)
return ret_val;
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
+ /* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
ctrl |= E1000_CTRL_SPD_100;
*phy_ctrl |= MII_CR_SPEED_100;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ *phy_ctrl &= ~MII_CR_SPEED_1000;
DEBUGOUT("Forcing 100mb\n");
} else {
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- *phy_ctrl |= MII_CR_SPEED_10;
*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
DEBUGOUT("Forcing 10mb\n");
}
data);
if (ret_val)
return ret_val;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
DEBUGFUNC("e1000_check_downshift_generic");
switch (phy->type) {
+ case e1000_phy_i210:
case e1000_phy_m88:
case e1000_phy_gg82563:
offset = M88E1000_PHY_SPEC_STATUS;
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
DEBUGFUNC("e1000_check_polarity_igp");
- /*
- * Polarity is determined based on the speed of
+ /* Polarity is determined based on the speed of
* our connection.
*/
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
offset = IGP01E1000_PHY_PCS_INIT_REG;
mask = IGP01E1000_PHY_POLARITY_MASK;
} else {
- /*
- * This really only applies to 10Mbps since
+ /* This really only applies to 10Mbps since
* there is no polarity for 100Mbps (always 0).
*/
offset = IGP01E1000_PHY_PORT_STATUS;
ret_val = phy->ops.read_reg(hw, offset, &data);
if (!ret_val)
- phy->cable_polarity = (data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
DEBUGFUNC("e1000_check_polarity_ife");
- /*
- * Polarity is determined based on the reversal feature being enabled.
+ /* Polarity is determined based on the reversal feature being enabled.
*/
if (phy->polarity_correction) {
offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
ret_val = phy->ops.read_reg(hw, offset, &phy_data);
if (!ret_val)
- phy->cable_polarity = (phy_data & mask)
+ phy->cable_polarity = ((phy_data & mask)
? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ : e1000_rev_polarity_normal);
return ret_val;
}
/**
- * e1000_wait_autoneg_generic - Wait for auto-neg completion
+ * e1000_wait_autoneg - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
* Waits for auto-negotiation to complete or for the auto-negotiation time
* limit to expire, whichever happens first.
**/
-s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 i, phy_status;
- DEBUGFUNC("e1000_wait_autoneg_generic");
+ DEBUGFUNC("e1000_wait_autoneg");
if (!hw->phy.ops.read_reg)
return E1000_SUCCESS;
msec_delay(100);
}
- /*
- * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
* has completed.
*/
return ret_val;
return E1000_SUCCESS;
for (i = 0; i < iterations; i++) {
- /*
- * Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
- /*
- * If the first read fails, another entity may have
+ /* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
if (ret_val)
return ret_val;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
- u16 phy_data, phy_data2, index, default_page, is_cm;
+ u16 phy_data, phy_data2, is_cm;
+ u16 index, default_page;
DEBUGFUNC("e1000_get_cable_length_m88_gen2");
switch (hw->phy.id) {
+ case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
+ case M88E1543_E_PHY_ID:
case M88E1340M_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
if (ret_val)
return ret_val;
- /*
- * Getting bits 15:9, which represent the combination of
+ /* Getting bits 15:9, which represent the combination of
* coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
return ret_val;
} else {
/* Polarity is forced */
- phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
}
ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
DEBUGFUNC("e1000_phy_hw_reset_generic");
- ret_val = phy->ops.check_reset_block(hw);
- if (ret_val)
- return E1000_SUCCESS;
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+ }
ret_val = phy->ops.acquire(hw);
if (ret_val)
* Generic function to wait 10 milliseconds for configuration to complete
* and return success.
**/
-s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
{
DEBUGFUNC("e1000_get_cfg_done_generic");
hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
/* Change cg_icount + enable integbp for channels BCD */
hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
- /*
- * Change cg_icount + enable integbp + change prop_factor_master
+ /* Change cg_icount + enable integbp + change prop_factor_master
* to 8 for channel A
*/
hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
/* Disable AHT in Slave mode on channel A */
hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
- /*
- * Enable LPLU and disable AN to 1000 in non-D0a states,
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
* Enable SPD+B2B
*/
hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
case M88E1000_E_PHY_ID:
case M88E1111_I_PHY_ID:
case M88E1011_I_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1340M_E_PHY_ID:
case I82580_I_PHY_ID:
phy_type = e1000_phy_82580;
break;
+ case I210_I_PHY_ID:
+ phy_type = e1000_phy_i210;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
e1000_get_phy_id(hw);
phy_type = e1000_get_phy_type_from_id(hw->phy.id);
- /*
- * If phy_type is valid, break - we found our
+ /* If phy_type is valid, break - we found our
* PHY address
*/
if (phy_type != e1000_phy_unknown)
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
+ if (hw->phy.type == e1000_phy_i210) {
+ hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+ power_reg &= ~GS40G_CS_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+ }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
+ /* i210 Phy requires an additional bit for power up/down */
+ if (hw->phy.type == e1000_phy_i210) {
+ hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+ power_reg |= GS40G_CS_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+ }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
msec_delay(1);
}
ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
- phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
if (ret_val)
return ret_val;
- length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
- ret_val = -E1000_ERR_PHY;
+ return -E1000_ERR_PHY;
phy->cable_length = length;
return E1000_SUCCESS;
}
+
+/**
+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_write_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read from;
+ * upper half is page to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_read_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
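/* GS40G (the I210/I211 internal PHY) registers are addressed as a 32-bit
 * value carrying the page in the upper half and the register in the lower
 * half; the two helpers above split that apart around a write to
 * GS40G_PAGE_SELECT. Illustration only, GS40G_REG() is a hypothetical
 * convenience macro and not part of the driver:
 */
#define GS40G_REG(page, reg) (((u32)(page) << GS40G_PAGE_SHIFT) | \
			      ((reg) & GS40G_OFFSET_MASK))
/* e.g. the copper-specific control used for PHY power down is page 0,
 * register 0x10: e1000_read_phy_reg_gs40g(hw, GS40G_REG(0, 0x10), &val);
 */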
+
+/**
+ * e1000_read_phy_reg_mphy - Read mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to be read
+ * @data: pointer to the read data
+ *
+ * Reads the mPHY control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready = false;
+
+ DEBUGFUNC("e1000_read_phy_reg_mphy");
+
+ /* Check if mPHY is ready for read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to read */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* We mask the address so that only the current lane is used */
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
+ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Read data from the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mphy - Write mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to write to
+ * @data: data to write to register at offset
+ * @line_override: used when we want to use different line than default one
+ *
+ * Writes data to mPHY control register.
+ **/
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready = false;
+
+ DEBUGFUNC("e1000_write_phy_reg_mphy");
+
+ /* Check if mPHY is ready for read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to write to */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* We mask the address so that only the current lane is used */
+ if (line_override)
+ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ else
+ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Write data to the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
+ * @hw: pointer to the HW structure
+ *
+ * Returns mPHY control register status.
+ **/
+bool e1000_is_mphy_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u32 mphy_ctrl = 0;
+ bool ready = false;
+
+ while (retry_count < 2) {
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_BUSY) {
+ usec_delay(20);
+ retry_count++;
+ continue;
+ }
+ ready = true;
+ break;
+ }
+
+ if (!ready)
+ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
+
+ return ready;
+}
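/* Illustration only: a hypothetical one-shot dump of an mPHY register on the
 * current lane, relying on the enable/restore handshake the helpers above
 * implement; example_dump_mphy() is not part of the driver.
 */
static s32 example_dump_mphy(struct e1000_hw *hw, u32 address)
{
	u32 val;
	s32 ret_val;

	ret_val = e1000_read_phy_reg_mphy(hw, address, &val);
	if (ret_val)
		return ret_val;
	DEBUGOUT2("mPHY[0x%04x] = 0x%08x\n", address, val);
	return E1000_SUCCESS;
}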
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
-s32 e1000_wait_autoneg_generic(struct e1000_hw *hw);
s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override);
+bool e1000_is_mphy_ready(struct e1000_hw *hw);
#define E1000_MAX_PHY_ADDR 8
#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */
-#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */
#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
#define IGP_PAGE_SHIFT 5
#define PHY_REG_MASK 0x1F
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT 0x16
+#define GS40G_PAGE_SHIFT 16
+#define GS40G_OFFSET_MASK 0xFFFF
+#define GS40G_PAGE_2 0x20000
+#define GS40G_MAC_REG2 0x15
+#define GS40G_MAC_LB 0x4140
+#define GS40G_MAC_SPEED_1G 0x0006
+#define GS40G_COPPER_SPEC 0x0010
+#define GS40G_CS_POWER_DOWN 0x0002
+
#define HV_INTC_FC_PAGE_START 768
#define I82578_ADDR_REG 29
#define I82577_ADDR_REG 16
#define I82577_CFG_REG 22
#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
#define I82577_CTRL_REG 23
/* 82577 specific PHY registers */
#define I82577_PHY_STATUS2_MDIX 0x0800
#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100
/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
+
+#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
+#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
+#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
+#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-/* Enable flexible speed on link-up */
-#define IGP01E1000_GMII_FLEX_SPD 0x0010
-#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */
-
#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
#define IGP02E1000_PHY_AGC_C 0x14B1
#define IGP02E1000_PHY_AGC_D 0x18B1
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
#define IGP02E1000_AGC_LENGTH_MASK 0x7F
#define IGP02E1000_AGC_RANGE 15
-#define IGP03E1000_PHY_MISC_CTRL 0x1B
-#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */
-
#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
/* IFE PHY Extended Status Control */
/* IFE PHY Special Control */
#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
#define IFE_PSC_FORCE_POLARITY 0x0020
-#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
/* IFE PHY Special Control and LED Control */
#define IFE_PSCL_PROBE_MODE 0x0020
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define _E1000_REGS_H_
#define E1000_CTRL 0x00000 /* Device Control - RW */
-#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
#define E1000_STATUS 0x00008 /* Device Status - RO */
#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define E1000_EERD 0x00014 /* EEPROM Read - RW */
#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
+#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
+#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
+#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
-#define E1000_FEXT 0x0002C /* Future Extended - RW */
-#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
-#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_TCTL 0x00400 /* Tx Control - RW */
#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
-#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */
#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_LEDMUX 0x08130 /* LED MUX Control */
#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
-#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
-#define E1000_FLSWCTL 0x01030 /* FLASH control register */
-#define E1000_FLSWDATA 0x01034 /* FLASH data register */
-#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
-#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n)))
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
/* Split and Replication Rx Control - RW */
#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
-/*
- * Convenience macros
+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL 0x12038
+#define E1000_I210_FLMNGDATA 0x1203C
+#define E1000_I210_FLMNGCNT 0x12040
+
+#define E1000_I210_FLSWCTL 0x12048
+#define E1000_I210_FLSWDATA 0x1204C
+#define E1000_I210_FLSWCNT 0x12050
+
+#define E1000_I210_FLA 0x1201C
+
+#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+
+/* QAV Tx mode control register */
+#define E1000_I210_TQAVCTRL 0x3570
+
+/* QAV Tx mode control register bitfields masks */
+/* QAV enable */
+#define E1000_TQAVCTRL_MODE (1 << 0)
+/* Fetching arbitration type */
+#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
+/* Fetching timer enable */
+#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
+/* Launch arbitration type */
+#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
+/* Launch timer enable */
+#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
+/* SP waits for SR enable */
+#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
+/* Fetching timer correction */
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
+ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
+
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
+
+/* Queues fetch arbitration priority control register */
+#define E1000_I210_TQAVARBCTRL 0x3574
+/* Queues priority masks where _n and _p can be 0-3. */
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
+
+/* QAV Tx mode control register bitfields masks */
+#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
+#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
+#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
+
+/* Good transmitted packets counter registers */
+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
+
+#define E1000_MMDAC 13 /* MMD Access Control */
+#define E1000_MMDAAD 14 /* MMD Access Address/Data */
+
+/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
-#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */
#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
#define E1000_DTXMXSZRQ 0x03540
#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
-#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
-/*
- * LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
+/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
* key - RW.
*/
#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
-#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Pkt Cnt - RW */
#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
#define E1000_HOST_IF 0x08800 /* Host Interface */
#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
/* Flexible Host Filter Table */
#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
/* Ext Flexible Host Filter Table */
#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
-#define E1000_MDPHYA 0x0003C /* PHY address - RW */
#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
/* Management Decision Filters */
#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
-/* MSI-X Table entry addr low reg - RW */
-#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10))
-/* MSI-X Table entry addr upper reg - RW */
-#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10))
-/* MSI-X Table entry message reg - RW */
-#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10))
-/* MSI-X Table entry vector ctrl reg - RW */
-#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10))
-#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
/* Filtering Registers */
#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
#endif
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include <net/tcp.h>
#endif
+#undef HAVE_HW_TIME_STAMP
+#ifdef HAVE_HW_TIME_STAMP
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#endif
#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif
-#undef HAVE_HW_TIME_STAMP
-#ifdef HAVE_HW_TIME_STAMP
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
-#include <linux/net_tstamp.h>
-
-#endif
struct igb_adapter;
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
#include <linux/dca.h>
#endif
-#ifndef HAVE_HW_TIME_STAMP
-#undef IGB_PER_PKT_TIMESTAMP
-#endif
-
-
#include "kcompat.h"
#ifdef HAVE_SCTP
printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
__FUNCTION__ , ## args))
+#ifdef HAVE_PTP_1588_CLOCK
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef HAVE_I2C_SUPPORT
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#endif /* HAVE_I2C_SUPPORT */
+
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ bool spoofchk_enabled;
+#endif
#endif
- struct pci_dev *vfdev;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGB_RX_PTHRESH 8
-#define IGB_RX_HTHRESH 8
-#define IGB_TX_PTHRESH 8
-#define IGB_TX_HTHRESH 1
-#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 4)
+#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
* i.e. RXBUFFER_512 --> size-1024 slab
*/
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_512 512
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_512
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#if MAX_SKB_FRAGS < 8
+#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
+#else
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#endif
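/* Worked example of the sizing above, with a hypothetical 9216-byte jumbo
 * limit: on an architecture that only provides 4 SKB frags,
 * IGB_RX_BUFSZ = ALIGN(9216 / 4, 1024) = ALIGN(2304, 1024) = 3072, so a full
 * jumbo frame still fits across the available fragments; with 8 or more
 * frags the common 2048-byte buffer is used instead.
 */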
/* Packet Buffer allocations */
#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 32
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IGB_EEPROM_APME 0x0400
-#ifndef ETH_TP_MDI_X
#define AUTO_ALL_MODES 0
-#endif
#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
struct igb_lro_stats {
u32 flushed;
u32 coal;
- u32 recycled;
};
/*
#endif /* IGB_NO_LRO */
struct igb_cb {
#ifndef IGB_NO_LRO
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
union { /* Union defining head/tail partner */
struct sk_buff *head;
struct sk_buff *tail;
};
+#endif
__be32 tsecr; /* timestamp echo response */
u32 tsval; /* timestamp value in host order */
u32 next_seq; /* next expected sequence number */
};
#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
+enum igb_tx_flags {
+ /* cmd_type flags */
+ IGB_TX_FLAGS_VLAN = 0x01,
+ IGB_TX_FLAGS_TSO = 0x02,
+ IGB_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGB_TX_FLAGS_IPV4 = 0x10,
+ IGB_TX_FLAGS_CSUM = 0x20,
+};
+
+/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
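/* Worked example of the split above, illustrative rather than quoted from
 * the transmit path: a TSO packet on a VLAN needing IPv4 checksum offload
 * would carry IGB_TX_FLAGS_VLAN | IGB_TX_FLAGS_TSO in the cmd_type group and
 * IGB_TX_FLAGS_IPV4 | IGB_TX_FLAGS_CSUM in the olinfo group, with the VLAN
 * tag itself held in the upper 16 bits per IGB_TX_FLAGS_VLAN_MASK and
 * IGB_TX_FLAGS_VLAN_SHIFT.
 */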
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#ifndef MAX_SKB_FRAGS
+#define DESC_NEEDED 4
+#elif (MAX_SKB_FRAGS < 16)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#endif
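/* Worked example of the worst case above, assuming 4 KiB pages: each frag
 * needs TXD_USE_COUNT(PAGE_SIZE) = DIV_ROUND_UP(4096, 32768) = 1 descriptor,
 * so a MAX_SKB_FRAGS of 10 gives 10 * 1 + 4 = 14 via the middle branch, and
 * a MAX_SKB_FRAGS of 17 gives 17 + 4 = 21 via the last one. The "+ 4" slack
 * is assumed here to cover items such as the context descriptor and head
 * data; the driver does not spell it out.
 */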
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
};
struct igb_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ struct sk_buff *skb;
+#else
struct page *page;
- dma_addr_t page_dma;
u32 page_offset;
#endif
};
u64 drops;
u64 csum_err;
u64 alloc_failed;
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
};
struct igb_ring_container {
u8 itr; /* current ITR setting for ring */
};
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- int cpu; /* CPU for DCA */
- u32 eims_value; /* EIMS mask value */
-
- struct igb_ring_container rx, tx;
-
- struct napi_struct napi;
- int numa_node;
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
-#ifndef IGB_NO_LRO
- struct igb_lro_list *lrolist; /* LRO list for queue vector*/
-#endif
- char name[IFNAMSIZ + 9];
-#ifndef HAVE_NETDEV_NAPI_LIST
- struct net_device poll_dev;
-#endif
-} ____cacheline_internodealigned_in_smp;
-
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
+#ifdef HAVE_PTP_1588_CLOCK
+ unsigned long last_rx_timestamp;
+#endif /* HAVE_PTP_1588_CLOCK */
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
- u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */
- u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_clean;
u16 next_to_use;
+ u16 next_to_alloc;
union {
/* TX */
struct igb_rx_queue_stats rx_stats;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
u16 rx_buffer_len;
+#else
+ struct sk_buff *skb;
#endif
};
};
struct net_device *vmdq_netdev;
int vqueue_index; /* queue index for virtual netdev */
#endif
- /* Items past this point are only used during ring alloc / free */
- dma_addr_t dma; /* phys address of the ring */
- int numa_node; /* node to alloc ring memory on */
-
} ____cacheline_internodealigned_in_smp;
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+#ifndef IGB_NO_LRO
+ struct igb_lro_list lrolist; /* LRO list for queue vector*/
+#endif
+ char name[IFNAMSIZ + 9];
+#ifndef HAVE_NETDEV_NAPI_LIST
+ struct net_device poll_dev;
+#endif
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
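/* The zero-length ring[0] member above lets each q_vector and its rings
 * share a single allocation. A minimal sketch of the assumed pattern;
 * ring_count and the kzalloc() call are illustrative, not quoted from
 * igb_main.c:
 *
 *	q_vector = kzalloc(sizeof(struct igb_q_vector) +
 *			   ring_count * sizeof(struct igb_ring),
 *			   GFP_KERNEL);
 */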
+
enum e1000_ring_flags_t {
#ifndef HAVE_NDO_SET_FEATURES
IGB_RING_FLAG_RX_CSUM,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG,
};
+
struct igb_mac_addr {
u8 addr[ETH_ALEN];
u16 queue;
// #endif /* IGB_PROCFS */
// #endif /* EXT_THERMAL_SENSOR_SUPPORT */
+#ifdef IGB_HWMON
+#define IGB_HWMON_TYPE_LOC 0
+#define IGB_HWMON_TYPE_TEMP 1
+#define IGB_HWMON_TYPE_CAUTION 2
+#define IGB_HWMON_TYPE_MAX 3
+
+struct hwmon_attr {
+ struct device_attribute dev_attr;
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor;
+ char name[12];
+};
+
+struct hwmon_buff {
+ struct device *device;
+ struct hwmon_attr *hwmon_list;
+ unsigned int n_hwmon;
+};
+#endif /* IGB_HWMON */
+
/* board specific private data structure */
struct igb_adapter {
#ifdef HAVE_VLAN_RX_REGISTER
bool fc_autoneg;
u8 tx_timeout_factor;
+#ifdef DEBUG
+ bool tx_hang_detected;
+ bool disable_hw_reset;
+#endif
u32 max_frame_size;
/* OS defined structs */
#ifndef IGB_NO_LRO
struct igb_lro_stats lro_stats;
#endif
-#ifdef HAVE_HW_TIME_STAMP
- struct cyclecounter cycles;
- struct timecounter clock;
- struct timecompare compare;
- struct hwtstamp_config hwtstamp_config;
-#endif
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
- u32 eeprom_wol;
-
u32 *config_space;
u16 tx_ring_count;
u16 rx_ring_count;
int int_mode;
u32 rss_queues;
u32 vmdq_pools;
- u16 fw_version;
- int node;
+ char fw_version[32];
u32 wvbr;
struct igb_mac_addr *mac_table;
#ifdef CONFIG_IGB_VMDQ_NETDEV
/* External Thermal Sensor support flag */
bool ets;
-#ifdef IGB_SYSFS
- struct kobject *info_kobj;
- struct kobject *therm_kobj[E1000_MAX_SENSORS];
-#else /* IGB_SYSFS */
+#ifdef IGB_HWMON
+ struct hwmon_buff igb_hwmon_buff;
+#else /* IGB_HWMON */
#ifdef IGB_PROCFS
struct proc_dir_entry *eth_dir;
struct proc_dir_entry *info_dir;
struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
+ bool old_lsc;
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
+#endif /* IGB_HWMON */
+ u32 etrack_id;
+
+#ifdef HAVE_PTP_1588_CLOCK
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct delayed_work ptp_overflow_work;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef HAVE_I2C_SUPPORT
+ struct i2c_algo_bit_data i2c_algo;
+ struct i2c_adapter i2c_adap;
+ struct i2c_client *i2c_client;
+#endif /* HAVE_I2C_SUPPORT */
+ unsigned long link_check_timeout;
+
+ int devrc;
+
+ int copper_tries;
+ u16 eee_advert;
};
#ifdef CONFIG_IGB_VMDQ_NETDEV
};
#endif
-
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_MSI_ENABLE (1 << 1)
-#define IGB_FLAG_DCA_ENABLED (1 << 2)
-#define IGB_FLAG_LLI_PUSH (1 << 3)
-#define IGB_FLAG_QUAD_PORT_A (1 << 4)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 5)
-#define IGB_FLAG_EEE (1 << 6)
-#define IGB_FLAG_DMAC (1 << 7)
-#define IGB_FLAG_DETECT_BAD_DMA (1 << 8)
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_LLI_PUSH (1 << 2)
+#define IGB_FLAG_QUAD_PORT_A (1 << 3)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
+#define IGB_FLAG_EEE (1 << 5)
+#define IGB_FLAG_DMAC (1 << 6)
+#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
+#define IGB_FLAG_PTP (1 << 8)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
+#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
+#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
+#define IGB_FLAG_MEDIA_RESET (1 << 14)
+#define IGB_FLAG_MAS_ENABLE (1 << 15)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0x0001
+#define IGB_MAS_ENABLE_1 0x0002
+#define IGB_MAS_ENABLE_2 0x0004
+#define IGB_MAS_ENABLE_3 0x0008
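
/* A hedged sketch (the helper name and the NVM word parameter are
 * assumptions) of how the per-port Media Auto Sense bits above pair
 * with IGB_FLAG_MAS_ENABLE: each LAN function tests its own enable bit
 * from the EEPROM word during init.
 */
static void igb_init_mas_sketch(struct igb_adapter *adapter, u16 eeprom_data)
{
	u16 port_bit = IGB_MAS_ENABLE_0 << adapter->hw.bus.func;

	if (eeprom_data & port_bit)
		adapter->flags |= IGB_FLAG_MAS_ENABLE;
}
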
#define IGB_MIN_TXPBSIZE 20408
#define IGB_TX_BUF_4096 4096
#define IGB_DMAC_9000 9000
#define IGB_DMAC_MAX 10000
-
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
#define IGB_TS_HDR_LEN 16
} cmd_or_resp;
u8 checksum;
};
+
+#pragma pack(push,1)
struct e1000_fw_drv_info {
struct e1000_fw_hdr hdr;
u8 port_num;
u16 pad; /* end spacing to ensure length is mult. of dword */
u8 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+#pragma pack(pop)
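
/* Illustrative only: pack(1) suppresses the padding the compiler would
 * otherwise insert, which keeps sizeof(e1000_fw_drv_info) equal to the
 * byte count the firmware expects. The demo structs below are
 * hypothetical, not from the patch.
 */
#pragma pack(push, 1)
struct pack_demo { u8 a; u32 b; };	/* sizeof == 5 */
#pragma pack(pop)
struct pad_demo { u8 a; u32 b; };	/* typically sizeof == 8 (3 pad bytes) */
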
+
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_check_options(struct igb_adapter *);
extern void igb_power_up_link(struct igb_adapter *);
+#ifdef HAVE_PTP_1588_CLOCK
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ skb_pull(skb, IGB_TS_HDR_LEN);
+#endif
+ return;
+ }
+
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS))
+ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ rx_ring->last_rx_timestamp = jiffies;
+}
+
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd);
+#endif /* HAVE_PTP_1588_CLOCK */
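
/* Usage note (an assumption from the declarations above; the Rx path is
 * not shown in this hunk): the Rx cleanup loop is expected to call the
 * inline once per completed descriptor before handing the skb up, e.g.
 *
 *	igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
 *	napi_gro_receive(&q_vector->napi, skb);
 *
 * TSIP timestamps arrive prepended to the packet (IGB_TS_HDR_LEN bytes,
 * hence the skb_pull), while TS timestamps are latched in registers and
 * fetched by igb_ptp_rx_rgtstamp().
 */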
#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *);
#endif
extern void igb_vlan_mode(struct net_device *, u32);
#endif
+#define E1000_PCS_CFG_IGN_SD 1
-#ifdef IGB_SYSFS
+#ifdef IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
#else
int igb_procfs_topdir_init(void);
void igb_procfs_topdir_exit(void);
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
+#endif /* IGB_HWMON */
+
#endif /* _IGB_H_ */
--- /dev/null
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
#endif /* CONFIG_PM_RUNTIME */
+#include <linux/highmem.h>
#include "igb.h"
#include "igb_regtest.h"
#include <linux/if_vlan.h>
+#ifdef ETHTOOL_GEEE
+#include <linux/mdio.h>
+#endif
#ifdef ETHTOOL_OPS_COMPAT
#include "kcompat_ethtool.c"
#ifndef IGB_NO_LRO
IGB_STAT("lro_aggregated", lro_stats.coal),
IGB_STAT("lro_flushed", lro_stats.flushed),
- IGB_STAT("lro_recycled", lro_stats.recycled),
#endif /* IGB_LRO */
IGB_STAT("tx_smbus", stats.mgptc),
IGB_STAT("rx_smbus", stats.mgprc),
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+#ifdef HAVE_PTP_1588_CLOCK
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+#endif /* HAVE_PTP_1588_CLOCK */
};
#define IGB_NETDEV_STAT(_net_stat) { \
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = (ADVERTISED_TP |
- ADVERTISED_Pause);
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ ecmd->advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
+
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ if (hw->mac.type == e1000_i354)
+ ecmd->supported |= (SUPPORTED_2500baseX_Full);
+
+ ecmd->advertising = ADVERTISED_FIBRE;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg |
- ADVERTISED_Pause);
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ ecmd->advertising = ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ ecmd->advertising = ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.autoneg == 1)
+ ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
}
- ecmd->transceiver = XCVR_INTERNAL;
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
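
/* The chain above implements the standard 802.3 pause advertisement
 * encoding; for reference:
 *
 *	requested_mode	Pause	Asym_Pause
 *	fc_full		set	clear
 *	fc_rx_pause	set	set
 *	fc_tx_pause	clear	set
 *	fc_none		clear	clear
 */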
status = E1000_READ_REG(hw, E1000_STATUS);
if (status & E1000_STATUS_LU) {
-
- if ((status & E1000_STATUS_SPEED_1000) ||
- hw->phy.media_type != e1000_media_type_copper)
+ if ((hw->mac.type == e1000_i354) &&
+ (status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER))
+ ecmd->speed = SPEED_2500;
+ else if (status & E1000_STATUS_SPEED_1000)
ecmd->speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
ecmd->speed = SPEED_100;
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
+
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
- ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ if ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
#ifdef ETH_TP_MDI_X
/* MDI-X => 2; MDI =>1; Invalid =>0 */
- if ((hw->phy.media_type == e1000_media_type_copper) &&
- netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
- ETH_TP_MDI;
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+#ifdef ETH_TP_MDI_AUTO
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
+#endif
#endif /* ETH_TP_MDI_X */
return 0;
}
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ if (ecmd->duplex == DUPLEX_HALF) {
+ if (!hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
+ hw->dev_spec._82575.eee_disable = true;
+ } else {
+ if (hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
+ hw->dev_spec._82575.eee_disable = false;
+ }
+
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
if (e1000_check_reset_block(hw)) {
return -EINVAL;
}
+#ifdef ETH_TP_MDI_AUTO
+ /*
+ * MDI setting is only allowed when autoneg is enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
+#endif /* ETH_TP_MDI_AUTO */
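
/* Usage note: this path is driven by an ethtool new enough to expose the
 * MDI-X control, e.g. `ethtool -s eth0 mdix auto`; per the check above,
 * forcing mdix on/off is rejected unless autoneg is enabled.
 */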
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+ } else {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ }
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
}
}
-#ifdef ETH_TP_MDI_X
- /* MDI-X =>2; MDI=>1; Invalid =>0 */
- if (hw->phy.media_type == e1000_media_type_copper) {
- switch (ecmd->eth_tp_mdix) {
- case ETH_TP_MDI_X:
- hw->phy.mdix = 2;
- break;
- case ETH_TP_MDI:
- hw->phy.mdix = 1;
- break;
- case ETH_TP_MDI_INVALID:
- default:
- hw->phy.mdix = 0;
- break;
- }
+#ifdef ETH_TP_MDI_AUTO
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
}
-#endif /* ETH_TP_MDI_X */
+#endif /* ETH_TP_MDI_AUTO */
/* reset the link */
if (netif_running(adapter->netdev)) {
igb_down(adapter);
hw->fc.current_mode = hw->fc.requested_mode;
- retval = ((hw->phy.media_type == e1000_media_type_copper) ?
- e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000_set_fc_watermarks_generic(hw);
+ }
}
+out:
clear_bit(__IGB_RESETTING, &adapter->state);
return retval;
}
ret_val = e1000_write_nvm(hw, first_word,
last_word - first_word + 1, eeprom_buff);
- /* Update the checksum over the first part of the EEPROM if needed
+ /* Update the checksum if the write succeeded,
* and flush shadow RAM for 82573 controllers */
- if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
+ if (ret_val == 0)
e1000_update_nvm_checksum(hw);
kfree(eeprom_buff);
strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
- (adapter->fw_version & 0xF000) >> 12,
- (adapter->fw_version & 0x0FF0) >> 4,
- adapter->fw_version & 0x000F);
-
+ strncpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version) - 1);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
switch (adapter->hw.mac.type) {
case e1000_i350:
+ case e1000_i354:
test = reg_test_i350;
toggle = 0x7FEFF3FF;
break;
+ case e1000_i210:
+ case e1000_i211:
+ test = reg_test_i210;
+ toggle = 0x7FEFF3FF;
+ break;
case e1000_82580:
test = reg_test_82580;
toggle = 0x7FEFF3FF;
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
- u16 temp;
- u16 checksum = 0;
- u16 i;
-
*data = 0;
- /* Read and add up the contents of the EEPROM */
- for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
- *data = 1;
- break;
- }
- checksum += temp;
- }
- /* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16) NVM_SUM) && !(*data))
+ /* Validate NVM checksum */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
*data = 2;
return *data;
ics_mask = 0x77DCFED5;
break;
case e1000_i350:
+ case e1000_i354:
ics_mask = 0x77DCFED5;
break;
+ case e1000_i210:
+ case e1000_i211:
+ ics_mask = 0x774CFED5;
+ break;
default:
ics_mask = 0x7FFFFFFF;
break;
rx_ring->dev = pci_dev_to_dev(adapter->pdev);
rx_ring->netdev = adapter->netdev;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- rx_ring->rx_buffer_len = IGB_RXBUFFER_512;
+ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
#endif
rx_ring->reg_idx = adapter->vfs_allocated_count;
hw->mac.autoneg = FALSE;
if (hw->phy.type == e1000_phy_m88) {
+ if (hw->phy.id != I210_I_PHY_ID) {
/* Auto-MDI/MDIX Off */
e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
/* autoneg off */
e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ }
} else {
/* enable MII loopback */
if (hw->phy.type == e1000_phy_82580)
/* force 1000, set loopback */
e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
- ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
- udelay(500);
+ mdelay(500);
return 0;
}
reg &= ~E1000_CONNSW_ENRGSRC;
E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+ /* Unset sigdetect for SERDES loopback on
+ * 82580 and newer devices
+ */
+ if (hw->mac.type >= e1000_82580) {
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+ }
+
/* Set PCS register for forced speed */
reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
+ if (hw->phy.id == I210_I_PHY_ID)
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
e1000_phy_commit(hw);
}
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size /= 2;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size + 10) == 0xBE) &&
- (*(skb->data + frame_size + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data = rx_buffer->skb->data;
+#else
+ data = kmap(rx_buffer->page);
+#endif
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ kunmap(rx_buffer->page);
+
+#endif
+ return match;
}
static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- const int bufsz = rx_ring->rx_buffer_len;
-#else
- const int bufsz = IGB_RX_HDR_LEN;
-#endif
u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
/* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
- /* unmap rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- bufsz,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
+ IGB_RX_BUFSZ,
+#endif
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
+ IGB_RX_BUFSZ,
+#endif
+ DMA_FROM_DEVICE);
+
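+ /* Note on the pattern above: the buffer now stays DMA-mapped for the
+ * life of the ring; dma_sync_single_for_cpu() passes ownership to the
+ * CPU just long enough to inspect the frame, and
+ * dma_sync_single_for_device() hands it back before the descriptor is
+ * reused, avoiding the unmap/remap cost per loopback frame.
+ */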
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
struct igb_ring *rx_ring = &adapter->test_rx_ring;
u16 i, j, lc, good_cnt;
int ret_val = 0;
- unsigned int size = IGB_RXBUFFER_512;
+ unsigned int size = IGB_RX_HDR_LEN;
netdev_tx_t tx_ret_val;
struct sk_buff *skb;
*data = 0;
goto out;
}
+ if (adapter->hw.mac.type == e1000_i354) {
+ dev_info(&adapter->pdev->dev,
+ "Loopback test not supported on i354.\n");
+ *data = 0;
+ goto out;
+ }
*data = igb_setup_desc_rings(adapter);
if (*data)
goto out;
dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
/* power up link for link test */
- igb_power_up_link(adapter);
+ igb_power_up_link(adapter);
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
eth_test->flags |= ETH_TEST_FL_FAILED;
igb_reset(adapter);
+
/* power up link for loopback test */
- igb_power_up_link(adapter);
+ igb_power_up_link(adapter);
if (igb_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
msleep_interruptible(4 * 1000);
}
-static int igb_wol_exclusion(struct igb_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_hw *hw = &adapter->hw;
- int retval = 1; /* fail by default */
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- /* WoL not supported */
- wol->supported = 0;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events not supported on port B */
- if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- default:
- /* dual port cards only support WoL on port A from now on
- * unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
- if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
- !adapter->eeprom_wol) {
- wol->supported = 0;
- break;
- }
-
- retval = 0;
- }
-
- return retval;
-}
-
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
WAKE_PHY;
wol->wolopts = 0;
- /* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
/* apply any specific unsupported masks here */
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
+
/* these settings will always override what we currently have */
adapter->wol = 0;
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_alloc_failed", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv4_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv4e_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv6_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv6e_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_tcp_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_udp_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_sctp_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_nfs_packets", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+static int igb_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ switch (adapter->hw.mac.type) {
+#ifdef HAVE_PTP_1588_CLOCK
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+ /* 82576 does not support timestamping all packets. */
+ if (adapter->hw.mac.type >= e1000_82580)
+ info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ else
+ info->rx_filters |=
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+#endif /* HAVE_PTP_1588_CLOCK */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
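
/* Usage note: this callback backs `ethtool -T <ifname>`. On 82580 and
 * newer it reports HWTSTAMP_FILTER_ALL plus the PHC index registered by
 * igb_ptp_init(); 82576 is limited to the PTP-specific filters listed
 * above, and 82575 to software timestamping only.
 */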
+
#ifdef CONFIG_PM_RUNTIME
static int igb_ethtool_begin(struct net_device *netdev)
{
#ifndef HAVE_NDO_SET_FEATURES
static u32 igb_get_rx_csum(struct net_device *netdev)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
- return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
+ return !!(netdev->features & NETIF_F_RXCSUM);
}
static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
- int i;
+ const u32 feature_list = NETIF_F_RXCSUM;
- for (i = 0; i < adapter->rss_queues; i++) {
- struct igb_ring *ring = adapter->rx_ring[i];
- if (data)
- set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
- else
- clear_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
- }
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
return 0;
}
-static u32 igb_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (data) {
#ifdef NETIF_F_IPV6_CSUM
- netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
- } else {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SCTP_CSUM);
+ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
#else
- netdev->features |= NETIF_F_IP_CSUM;
- if (adapter->hw.mac.type == e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
- } else {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
+ u32 feature_list = NETIF_F_IP_CSUM;
#endif
- }
+
+ if (adapter->hw.mac.type >= e1000_82576)
+ feature_list |= NETIF_F_SCTP_CSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
return 0;
}
#ifdef NETIF_F_TSO
static int igb_set_tso(struct net_device *netdev, u32 data)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
-#ifndef HAVE_NETDEV_VLAN_FEATURES
- int i;
- struct net_device *v_netdev;
-#endif
-
- if (data) {
- netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_TSO6
- netdev->features |= NETIF_F_TSO6;
-#endif
- } else {
- netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
- netdev->features &= ~NETIF_F_TSO6;
+ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+ const u32 feature_list = NETIF_F_TSO;
#endif
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
#ifndef HAVE_NETDEV_VLAN_FEATURES
+ if (!data) {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct net_device *v_netdev;
+ int i;
+
/* disable TSO on all VLANs if they're present */
if (!adapter->vlgrp)
goto tso_out;
+
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
v_netdev = vlan_group_get_device(adapter->vlgrp, i);
if (!v_netdev)
continue;
- v_netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
- v_netdev->features &= ~NETIF_F_TSO6;
-#endif
+ v_netdev->features &= ~feature_list;
vlan_group_set_device(adapter->vlgrp, i, v_netdev);
}
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
}
-#ifndef HAVE_NETDEV_VLAN_FEATURES
tso_out:
+
#endif /* HAVE_NETDEV_VLAN_FEATURES */
- dev_info(pci_dev_to_dev(adapter->pdev), "TSO is %s\n",
- data ? "Enabled" : "Disabled");
return 0;
}
return;
}
#endif
-static struct ethtool_ops igb_ethtool_ops = {
+
+#ifdef ETHTOOL_GEEE
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ret_val;
+ u16 phy_data;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
+
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
+
+ /* EEE Link Partner Advertised */
+ switch (hw->mac.type) {
+ case e1000_i350:
+ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+ E1000_EEE_LP_ADV_DEV_I210,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ default:
+ break;
+ }
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
+ edata->tx_lpi_enabled = true;
+
+ /*
+ * report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised = 0;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef ETHTOOL_SEEE
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI time is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE options is not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+
+ /* reset link */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+#endif /* ETHTOOL_SEEE */
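
/* Usage note: these callbacks back `ethtool --show-eee <ifname>` and
 * `ethtool --set-eee <ifname> eee on|off [advertise N]`; N is a
 * link-mode mask that ethtool_adv_to_mmd_eee_adv_t() converts to MDIO
 * EEE advertisement bits, and only 100/1000 full duplex survive the
 * validation in igb_set_eee() above.
 */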
+
+#ifdef ETHTOOL_GRXRINGS
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
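+ /* fall through */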
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
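+ /* fall through */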
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
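+ /* fall through */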
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
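+ /* fall through */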
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+ void *rule_locs)
+#else
+ u32 *rule_locs)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /*
+ * RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ DPRINTK(DRV, WARNING,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+#endif /* ETHTOOL_GRXRINGS */
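
/* Usage note: these hooks implement `ethtool -n/-N <ifname> rx-flow-hash`.
 * For example, enabling 4-tuple hashing for UDP over IPv4:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * arrives in igb_set_rss_hash_opt() as RXH_IP_SRC | RXH_IP_DST |
 * RXH_L4_B_0_1 | RXH_L4_B_2_3 and sets E1000_MRQC_RSS_FIELD_IPV4_UDP,
 * subject to the out-of-order warning for fragmented UDP.
 */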
+
+static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
.get_drvinfo = igb_get_drvinfo,
.set_pauseparam = igb_set_pauseparam,
.self_test = igb_diag_test,
.get_strings = igb_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
.set_phys_id = igb_set_phys_id,
#else
.phys_id = igb_phys_id,
#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
.get_sset_count = igb_get_sset_count,
#else
#endif
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+ .get_ts_info = igb_get_ts_info,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef CONFIG_PM_RUNTIME
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
#ifndef HAVE_NDO_SET_FEATURES
.get_rx_csum = igb_get_rx_csum,
.set_rx_csum = igb_set_rx_csum,
- .get_tx_csum = igb_get_tx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = igb_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_GADV_COAL
.get_advcoal = igb_get_adv_coal,
- .set_advcoal = igb_set_dmac_coal
+ .set_advcoal = igb_set_dmac_coal,
#endif /* ETHTOOL_GADV_COAL */
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+ .get_eee = igb_get_eee,
+#endif
+#ifdef ETHTOOL_SEEE
+ .set_eee = igb_set_eee,
+#endif
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef ETHTOOL_GRXRINGS
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+#endif
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
+ .size = sizeof(struct ethtool_ops_ext),
+ .get_ts_info = igb_get_ts_info,
+ .set_phys_id = igb_set_phys_id,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
};
void igb_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
}
-
+#else
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+ /* have to "undeclare" const on this struct to remove warnings */
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
+}
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#endif /* SIOCETHTOOL */
+
--- /dev/null
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+#ifdef IGB_HWMON
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef HAVE_I2C_SUPPORT
+static struct i2c_board_info i350_sensor_info = {
+ I2C_BOARD_INFO("i350bb", (0xf8 >> 1)),
+};
+#endif /* HAVE_I2C_SUPPORT */
+
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ return sprintf(buf, "loc%u\n",
+ igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value;
+
+ /* reset the temp field */
+ igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+ value = igb_attr->sensor->temp;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->caution_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->max_op_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+ unsigned int offset, int type) {
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+ n_attr = adapter->igb_hwmon_buff.n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_label", offset);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_input", offset);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_max", offset);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_crit", offset);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+ /* These are always the same regardless of type */
+ igb_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor[offset];
+ igb_attr->hw = &adapter->hw;
+ igb_attr->dev_attr.store = NULL;
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
+ rc = device_create_file(&adapter->pdev->dev,
+ &igb_attr->dev_attr);
+ if (rc == 0)
+ ++adapter->igb_hwmon_buff.n_hwmon;
+
+ return rc;
+}
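
/* Note: the generated names follow the hwmon sysfs ABI
 * (tempN_label/_input/_max/_crit). The files are created with
 * device_create_file() on the PCI device, while hwmon_device_register()
 * in igb_sysfs_init() below provides the hwmon class node; readings are
 * in millidegrees Celsius per the "* 1000" scaling in the show functions.
 */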
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+ int i;
+
+ if (adapter == NULL)
+ return;
+
+ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
+ device_remove_file(&adapter->pdev->dev,
+ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
+ }
+
+ kfree(adapter->igb_hwmon_buff.hwmon_list);
+
+ if (adapter->igb_hwmon_buff.device)
+ hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+ igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ unsigned int i;
+ int n_attrs;
+ int rc = 0;
+#ifdef HAVE_I2C_SUPPORT
+ struct i2c_client *client = NULL;
+#endif /* HAVE_I2C_SUPPORT */
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+ goto exit;
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+ if (rc)
+ goto exit;
+#ifdef HAVE_I2C_SUPPORT
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto exit;
+ }
+ adapter->i2c_client = client;
+#endif /* HAVE_I2C_SUPPORT */
+
+ /* Allocate space for the maximum number of attributes:
+ * max num sensors * values (loc, temp, max, caution)
+ */
+ n_attrs = E1000_MAX_SENSORS * 4;
+ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+ GFP_KERNEL);
+ if (!igb_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+ if (IS_ERR(igb_hwmon->device)) {
+ rc = PTR_ERR(igb_hwmon->device);
+ goto err;
+ }
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+ /* Only create hwmon sysfs entries for sensors that have
+ * meaningful data.
+ */
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto err;
+ }
+
+ goto exit;
+
+err:
+ igb_sysfs_del_adapter(adapter);
+exit:
+ return rc;
+}
+#endif /* IGB_HWMON */
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include <linux/pm_runtime.h>
#endif /* CONFIG_PM_RUNTIME */
+#include <linux/if_bridge.h>
#include "igb.h"
#include "igb_vmdq.h"
#include <linux/uio_driver.h>
+#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) || defined(DEBUG_ITR)
+#define DRV_DEBUG "_debug"
+#else
#define DRV_DEBUG
+#endif
#define DRV_HW_PERF
#define VERSION_SUFFIX
-#define MAJ 3
-#define MIN 4
-#define BUILD 8
+#define MAJ 5
+#define MIN 0
+#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
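
/* For reference, the string pasting above expands to
 *	"5" "." "0" "." "6" VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
 * i.e. "5.0.6" in a normal build and "5.0.6_debug" when one of the
 * DEBUG* macros enables the "_debug" suffix defined above.
 */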
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2013 Intel Corporation.";
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) },
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
-#ifdef HAVE_HW_TIME_STAMP
-static void igb_init_hw_timer(struct igb_adapter *adapter);
-#endif
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
+static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_vlan_mode(struct net_device *, struct vlan_group *);
#endif
#ifdef HAVE_VLAN_PROTOCOL
-static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
-static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
+static int igb_vlan_rx_add_vid(struct net_device *,
+ __be16 proto, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *,
+ __be16 proto, u16);
#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_add_vid(struct net_device *,
+ __always_unused __be16 proto, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *,
+ __always_unused __be16 proto, u16);
+#else
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
+#endif
#else
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
#ifdef IFLA_VF_MAX
static int igb_ndo_set_vf_mac( struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos);
+ int vf, u16 vlan, u8 qos);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting);
+#endif
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
#endif
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-#endif
#ifdef CONFIG_PM
#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_suspend(struct device *dev);
#endif /* CONFIG_PM_RUNTIME */
static const struct dev_pm_ops igb_pm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
- .suspend = igb_suspend,
- .resume = igb_resume,
- .freeze = igb_suspend,
- .thaw = igb_resume,
- .poweroff = igb_suspend,
- .restore = igb_resume,
+ .suspend = igb_suspend,
+ .resume = igb_resume,
+ .freeze = igb_suspend,
+ .thaw = igb_resume,
+ .poweroff = igb_suspend,
+ .restore = igb_resume,
#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = igb_runtime_suspend,
- .runtime_resume = igb_runtime_resume,
- .runtime_idle = igb_runtime_idle,
+ .runtime_suspend = igb_runtime_suspend,
+ .runtime_resume = igb_runtime_resume,
+ .runtime_idle = igb_runtime_idle,
#endif
#else /* Linux >= 2.6.34 */
SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
#endif /* CONFIG_PM_RUNTIME */
#endif /* Linux version */
};
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state);
+static int igb_resume(struct pci_dev *pdev);
#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
#endif /* CONFIG_PM */
#ifndef USE_REBOOT_NOTIFIER
#ifdef CONFIG_PM
#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
.driver.pm = &igb_pm_ops,
+#else
+ .suspend = igb_suspend,
+ .resume = igb_resume,
#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
#endif /* CONFIG_PM */
#ifndef USE_REBOOT_NOTIFIER
add = TRUE;
vfta = adapter->shadow_vfta[index];
-
+
if (add)
vfta |= mask;
else
adapter->shadow_vfta[index] = vfta;
}
-#ifdef HAVE_HW_TIME_STAMP
-/**
- * igb_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t igb_read_clock(const struct cyclecounter *tc)
-{
- struct igb_adapter *adapter =
- container_of(tc, struct igb_adapter, cycles);
- struct e1000_hw *hw = &adapter->hw;
- u64 stamp = 0;
- int shift = 0;
-
- /*
- * The timestamp latches on lowest register read. For the 82580
- * the lowest register is SYSTIMR instead of SYSTIML. However we never
- * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
- */
- if (hw->mac.type >= e1000_82580) {
- stamp = E1000_READ_REG(hw, E1000_SYSTIMR) >> 8;
- shift = IGB_82580_TSYNC_SHIFT;
- }
-
- stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIML) << shift;
- stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << (shift + 32);
- return stamp;
-}
-
-#endif /* SIOCSHWTSTAMP */
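
/* The cyclecounter removed above is superseded by the adapter->cc and
 * adapter->tc fields added under HAVE_PTP_1588_CLOCK in igb.h; the
 * equivalent SYSTIM read now lives with the igb_ptp_* code declared
 * there (an assumption from the declarations in this patch; the PTP
 * file itself is not shown in this hunk).
 */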
static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
//module_param(debug, int, 0);
//MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
igb_driver_string, igb_driver_version);
printk(KERN_INFO "%s\n", igb_copyright);
-#ifdef IGB_SYSFS
-/* only use IGB_PROCFS if IGB_SYSFS is not defined */
+#ifdef IGB_HWMON
+/* only use IGB_PROCFS if IGB_HWMON is not defined */
#else
#ifdef IGB_PROCFS
if (igb_procfs_topdir_init())
printk(KERN_INFO "Procfs failed to initialize topdir\n");
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
+#endif /* IGB_HWMON */
#ifdef IGB_DCA
dca_register_notify(&dca_notifier);
#endif
pci_unregister_driver(&igb_driver);
-#ifdef IGB_SYSFS
-/* only compile IGB_PROCFS if IGB_SYSFS is not defined */
+#ifdef IGB_HWMON
+/* only compile IGB_PROCFS if IGB_HWMON is not defined */
#else
#ifdef IGB_PROCFS
igb_procfs_topdir_exit();
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
+#endif /* IGB_HWMON */
}
#undef module_exit
case e1000_82575:
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
}
}
-static void igb_free_queues(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
- adapter->num_rx_queues = 0;
- adapter->num_tx_queues = 0;
-}
-
-/**
- * igb_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-#ifdef HAVE_DEVICE_NUMA_NODE
- int orig_node = adapter->node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
-#ifdef HAVE_DEVICE_NUMA_NODE
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
-#endif /* HAVE_DEVICE_NUMA_NODE */
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->dev = pci_dev_to_dev(adapter->pdev);
- ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
- adapter->tx_ring[i] = ring;
- }
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
-#ifdef HAVE_DEVICE_NUMA_NODE
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
-#endif /* HAVE_DEVICE_NUMA_NODE */
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->dev = pci_dev_to_dev(adapter->pdev);
- ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-#endif
-#ifndef HAVE_NDO_SET_FEATURES
- /* enable rx checksum */
- set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
-
-#endif
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
- /* On i350, loopback VLAN packets have the tag byte-swapped. */
- if (adapter->hw.mac.type == e1000_i350)
- set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
- adapter->rx_ring[i] = ring;
- }
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
-
- igb_cache_ring_register(adapter);
-
- return E1000_SUCCESS;
-
-err:
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
- igb_free_queues(adapter);
-
- return -ENOMEM;
-}
-
static void igb_configure_lli(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
/*
* 82576 uses a table that essentially consists of 2 columns
* with 8 rows. The ordering is column-major so we use the
- * lower 3 bits as the row index, and the 4th bit as the
+ * lower 3 bits as the row index, and the 4th bit as the
* column offset.
*/
if (rx_queue > IGB_N0_QUEUE)
break;
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
/*
* On 82580 and newer adapters the scheme is similar to 82576
* however instead of ordering column-major we have things
case e1000_82576:
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- int i, err = 0, vector = 0;
+ int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
&igb_msix_other, 0, netdev->name, adapter);
if (err)
- goto out;
- vector++;
+ goto err_out;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
+ vector++;
+
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
if (q_vector->rx.ring && q_vector->tx.ring)
igb_msix_ring, 0, q_vector->name,
q_vector);
if (err)
- goto out;
- vector++;
+ goto err_free;
}
igb_configure_msix(adapter);
return 0;
-out:
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
return err;
}
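The reworked MSI-X error path above is the standard acquire-then-unwind pattern: the first free_irq() releases the "other" (link/mailbox) vector, and the loop releases only the vector-1 ring IRQs that were actually granted. A minimal standalone sketch of the same pattern with hypothetical acquire()/release() helpers, not driver code:

#include <stdio.h>

#define NUM_RING_VECTORS 4

/* hypothetical stand-ins for request_irq()/free_irq() */
static int acquire(int i)  { return (i == 3) ? -1 : 0; } /* fail at vector 3 */
static void release(int i) { printf("released vector %d\n", i); }

int main(void)
{
	int vector = 0, free_vector = 0, i;

	if (acquire(vector))		/* the "other" (link/mailbox) vector */
		return -1;		/* nothing acquired yet, plain return */

	for (i = 0; i < NUM_RING_VECTORS; i++) {
		vector++;
		if (acquire(vector))
			goto err_free;
	}
	return 0;

err_free:
	release(free_vector++);		/* the "other" vector */
	vector--;			/* the failing request was never granted */
	for (i = 0; i < vector; i++)
		release(free_vector++);
	return -1;
}

Incrementing vector at the top of the request loop is what lets the unwind path use vector-- to count only the requests that were granted.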
}
}
+/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+#ifndef IGB_NO_LRO
+ __skb_queue_purge(&q_vector->lrolist.active);
+#endif
+ kfree(q_vector);
+}
+
/**
* igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
- int v_idx;
+ int v_idx = adapter->num_q_vectors;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- if (!q_vector)
- continue;
- netif_napi_del(&q_vector->napi);
-#ifndef IGB_NO_LRO
- if (q_vector->lrolist) {
- __skb_queue_purge(&q_vector->lrolist->active);
- vfree(q_vector->lrolist);
- q_vector->lrolist = NULL;
- }
-#endif
- kfree(q_vector);
- }
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
}
/**
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
- igb_free_queues(adapter);
igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter);
}
vfre &= ~(1 << vf_queue);
E1000_WRITE_REG(hw, E1000_VFRE, vfre);
- /* Disable MDFB related bit */
+ /* Disable MDFB related bit; the register bit is write-1-to-clear */
mdfb = E1000_READ_REG(hw, E1000_MDFB);
- mdfb &= ~(1 << vf_queue);
+ mdfb |= (1 << vf_queue);
E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
/* Reset the specific VF */
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- if (hw->mac.type != e1000_i350)
+ if ((hw->mac.type != e1000_i350) &&
+ (hw->mac.type != e1000_i354))
return;
reg = E1000_READ_REG(hw, E1000_DTXCTL);
* @adapter - board private structure
*
 * Enable the HW to detect a malicious driver and send an interrupt to
- * the driver.
- *
- * Only available on i350 device
+ * the driver.
**/
static void igb_enable_mdd(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 reg;
+ /* Only available on i350 device */
if (hw->mac.type != e1000_i350)
return;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
- if (!igb_check_vf_assignment(adapter)) {
+ if (!pci_vfs_assigned(pdev)) {
/*
* disable iov and allow time for transactions to
* clear
int old_vfs = 0;
int i;
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
- old_vfs = igb_find_enabled_vfs(adapter);
-#endif
+ old_vfs = pci_num_vf(pdev);
if (old_vfs) {
dev_info(pci_dev_to_dev(pdev),
"%d pre-allocated VFs found - override "
- "max_vfs setting of %d\n", old_vfs,
+ "max_vfs setting of %d\n", old_vfs,
adapter->vfs_allocated_count);
adapter->vfs_allocated_count = old_vfs;
- }
+ }
/* no VFs requested, do nothing */
if (!adapter->vfs_allocated_count)
return;
for (i = 0; i < adapter->vfs_allocated_count; i++)
igb_vf_configure(adapter, i);
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ /* Enable VM to VM loopback by default */
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+
/* DMA Coalescing is not supported in IOV mode. */
if (adapter->hw.mac.type >= e1000_i350)
adapter->dmac = IGB_DMAC_DISABLE;
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
struct pci_dev *pdev = adapter->pdev;
int err;
int numvecs, i;
+ if (!msix)
+ adapter->int_mode = IGB_INT_MODE_MSI;
+
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
/* Don't do anything; this is system default */
break;
}
+}
-#ifdef HAVE_TX_MQ
- /* Notify the stack of the (possibly) reduced Tx Queue count. */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
-#else
- adapter->netdev->real_num_tx_queues =
- (adapter->vmdq_pools ? 1 : adapter->num_tx_queues);
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
-#endif /* HAVE_TX_MQ */
+static void igb_add_ring(struct igb_ring *ring,
+ struct igb_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
}
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
{
struct igb_q_vector *q_vector;
- struct e1000_hw *hw = &adapter->hw;
- int v_idx;
-#ifdef HAVE_DEVICE_NUMA_NODE
- int orig_node = adapter->node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
-
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-#ifdef HAVE_DEVICE_NUMA_NODE
- if ((adapter->num_q_vectors == (adapter->num_rx_queues +
- adapter->num_tx_queues)) &&
- (adapter->num_rx_queues == v_idx))
- adapter->node = orig_node;
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
-#endif /* HAVE_DEVICE_NUMA_NODE */
- q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
- adapter->node);
- if (!q_vector)
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
- if (!q_vector)
- goto err_out;
- q_vector->adapter = adapter;
- q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
- q_vector->itr_val = IGB_START_ITR;
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
+ struct igb_ring *ring;
+ int ring_count, size;
+
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
#ifndef IGB_NO_LRO
- if (v_idx < adapter->num_rx_queues) {
- int size = sizeof(struct igb_lro_list);
- q_vector->lrolist = vzalloc_node(size, q_vector->numa_node);
- if (!q_vector->lrolist)
- q_vector->lrolist = vzalloc(size);
- if (!q_vector->lrolist)
- goto err_out;
- __skb_queue_head_init(&q_vector->lrolist->active);
- }
-#endif /* IGB_NO_LRO */
+ /* initialize LRO */
+ __skb_queue_head_init(&q_vector->lrolist.active);
+
+#endif
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR based on ring type */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
}
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
- return 0;
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
-err_out:
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
- igb_free_q_vectors(adapter);
- return -ENOMEM;
-}
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
-static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
- q_vector->rx.ring = adapter->rx_ring[ring_idx];
- q_vector->rx.ring->q_vector = q_vector;
- q_vector->rx.count++;
- q_vector->itr_val = adapter->rx_itr_setting;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
-}
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
-static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
- q_vector->tx.ring = adapter->tx_ring[ring_idx];
- q_vector->tx.ring->q_vector = q_vector;
- q_vector->tx.count++;
- q_vector->itr_val = adapter->tx_itr_setting;
- q_vector->tx.work_limit = adapter->tx_work_limit;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
+
+#ifndef HAVE_NDO_SET_FEATURES
+ /* enable rx checksum */
+ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
+
+#endif
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
}
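igb_alloc_q_vector() above replaces the old per-ring kzalloc_node() calls with a single kzalloc() that carves the q_vector and its at most two rings out of one block; q_vector->ring is the trailing storage the ring pointer walks through. A minimal userspace sketch of that allocation pattern, using simplified stand-in types rather than the driver structures:

#include <stdlib.h>

struct ring {
	int count;
	int queue_index;
};

struct q_vector {
	int itr_val;
	struct ring ring[];	/* trailing storage for up to 2 rings */
};

static struct q_vector *alloc_q_vector(int ring_count)
{
	size_t size = sizeof(struct q_vector) +
		      sizeof(struct ring) * ring_count;

	return calloc(1, size);	/* kzalloc() analogue: one zeroed block */
}

int main(void)
{
	struct q_vector *qv = alloc_q_vector(2);

	if (!qv)
		return 1;
	qv->ring[0].queue_index = 0;	/* Tx ring */
	qv->ring[1].queue_index = 0;	/* Rx ring */
	free(qv);			/* one allocation, one free */
	return 0;
}

A single allocation keeps a vector and the rings it services adjacent in memory and leaves igb_free_q_vector() with exactly one kfree().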
/**
- * igb_map_ring_to_vector - maps allocated queues to vectors
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function maps the recently allocated queues to vectors.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
-static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
- int i;
- int v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
- if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
- (adapter->num_q_vectors < adapter->num_tx_queues))
- return -ENOMEM;
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
- if (adapter->num_q_vectors >=
- (adapter->num_rx_queues + adapter->num_tx_queues)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
- } else {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (i < adapter->num_tx_queues)
- igb_map_tx_ring_to_vector(adapter, i, v_idx);
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
}
- for (; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
}
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
}
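The split logic in igb_alloc_q_vectors() is easiest to see with numbers. A hedged standalone sketch that mirrors the loop above and prints the Tx/Rx ring counts per vector for two hypothetical configurations:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void spread(int q_vectors, int rxr, int txr)
{
	int v_idx = 0;

	printf("%d vectors, %d Rx, %d Tx:\n", q_vectors, rxr, txr);

	/* enough vectors: give every Rx ring its own vector first */
	if (q_vectors >= rxr + txr) {
		for (; rxr; v_idx++, rxr--)
			printf("  v%d: tx=0 rx=1\n", v_idx);
	}

	/* spread what is left evenly over the remaining vectors */
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr, q_vectors - v_idx);

		printf("  v%d: tx=%d rx=%d\n", v_idx, tqpv, rqpv);
		rxr -= rqpv;
		txr -= tqpv;
	}
}

int main(void)
{
	spread(4, 4, 4);	/* shared vectors: one Tx plus one Rx each */
	spread(8, 4, 4);	/* split vectors: Rx-only first, then Tx-only */
	return 0;
}

With enough vectors each ring gets its own vector (Rx first); otherwise DIV_ROUND_UP pairs one Tx and one Rx ring per vector.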
/**
*
* This function initializes the interrupts and allocates all of the queues.
**/
-static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
struct pci_dev *pdev = adapter->pdev;
int err;
- igb_set_interrupt_capability(adapter);
+ igb_set_interrupt_capability(adapter, msix);
err = igb_alloc_q_vectors(adapter);
if (err) {
goto err_alloc_q_vectors;
}
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- err = igb_map_ring_to_vector(adapter);
- if (err) {
- dev_err(pci_dev_to_dev(pdev), "Invalid q_vector to ring mapping\n");
- goto err_map_queues;
- }
-
+ igb_cache_ring_register(adapter);
return 0;
-err_map_queues:
- igb_free_queues(adapter);
-err_alloc_queues:
- igb_free_q_vectors(adapter);
+
err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter);
return err;
if (!err)
goto request_done;
/* fall back to MSI */
- igb_clear_interrupt_scheme(adapter);
- igb_reset_sriov_capability(adapter);
- if (!pci_enable_msi(pdev))
- adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
- adapter->num_tx_queues = 1;
- adapter->num_rx_queues = 1;
- adapter->num_q_vectors = 1;
- err = igb_alloc_q_vectors(adapter);
- if (err) {
- dev_err(pci_dev_to_dev(pdev),
- "Unable to allocate memory for vectors\n");
- goto request_done;
- }
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(pci_dev_to_dev(pdev),
- "Unable to allocate memory for queues\n");
- igb_free_q_vectors(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+ igb_reset_sriov_capability(adapter);
+ err = igb_init_interrupt_scheme(adapter, false);
+ if (err)
goto request_done;
- }
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
+ igb_configure(adapter);
}
igb_assign_vector(adapter->q_vector[0], 0);
if (adapter->vfs_allocated_count) {
E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
ims |= E1000_IMS_VMMB;
- /* For I350 device only enable MDD interrupts*/
- if ((adapter->mdd) &&
- (adapter->hw.mac.type == e1000_i350))
+ if (adapter->mdd)
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
ims |= E1000_IMS_MDDET;
}
E1000_WRITE_REG(hw, E1000_IMS, ims);
e1000_rx_fifo_flush_82575(&adapter->hw);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-
if (adapter->num_tx_queues > 1)
netdev->features |= NETIF_F_MULTI_QUEUE;
else
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
+ e1000_phy_hw_reset(&adapter->hw);
+
if (adapter->hw.phy.media_type == e1000_media_type_copper)
e1000_power_up_phy(&adapter->hw);
else
e1000_power_up_fiber_serdes_link(&adapter->hw);
-
- e1000_phy_hw_reset(&adapter->hw);
}
/**
e1000_shutdown_fiber_serdes_link(&adapter->hw);
}
+/* Detect and switch function for Media Auto Sense */
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+ bool link;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ link = igb_has_link(adapter);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 2) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (swap_now) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: changing media to fiber/serdes\n",
+ adapter->netdev->name);
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: changing media to copper\n",
+ adapter->netdev->name);
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "%s:AMS: Invalid media type found, returning\n",
+ adapter->netdev->name);
+ break;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+}
+
+#ifdef HAVE_I2C_SUPPORT
+/* igb_get_i2c_data - Reads the I2C SDA data bit
+ * @data: pointer to the igb adapter structure
+ *
+ * Returns the I2C data bit value
+ */
+static int igb_get_i2c_data(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+}
+
+/* igb_set_i2c_data - Sets the I2C data bit
+ * @data: pointer to the igb adapter structure
+ * @state: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ */
+static void igb_set_i2c_data(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ if (state)
+ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
+ i2cctl |= E1000_I2C_CLK_OE_N;
+
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+}
+
+/* igb_set_i2c_clk - Sets the I2C SCL clock
+ * @data: pointer to the igb adapter structure
+ * @state: state to set clock
+ *
+ * Sets the I2C clock line to state
+ */
+static void igb_set_i2c_clk(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ if (state) {
+ i2cctl |= E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ }
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/* igb_get_i2c_clk - Gets the I2C SCL clock state
+ * @data: pointer to the igb adapter structure
+ *
+ * Gets the I2C clock state
+ */
+static int igb_get_i2c_clk(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+}
+
+static const struct i2c_algo_bit_data igb_i2c_algo = {
+ .setsda = igb_set_i2c_data,
+ .setscl = igb_set_i2c_clk,
+ .getsda = igb_get_i2c_data,
+ .getscl = igb_get_i2c_clk,
+ .udelay = 5,
+ .timeout = 20,
+};
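For context, the i2c-algo-bit core drives the bus purely through these four callbacks plus the 5 us half-bit delay given in .udelay. A hedged standalone sketch, with stubbed register I/O rather than the driver's I2CPARAMS accesses, of the START condition that core generates:

#include <stdio.h>

/* hypothetical stand-ins for igb_set_i2c_data()/igb_set_i2c_clk() */
static void set_sda(int state) { printf("SDA=%d\n", state); }
static void set_scl(int state) { printf("SCL=%d\n", state); }

int main(void)
{
	set_sda(1);	/* release SDA, bus idle */
	set_scl(1);	/* release SCL */
	set_sda(0);	/* SDA falls while SCL is high: START condition */
	set_scl(0);	/* pull SCL low, ready to clock out the first bit */
	return 0;
}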
+
+/* igb_init_i2c - Init I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static s32 igb_init_i2c(struct igb_adapter *adapter)
+{
+ s32 status = E1000_SUCCESS;
+
+ /* I2C interface supported on i350 devices */
+ if (adapter->hw.mac.type != e1000_i350)
+ return E1000_SUCCESS;
+
+ /* Initialize the i2c bus which is controlled by the registers.
+ * This bus will use the i2c_algo_bit structure that implements
+ * the protocol through toggling of the 4 bits in the register.
+ */
+ adapter->i2c_adap.owner = THIS_MODULE;
+ adapter->i2c_algo = igb_i2c_algo;
+ adapter->i2c_algo.data = adapter;
+ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+ strlcpy(adapter->i2c_adap.name, "igb BB",
+ sizeof(adapter->i2c_adap.name));
+ status = i2c_bit_add_bus(&adapter->i2c_adap);
+ return status;
+}
+
+#endif /* HAVE_I2C_SUPPORT */
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (!hw->dev_spec._82575.eee_disable))
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
return 0;
}
igb_irq_disable(adapter);
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
del_timer_sync(&adapter->watchdog_timer);
if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
del_timer_sync(&adapter->dma_err_timer);
igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter);
#ifdef IGB_DCA
-
/* since we reset the hardware DCA settings were cleared */
igb_setup_dca(adapter);
#endif
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ *
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = E1000_SUCCESS;
+
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ E1000_WRITE_FLUSH(hw);
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: Unable to configure feature, disabling..\n",
+ adapter->netdev->name);
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ }
+ return ret_val;
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space;
- u16 hwm;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
switch (mac->type) {
case e1000_i350:
case e1000_82580:
+ case e1000_i354:
pba = E1000_READ_REG(hw, E1000_RXPBS);
pba = e1000_rxpbs_adjust_82580(pba);
break;
pba &= E1000_RXPBS_SIZE_MASK_82576;
break;
case e1000_82575:
+ case e1000_i210:
+ case e1000_i211:
default:
pba = E1000_PBA_34K;
break;
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
e1000_reset_hw(hw);
E1000_WRITE_REG(hw, E1000_WUC, 0);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ e1000_setup_init_funcs(hw, TRUE);
+ igb_check_options(adapter);
+ e1000_get_bus_info(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(pci_dev_to_dev(pdev),
+ "Error enabling Media Auto Sense\n");
+ }
if (e1000_init_hw(hw))
dev_err(pci_dev_to_dev(pdev), "Hardware Error\n");
+ /*
+ * Flow control settings reset on hardware reset, so guarantee flow
+ * control is off when forcing speed.
+ */
+ if (!hw->mac.autoneg)
+ e1000_force_mac_fc(hw);
+
igb_init_dmac(adapter, pba);
/* Re-initialize the thermal sensor on i350 devices. */
if (mac->type == e1000_i350 && hw->bus.func == 0) {
e1000_set_i2c_bb(hw);
e1000_init_thermal_sensor_thresh(hw);
}
+
+ /* Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ e1000_set_eee_i350(hw);
+ break;
+ case e1000_i354:
+ e1000_set_eee_i354(hw);
+ break;
+ default:
+ break;
+ }
+ }
+
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+#ifdef HAVE_PTP_1588_CLOCK
+ /* Re-enable PTP, where applicable. */
+ igb_ptp_reset(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
e1000_get_phy_info(hw);
+
+ adapter->devrc++;
}
#ifdef HAVE_NDO_SET_FEATURES
* Since there is no support for separate tx vlan accel
* enabled make sure tx flag is cleared if rx is.
*/
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+#else
if (!(features & NETIF_F_HW_VLAN_RX))
features &= ~NETIF_F_HW_VLAN_TX;
+#endif
/* If Rx checksum is disabled, then LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
{
u32 changed = netdev->features ^ features;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+#else
if (changed & NETIF_F_HW_VLAN_RX)
+#endif
igb_vlan_mode(netdev, features);
return 0;
}
+#ifdef NTF_SELF
+#ifdef USE_CONST_DEV_UC_CHAR
+static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int igb_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses so if a
+ * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ pr_info("%s: FDB only supports static addresses\n",
+ igb_driver_name);
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
+ u32 rar_uc_entries = hw->mac.rar_entry_count -
+ (adapter->vfs_allocated_count + 1);
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
+ err = dev_uc_add_excl(dev, addr);
+ else
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
+ }
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
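Worked example of the RAR budget above, with hypothetical numbers: on a part with 24 receive-address registers and 7 VFs enabled, rar_uc_entries = 24 - (7 + 1) = 16 (the extra entry is the PF's own MAC address), so the 17th unicast FDB entry would fail with -ENOMEM.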
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int igb_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int igb_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ pr_info("%s: FDB only supports static addresses\n",
+ igb_driver_name);
+ return -EINVAL;
+ }
+
+ if (adapter->vfs_allocated_count) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int igb_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->vfs_allocated_count)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+static int igb_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct nlattr *attr, *br_spec;
+ int rem;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ /* no bridge attributes supplied */
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA) {
+ e1000_vmdq_set_loopback_pf(hw, 0);
+ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE;
+ } else if (mode == BRIDGE_MODE_VEB) {
+ e1000_vmdq_set_loopback_pf(hw, 1);
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ } else
+ return -EINVAL;
+
+ netdev_info(adapter->netdev, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_BRIDGE_FILTER
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask)
+#else
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ u16 mode;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE)
+ mode = BRIDGE_MODE_VEB;
+ else
+ mode = BRIDGE_MODE_VEPA;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* NTF_SELF */
+
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops igb_netdev_ops = {
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
.ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
.ndo_get_vf_config = igb_ndo_get_vf_config,
-#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#endif /* IFLA_VF_MAX */
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
#ifdef HAVE_VLAN_RX_REGISTER
.ndo_vlan_rx_register = igb_vlan_mode,
#endif
+#ifdef NTF_SELF
+ .ndo_fdb_add = igb_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = igb_ndo_fdb_del,
+ .ndo_fdb_dump = igb_ndo_fdb_dump,
+#endif
+#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_bridge_setlink = igb_ndo_bridge_setlink,
+ .ndo_bridge_getlink = igb_ndo_bridge_getlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif
};
#ifdef CONFIG_IGB_VMDQ_NETDEV
#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = &igb_vmdq_tx_timeout;
#endif
-#ifdef NETIF_F_HW_VLAN_TX
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
dev->vlan_rx_register = &igb_vmdq_vlan_rx_register;
dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid;
}
#endif /* CONFIG_IGB_VMDQ_NETDEV */
+/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+static void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_fw_version fw;
+
+ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+ }
+ /* fall through */
+ default:
+ /* if option ROM is valid, display its version too */
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ if (fw.etrack_id != 0x0000) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
+ }
+ }
+ break;
+ }
+
+ return;
+}
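For reference, a small standalone sketch with made-up version numbers showing the strings the function above can hand to ethtool -i (the fourth, eep_build-only format is analogous):

#include <stdio.h>

int main(void)
{
	char buf[32];

	/* i210/i211 without flash: iNVM version */
	snprintf(buf, sizeof(buf), "%2d.%2d-%d", 3, 16, 1);
	puts(buf);		/* " 3.16-1" */

	/* NVM plus a valid option ROM */
	snprintf(buf, sizeof(buf), "%d.%d, 0x%08x, %d.%d.%d",
		 1, 63, 0x800009fa, 1, 3, 1);
	puts(buf);		/* "1.63, 0x800009fa, 1.3.1" */

	/* NVM with an eTrack ID but no option ROM */
	snprintf(buf, sizeof(buf), "%d.%d, 0x%08x", 1, 63, 0x800009fa);
	puts(buf);		/* "1.63, 0x800009fa" */
	return 0;
}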
+
+/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ default:
+ /* Shouldn't get here */
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "%s:AMS: Invalid port configuration, returning\n",
+ adapter->netdev->name);
+ break;
+ }
+}
+
/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
-#ifdef ETH_TP_MDI_X
- hw->phy.mdix = ETH_TP_MDI_INVALID;
-#else
hw->phy.mdix = AUTO_ALL_MODES;
-#endif /* ETH_TP_MDI_X */
hw->phy.disable_polarity_correction = FALSE;
hw->phy.ms_type = e1000_ms_hw_default;
}
#ifdef NETIF_F_RXHASH
NETIF_F_RXHASH |
#endif
-#ifdef HAVE_NDO_SET_FEATURES
NETIF_F_RXCSUM |
-#endif
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+#else
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_TX;
+#endif
+
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
#ifdef HAVE_NDO_SET_FEATURES
/* copy netdev features into list of user selectable features */
#endif
/* set this bit last since it cannot be part of hw_features */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#else
netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
#ifdef HAVE_NETDEV_VLAN_FEATURES
netdev->vlan_features |= NETIF_F_TSO |
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
-
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+#ifdef DEBUG
+ if (adapter->dmac != IGB_DMAC_DISABLE)
+ printk("%s: DMA Coalescing is enabled..\n", netdev->name);
+#endif
/* before reading the NVM, reset the controller to put the device in a
* known good starting state */
igb_rar_set(adapter, 0);
/* get firmware version for ethtool -i */
- e1000_read_nvm(&adapter->hw, 5, 1, &adapter->fw_version);
+ igb_set_fw_version(adapter);
+
+ /* Check if Media Autosense is enabled */
+ if (hw->mac.type == e1000_82580)
+ igb_init_mas(adapter);
setup_timer(&adapter->watchdog_timer, &igb_watchdog,
(unsigned long) adapter);
if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
e1000_validate_mdi_setting(hw);
- /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
- * enable the ACPI Magic Packet filter
- */
-
+ /* By default, support wake on port A */
if (hw->bus.func == 0)
- e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type >= e1000_82580)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support for non-port A ports */
+ if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
if (eeprom_data & IGB_EEPROM_APME)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
* lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82576_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
}
/* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol);
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igb_reset(adapter);
+ adapter->devrc = 0;
- /* let the f/w know that the h/w is now under the control of the
+#ifdef HAVE_I2C_SUPPORT
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+#endif /* HAVE_I2C_SUPPORT */
+
+ /* let the f/w know that the h/w is now under the control of the
* driver. */
igb_get_hw_control(adapter);
}
#endif
-#ifdef HAVE_HW_TIME_STAMP
+#ifdef HAVE_PTP_1588_CLOCK
/* do hw tstamp init after resetting */
- igb_init_hw_timer(adapter);
+ igb_ptp_init(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
-#endif
dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
(hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
"unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4\n" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2\n" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1\n" :
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
"unknown"));
dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
for (i = 0; i < 6; i++)
/* Initialize the thermal sensor on i350 devices. */
- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
- u16 ets_word;
+ if (hw->mac.type == e1000_i350) {
+ if (hw->bus.func == 0) {
+ u16 ets_word;
+
+ /*
+ * Read the NVM to determine if this i350 device
+ * supports an external thermal sensor.
+ */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ }
+#ifdef IGB_HWMON
- /*
- * Read the NVM to determine if this i350 device supports an
- * external thermal sensor.
- */
- e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
- if (ets_word != 0x0000 && ets_word != 0xFFFF)
- adapter->ets = true;
- else
- adapter->ets = false;
-#ifdef IGB_SYSFS
igb_sysfs_init(adapter);
#else
#ifdef IGB_PROCFS
+
igb_procfs_init(adapter);
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
+#endif /* IGB_HWMON */
} else {
adapter->ets = false;
}
- switch (hw->mac.type) {
- case e1000_i350:
- /* Enable EEE for internal copper PHY devices */
- if (hw->phy.media_type == e1000_media_type_copper)
- e1000_set_eee_i350(hw);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = e1000_set_eee_i350(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ break;
+ case e1000_i354:
+ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
+ (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = e1000_set_eee_i354(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ }
+ break;
+ default:
+ break;
+ }
+ }
- /* send driver version info to firmware */
+ /* send driver version info to firmware */
+ if (hw->mac.type >= e1000_i350)
igb_init_fw(adapter);
- break;
- default:
- break;
- }
+
#ifndef IGB_NO_LRO
if (netdev->features & NETIF_F_LRO)
dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled \n");
err_register:
igb_release_hw_control(adapter);
+#ifdef HAVE_I2C_SUPPORT
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+#endif /* HAVE_I2C_SUPPORT */
err_eeprom:
if (!e1000_check_reset_block(hw))
e1000_phy_hw_reset(hw);
pci_disable_device(pdev);
return err;
}
+#ifdef HAVE_I2C_SUPPORT
+/*
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+
+ /* free the adapter bus structure */
+ i2c_del_adapter(&adapter->i2c_adap);
+}
+#endif /* HAVE_I2C_SUPPORT */
/**
* igb_remove - Device Removal Routine
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
+#ifdef HAVE_I2C_SUPPORT
+ igb_remove_i2c(adapter);
+#endif /* HAVE_I2C_SUPPORT */
+#ifdef HAVE_PTP_1588_CLOCK
+ igb_ptp_stop(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
/* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled */
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
- kfree(adapter->mac_table);
- kfree(adapter->shadow_vfta);
- free_netdev(netdev);
-
- pci_disable_pcie_error_reporting(pdev);
-
- pci_disable_device(pdev);
-
-#ifdef IGB_SYSFS
+#ifdef IGB_HWMON
igb_sysfs_exit(adapter);
#else
#ifdef IGB_PROCFS
igb_procfs_exit(adapter);
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
-}
-
-#ifdef HAVE_HW_TIME_STAMP
-/**
- * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
- * @adapter: board private structure to initialize
- *
- * igb_init_hw_timer initializes the function pointer and values for the hw
- * timer found in hardware.
- **/
-static void igb_init_hw_timer(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_82580:
- memset(&adapter->cycles, 0, sizeof(adapter->cycles));
- adapter->cycles.read = igb_read_clock;
- adapter->cycles.mask = CLOCKSOURCE_MASK(64);
- adapter->cycles.mult = 1;
- /*
- * The 82580 timesync updates the system timer every 8ns by 8ns
- * and the value cannot be shifted. Instead we need to shift
- * the registers to generate a 64bit timer value. As a result
- * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
- * 24 in order to generate a larger value for synchronization.
- */
- adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
- /* disable system timer temporarily by setting bit 31 */
- E1000_WRITE_REG(hw, E1000_TSAUXC, 0x80000000);
- E1000_WRITE_FLUSH(hw);
-
- /* Set registers so that rollover occurs soon to test this. */
- E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x00000000);
- E1000_WRITE_REG(hw, E1000_SYSTIML, 0x80000000);
- E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x000000FF);
- E1000_WRITE_FLUSH(hw);
-
- /* enable system timer by clearing bit 31 */
- E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
- E1000_WRITE_FLUSH(hw);
+#endif /* IGB_HWMON */
+ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
- timecounter_init(&adapter->clock,
- &adapter->cycles,
- ktime_to_ns(ktime_get_real()));
- /*
- * Synchronize our NIC clock against system wall clock. NIC
- * time stamp reading requires ~3us per sample, each sample
- * was pretty stable even under load => only require 10
- * samples for each offset comparison.
- */
- memset(&adapter->compare, 0, sizeof(adapter->compare));
- adapter->compare.source = &adapter->clock;
- adapter->compare.target = ktime_get_real;
- adapter->compare.num_samples = 10;
- timecompare_update(&adapter->compare, 0);
- break;
- case e1000_82576:
- /*
- * Initialize hardware timer: we keep it running just in case
- * that some program needs it later on.
- */
- memset(&adapter->cycles, 0, sizeof(adapter->cycles));
- adapter->cycles.read = igb_read_clock;
- adapter->cycles.mask = CLOCKSOURCE_MASK(64);
- adapter->cycles.mult = 1;
- /**
- * Scale the NIC clock cycle by a large factor so that
- * relatively small clock corrections can be added or
- * subtracted at each clock tick. The drawbacks of a large
- * factor are a) that the clock register overflows more quickly
- * (not such a big deal) and b) that the increment per tick has
- * to fit into 24 bits. As a result we need to use a shift of
- * 19 so we can fit a value of 16 into the TIMINCA register.
- */
- adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
- E1000_WRITE_REG(hw, E1000_TIMINCA,
- (1 << E1000_TIMINCA_16NS_SHIFT) |
- (16 << IGB_82576_TSYNC_SHIFT));
-
- /* Set registers so that rollover occurs soon to test this. */
- E1000_WRITE_REG(hw, E1000_SYSTIML, 0x00000000);
- E1000_WRITE_REG(hw, E1000_SYSTIMH, 0xFF800000);
- E1000_WRITE_FLUSH(hw);
+ pci_disable_pcie_error_reporting(pdev);
- timecounter_init(&adapter->clock,
- &adapter->cycles,
- ktime_to_ns(ktime_get_real()));
- /*
- * Synchronize our NIC clock against system wall clock. NIC
- * time stamp reading requires ~3us per sample, each sample
- * was pretty stable even under load => only require 10
- * samples for each offset comparison.
- */
- memset(&adapter->compare, 0, sizeof(adapter->compare));
- adapter->compare.source = &adapter->clock;
- adapter->compare.target = ktime_get_real;
- adapter->compare.num_samples = 10;
- timecompare_update(&adapter->compare, 0);
- break;
- case e1000_82575:
- /* 82575 does not support timesync */
- default:
- break;
- }
+ pci_disable_device(pdev);
}
-#endif /* HAVE_HW_TIME_STAMP */
/**
* igb_sw_init - Initialize general software structures (struct igb_adapter)
* @adapter: board private structure to initialize
}
adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
- hw->mac.rar_entry_count,
+ hw->mac.rar_entry_count,
GFP_ATOMIC);
/* Setup and initialize a copy of the hw vlan table array */
GFP_ATOMIC);
#ifdef NO_KNI
/* These calls may decrease the number of queues */
- igb_set_sriov_capability(adapter);
+ if (hw->mac.type < e1000_i210) {
+ igb_set_sriov_capability(adapter);
+ }
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
return -ENOMEM;
}
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ netif_set_real_num_tx_queues(netdev,
+ adapter->vmdq_pools ? 1 :
+ adapter->num_tx_queues);
+
+ err = netif_set_real_num_rx_queues(netdev,
+ adapter->vmdq_pools ? 1 :
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
return E1000_SUCCESS;
+err_set_queues:
+ igb_free_irq(adapter);
err_req_irq:
igb_release_hw_control(adapter);
igb_power_down_link(adapter);
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
- int orig_node = dev_to_node(dev);
int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
- if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- set_dev_node(dev, tx_ring->numa_node);
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!tx_ring->desc)
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
+static u32 igb_tx_wthresh(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ switch (hw->mac.type) {
+ case e1000_i354:
+ return 4;
+ case e1000_82576:
+ if (adapter->msix_entries)
+ return 1;
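+ /* fall through */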
+ default:
+ break;
+ }
+
+ return 16;
+}
+
/**
* igb_configure_tx_ring - Configure transmit ring after Reset
* @adapter: board private structure
txdctl |= IGB_TX_PTHRESH;
txdctl |= IGB_TX_HTHRESH << 8;
- txdctl |= IGB_TX_WTHRESH << 16;
+ txdctl |= igb_tx_wthresh(adapter) << 16;
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int orig_node = dev_to_node(dev);
int size, desc_len;
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
- if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- set_dev_node(dev, rx_ring->numa_node);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!rx_ring->desc)
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
u32 j, num_rx_queues, shift = 0, shift2 = 0;
- union e1000_reta {
- u32 dword;
- u8 bytes[4];
- } reta;
- static const u8 rsshash[40] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
- 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
- 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
- 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+ static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+ 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+ 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+ 0xFA01ACBE };
/* Fill out hash function seeds */
- for (j = 0; j < 10; j++) {
- u32 rsskey = rsshash[(j * 4)];
- rsskey |= rsshash[(j * 4) + 1] << 8;
- rsskey |= rsshash[(j * 4) + 2] << 16;
- rsskey |= rsshash[(j * 4) + 3] << 24;
- E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, rsskey);
- }
+ for (j = 0; j < 10; j++)
+ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]);
num_rx_queues = adapter->rss_queues;
- if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
- /* 82575 and 82576 supports 2 RSS queues for VMDq */
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_82580:
- num_rx_queues = 1;
- shift = 0;
- break;
- case e1000_82576:
- shift = 3;
- num_rx_queues = 2;
- break;
- case e1000_82575:
+ /* 82575 and 82576 support 2 RSS queues for VMDq */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ if (adapter->vmdq_pools) {
shift = 2;
shift2 = 6;
- default:
break;
}
- } else {
- if (hw->mac.type == e1000_82575)
- shift = 6;
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
+ shift = 3;
+ num_rx_queues = 2;
+ }
+ break;
+ default:
+ break;
}
- for (j = 0; j < (32 * 4); j++) {
- reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+ /*
+ * Populate the redirection table 4 entries at a time. To do this
+ * we are generating the results for n and n+2 and then interleaving
+ * those with the results for n+1 and n+3.
+ */
+ for (j = 0; j < 32; j++) {
+ /* first pass generates n and n+2 */
+ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+ u32 reta = (base & 0x07800780) >> (7 - shift);
+
+ /* second pass generates n+1 and n+3 */
+ base += 0x00010001 * num_rx_queues;
+ reta |= (base & 0x07800780) << (1 + shift);
+
+ /* generate 2nd table for 82575 based parts */
if (shift2)
- reta.bytes[j & 3] |= num_rx_queues << shift2;
- if ((j & 3) == 3)
- E1000_WRITE_REG(hw, E1000_RETA(j >> 2), reta.dword);
+ reta |= (0x01010101 * num_rx_queues) << shift2;
+
+ E1000_WRITE_REG(hw, E1000_RETA(j), reta);
}
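The packed arithmetic deserves a sanity check. For the parts that use shift == 0, every table entry m (0..127) should come out as (m * num_rx_queues) >> 7, i.e. equal contiguous blocks of entries per queue, where the removed byte-wise code striped entries round-robin with j % num_rx_queues. A standalone check of that equivalence, hedged, not driver code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n, j, k;

	for (n = 1; n <= 8; n <<= 1) {		/* RSS queue counts */
		for (j = 0; j < 32; j++) {	/* the 32 RETA registers */
			uint32_t base = ((j * 0x00040004) + 0x00020000) * n;
			uint32_t reta = (base & 0x07800780) >> 7; /* bytes 0, 2 */

			base += 0x00010001 * n;
			reta |= (base & 0x07800780) << 1;	  /* bytes 1, 3 */

			for (k = 0; k < 4; k++)
				assert(((reta >> (8 * k)) & 0xFF) ==
				       (((4 * j + k) * n) >> 7));
		}
	}
	puts("packed RETA generation matches the per-entry formula");
	return 0;
}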
/*
/* Don't need to set TUOFL or IPOFL, they default to 1 */
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
* if we are only using one queue */
adapter->rss_queues << 7);
}
if (adapter->rss_queues > 1)
- mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+ mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
else
- mrqc = E1000_MRQC_ENABLE_VMDQ;
+ mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
- mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+ mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
}
-
igb_vmm_control(adapter);
- /*
- * Generate RSS hash based on TCP port numbers and/or
- * IPv4/v6 src and dst addresses since UDP cannot be
- * hashed reliably due to IP fragmentation
- */
- mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
- E1000_MRQC_RSS_FIELD_IPV4_TCP |
- E1000_MRQC_RSS_FIELD_IPV6 |
- E1000_MRQC_RSS_FIELD_IPV6_TCP |
- E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-
E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
- vmolr |= E1000_VMOLR_LPE; /* Accept long packets */
+ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */
E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
}
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /*
+ * RLPML prevents us from receiving a frame larger than max_frame so
+ * it is safe to just set the rx_buffer_len to max_frame without the
+	 * risk of an skb_over_panic.
+ */
+ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
+ MAXIMUM_ETHERNET_VLAN_SIZE);
+
+#endif
/* disable the queue */
E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
+	/* reset next_to_use/next_to_clean to keep SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ ring->next_to_alloc = 0;
+
+#endif
/* set descriptor configuration */
#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
- srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
E1000_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-#ifdef IGB_PER_PKT_TIMESTAMP
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+#ifdef HAVE_PTP_1588_CLOCK
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif
+#endif /* HAVE_PTP_1588_CLOCK */
/*
* We should set the drop enable bit if:
* SR-IOV is enabled
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
-#ifdef CONFIG_BQL
netdev_tx_reset_queue(txring_txq(tx_ring));
-#endif /* CONFIG_BQL */
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
unsigned long size;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- const int bufsz = rx_ring->rx_buffer_len;
-#else
- const int bufsz = IGB_RX_HDR_LEN;
-#endif
u16 i;
if (!rx_ring->rx_buffer_info)
return;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
+#endif
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
if (buffer_info->dma) {
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
- bufsz,
+ rx_ring->rx_buffer_len,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- if (buffer_info->page_dma) {
- dma_unmap_page(rx_ring->dev,
- buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- buffer_info->page_offset = 0;
- }
+#else
+ if (!buffer_info->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
#endif
}
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ /* retain VLAN HW filtering if in VT mode */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools)
+ rctl |= E1000_RCTL_VFE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
- DPRINTK(DRV, WARNING,
- "Spoof event(s) detected on VF %d\n", j);
- adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j) ||
+ adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ DPRINTK(DRV, WARNING,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &=
+ ~((1 << j) |
+ (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ }
+ }
+ break;
+ case e1000_i350:
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j)) {
+ DPRINTK(DRV, WARNING,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &= ~(1 << j);
+ }
}
+ break;
+ default:
+ break;
}
}
break;
}
+ if (((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) &&
+ (hw->phy.id == I210_I_PHY_ID)) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
return link_active;
}
u32 link;
int i;
u32 thstat, ctrl_ext;
-
+ u32 connsw;
link = igb_has_link(adapter);
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
+
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ else
+ link = FALSE;
+ }
+
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
+
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
case SPEED_100:
/* maybe add some timeout factor ? */
break;
+ default:
+ break;
}
netif_carrier_on(netdev);
netdev->name);
}
if (thstat & E1000_THSTAT_LINK_THROTTLE) {
- printk(KERN_INFO
+ printk(KERN_INFO
"igb: %s The network "
"adapter supported "
"link speed "
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
-
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ hw->mac.ops.power_up_serdes(hw);
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
igb_spoof_check(adapter);
/* Reset the timer */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
}
static void igb_dma_err_task(struct work_struct *work)
/* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks.
*/
- if (adapter->link_speed != SPEED_1000) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
new_val = IGB_4K_ITR;
goto set_itr_val;
+ default:
+ break;
}
packets = q_vector->rx.total_packets;
u8 current_itr = 0;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- if (adapter->link_speed != SPEED_1000) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
current_itr = 0;
new_itr = IGB_4K_ITR;
goto set_itr_now;
+ default:
+ break;
}
igb_update_itr(q_vector, &q_vector->tx);
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
#endif /* NETIF_F_TSO */
return 0;
if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
return;
} else {
- u8 l4_hdr = 0;
+ u8 nexthdr = 0;
switch (first->protocol) {
case __constant_htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
+ nexthdr = ip_hdr(skb)->protocol;
break;
#ifdef NETIF_F_IPV6_CSUM
case __constant_htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
+ nexthdr = ipv6_hdr(skb)->nexthdr;
break;
#endif
default:
break;
}
- switch (l4_hdr) {
+ switch (nexthdr) {
case IPPROTO_TCP:
type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
mss_l4len_idx = tcp_hdrlen(skb) <<
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ nexthdr);
}
break;
}
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}
-static __le32 igb_tx_cmd_type(u32 tx_flags)
+#define IGB_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
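IGB_SET_FLAG maps one flag bit to one result bit without a branch: both macro
arguments are compile-time constants, so the flag/result ratio folds into a
constant multiply or divide. A sketch with invented bit values:

	/* hypothetical bits, not the driver's real layout */
	#define EX_FLAG   0x00000002u	/* input bit */
	#define EX_RESULT 0x40000000u	/* output bit */
	/* EX_FLAG <= EX_RESULT, so the macro expands to a multiply:
	 * (tx_flags & 0x2) * (0x40000000 / 0x2) -> 0 or 0x40000000
	 */
	u32 vle = IGB_SET_FLAG(tx_flags, EX_FLAG, EX_RESULT);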
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
-
- /* set timestamp bit if present */
- if (tx_flags & IGB_TX_FLAGS_TSTAMP)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+ (E1000_ADVTXD_DCMD_VLE));
/* set segmentation bits for TSO */
- if (tx_flags & IGB_TX_FLAGS_TSO)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+ (E1000_ADVTXD_DCMD_TSE));
+
+ /* set timestamp bit if present */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
return cmd_type;
}
{
u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring if any offload is enabled */
- if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
- test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
/* insert L4 checksum */
- if (tx_flags & IGB_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_CSUM,
+ (E1000_TXD_POPTS_TXSM << 8));
- /* insert IPv4 checksum */
- if (tx_flags & IGB_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
- }
+ /* insert IPv4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_IPV4,
+ (E1000_TXD_POPTS_IXSM << 8));
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag;
dma_addr_t dma;
-#ifdef MAX_SKB_FRAGS
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
-#endif
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
- __le32 cmd_type;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
+ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IGB_TX_DESC(tx_ring, i);
- igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
- cmd_type = igb_tx_cmd_type(tx_flags);
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
-#ifdef MAX_SKB_FRAGS
- for (;;) {
-#endif
while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
i++;
tx_desc++;
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IGB_MAX_DATA_PER_TXD;
size -= IGB_MAX_DATA_PER_TXD;
- tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
}
-#ifdef MAX_SKB_FRAGS
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
- dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ size, DMA_TO_DEVICE);
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.olinfo_status = 0;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
- frag++;
}
-#endif /* MAX_SKB_FRAGS */
-#ifdef CONFIG_BQL
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-#endif /* CONFIG_BQL */
-
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
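A note on the cmd_type ^ size writes above: every bit cmd_type can carry
(DTYP, DCMD, and the IGB_SET_FLAG results) sits above the descriptor's
16-bit length field, while size never exceeds IGB_MAX_DATA_PER_TXD, so the
XOR can never clear a set bit and behaves exactly like OR here:

	/* the fields never overlap, hence for any valid buffer length:
	 * (cmd_type ^ size) == (cmd_type | size)
	 */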
/* set the timestamp */
first->time_stamp = jiffies;
/* clear dma mappings for failed tx_buffer_info map */
for (;;) {
- tx_buffer= &tx_ring->tx_buffer_info[i];
+ tx_buffer = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
if (tx_buffer == first)
break;
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
- /* need: 1 descriptor per page,
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
* + 1 desc for context descriptor,
- * otherwise try next time */
- if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
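With IGB_MAX_DATA_PER_TXD = (1 << 15) (per the definitions this patch removes
from this file), the descriptor budget above works out as in this sketch:

	/* e.g. one 64K page frag: TXD_USE_COUNT(65536) == 2 data descriptors;
	 * count + 3 then adds 1 context descriptor and the 2-descriptor gap
	 * that keeps tail from touching head
	 */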
first->bytecount = skb->len;
first->gso_segs = 1;
-#ifdef HAVE_HW_TIME_STAMP
-#ifdef SKB_SHARED_TX_IS_UNION
- if (unlikely(skb_shinfo(skb)->tx_flags.flags & SKBTX_HW_TSTAMP)) {
- skb_shinfo(skb)->tx_flags.flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IGB_TX_FLAGS_TSTAMP;
- }
-#else
+ skb_tx_timestamp(skb);
+
+#ifdef HAVE_PTP_1588_CLOCK
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IGB_TX_FLAGS_TSTAMP;
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+ if (!adapter->ptp_tx_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ if (adapter->hw.mac.type == e1000_82576)
+ schedule_work(&adapter->ptp_tx_work);
+ }
}
-#endif
+#endif /* HAVE_PTP_1588_CLOCK */
-#endif
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
#endif
/* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- u32 rx_buffer_len, i;
-#endif
if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n");
return -EINVAL;
}
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
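The clamp keeps an undersized MTU from shrinking the receive path below a
standard Ethernet frame; for example:

	/* new_mtu = 68: max_frame = 68 + ETH_HLEN(14) + ETH_FCS_LEN(4) +
	 * VLAN_HLEN(4) = 90, raised to ETH_FRAME_LEN + ETH_FCS_LEN = 1518
	 */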
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
-#ifdef IGB_PER_PKT_TIMESTAMP
- if (adapter->hw.mac.type >= e1000_82580)
- max_frame += IGB_TS_HDR_LEN;
-
-#endif
- /*
- * RLPML prevents us from receiving a frame larger than max_frame so
- * it is safe to just set the rx_buffer_len to max_frame without the
- * risk of an skb over panic.
- */
- if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
- rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buffer_len = max_frame;
-
-#endif
if (netif_running(netdev))
igb_down(adapter);
dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ hw->dev_spec._82575.mtu = new_mtu;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
-
-#endif
if (netif_running(netdev))
igb_up(adapter);
else
int i;
u64 bytes, packets;
#ifndef IGB_NO_LRO
- u32 flushed = 0, coal = 0, recycled = 0;
+ u32 flushed = 0, coal = 0;
struct igb_q_vector *q_vector;
#endif
#ifndef IGB_NO_LRO
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (!q_vector || !q_vector->lrolist)
+ if (!q_vector)
continue;
- flushed += q_vector->lrolist->stats.flushed;
- coal += q_vector->lrolist->stats.coal;
- recycled += q_vector->lrolist->stats.recycled;
+ flushed += q_vector->lrolist.stats.flushed;
+ coal += q_vector->lrolist.stats.coal;
}
adapter->lro_stats.flushed = flushed;
adapter->lro_stats.coal = coal;
- adapter->lro_stats.recycled = recycled;
#endif
bytes = 0;
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
- adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
- }
+ /* this stat has invalid values on i210/i211 */
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211))
+ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ }
adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
/* Check for MDD event */
if (icr & E1000_ICR_MDDET)
igb_process_mdd_event(adapter);
}
#ifdef IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
static void igb_update_dca(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
- if (q_vector->tx.ring) {
- int q = q_vector->tx.ring->reg_idx;
- u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
- }
- dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
- E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl);
- }
- if (q_vector->rx.ring) {
- int q = q_vector->rx.ring->reg_idx;
- u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
- }
- dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
- E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl);
- }
+ if (q_vector->tx.ring)
+ igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+ if (q_vector->rx.ring)
+ igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
q_vector->cpu = cpu;
out_no_update:
put_cpu();
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn;
-#endif
random_ether_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- device_id = IGB_82576_VF_DEV_ID;
- /* VF Stride for 82576 is 2 */
- thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
- (pdev->devfn & 1);
- break;
- case e1000_i350:
- device_id = IGB_I350_VF_DEV_ID;
- /* VF Stride for I350 is 4 */
- thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
- (pdev->devfn & 3);
- break;
- default:
- device_id = 0;
- thisvf_devfn = 0;
- break;
- }
-
- pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(hw->vendor_id,
- device_id, pvfdev);
- }
-
- if (pvfdev)
- adapter->vf_data[vf].vfdev = pvfdev;
- else
- dev_err(&pdev->dev,
- "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
- return pvfdev != NULL;
-#else
- return true;
+#ifdef IFLA_VF_MAX
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ /* By default spoof check is enabled for all VFs */
+ adapter->vf_data[vf].spoofchk_enabled = true;
#endif
-}
-
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- u16 vf_stride;
- unsigned int device_id;
- int vfs_found = 0;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- device_id = IGB_82576_VF_DEV_ID;
- /* VF Stride for 82576 is 2 */
- vf_stride = 2;
- break;
- case e1000_i350:
- device_id = IGB_I350_VF_DEV_ID;
- /* VF Stride for I350 is 4 */
- vf_stride = 4;
- break;
- default:
- device_id = 0;
- vf_stride = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
- vfs_found++;
- vf_devfn += vf_stride;
- pvfdev = pci_get_device(hw->vendor_id,
- device_id, pvfdev);
- }
-
- return vfs_found;
-}
#endif
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
- int i;
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- if (adapter->vf_data[i].vfdev) {
- if (adapter->vf_data[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
- }
-#endif
- return false;
+ return true;
}
static void igb_ping_all_vfs(struct igb_adapter *adapter)
igb_set_vmolr(adapter, vf, !vlan);
adapter->vf_data[vf].pf_vlan = vlan;
adapter->vf_data[vf].pf_qos = qos;
- igb_set_vf_vlan_strip(adapter, vf, true);
+ igb_set_vf_vlan_strip(adapter, vf, true);
dev_info(&adapter->pdev->dev,
"Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
if (test_bit(__IGB_DOWN, &adapter->state)) {
false, vf);
igb_set_vmvir(adapter, vlan, vf);
igb_set_vmolr(adapter, vf, true);
- igb_set_vf_vlan_strip(adapter, vf, false);
+ igb_set_vf_vlan_strip(adapter, vf, false);
adapter->vf_data[vf].pf_vlan = 0;
adapter->vf_data[vf].pf_qos = 0;
}
out:
return err;
}
-#endif
+
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dtxswc, reg_offset;
+
+ if (!adapter->vfs_allocated_count)
+ return -EOPNOTSUPP;
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
+ dtxswc = E1000_READ_REG(hw, reg_offset);
+ if (setting)
+ dtxswc |= ((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ else
+ dtxswc &= ~((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ E1000_WRITE_REG(hw, reg_offset, dtxswc);
+
+ adapter->vf_data[vf].spoofchk_enabled = setting;
+ return E1000_SUCCESS;
+}
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
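The register packs the per-pool MAC anti-spoof enables in the low bits, with
the VLAN anti-spoof enables E1000_DTXSWC_VLAN_SPOOF_SHIFT positions higher;
assuming a shift of 8, a sketch of the effect:

	/* e.g. vf = 2, assuming E1000_DTXSWC_VLAN_SPOOF_SHIFT == 8:
	 * setting enables bit 2 (MAC check) and bit 10 (VLAN check)
	 * for pool 2 in one register write
	 */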
+#endif /* IFLA_VF_MAX */
+
+static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 reg;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+ }
+
+ if (i >= E1000_VLVF_ARRAY_SIZE)
+ i = -1;
+
+ return i;
+}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
+ struct e1000_hw *hw = &adapter->hw;
int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+ int err = 0;
if (vid)
igb_set_vf_vlan_strip(adapter, vf, true);
else
igb_set_vf_vlan_strip(adapter, vf, false);
- return igb_vlvf_set(adapter, vid, add, vf);
+ /* If in promiscuous mode we need to make sure the PF also has
+ * the VLAN filter set.
+ */
+ if (add && (adapter->netdev->flags & IFF_PROMISC))
+ err = igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+ if (err)
+ goto out;
+
+ err = igb_vlvf_set(adapter, vid, add, vf);
+
+ if (err)
+ goto out;
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
+ u32 vlvf, bits;
+
+ int regndx = igb_find_vlvf_entry(adapter, vid);
+ if (regndx < 0)
+ goto out;
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx));
+ bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
+#ifndef HAVE_VLAN_RX_REGISTER
+ !test_bit(vid, adapter->active_vlans) &&
+#endif
+ !bits)
+ igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+ }
+
+out:
+ return err;
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
igb_set_rx_mode(adapter->netdev);
- /*
+ /*
* Reset the VFs TDWBAL and TDWBAH registers which are not
* cleared by a VFLR
*/
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
return 0;
}
-#ifdef HAVE_HW_TIME_STAMP
-/**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @shhwtstamps: timestamp structure to update
- * @regval: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions
- */
-static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
- struct skb_shared_hwtstamps *shhwtstamps,
- u64 regval)
-{
- u64 ns;
-
- /*
- * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
- * 24 to match clock shift we setup earlier.
- */
- if (adapter->hw.mac.type >= e1000_82580)
- regval <<= IGB_82580_TSYNC_SHIFT;
-
- ns = timecounter_cyc2time(&adapter->clock, regval);
-
- /*
- * force a timecompare_update here (even if less than a second
- * has passed) in order to prevent the case when ptpd or other
- * software jumps the clock offset. othwerise there is a small
- * window when the timestamp would be based on previous skew
- * and invalid results would be pushed to the network stack.
- */
- timecompare_update(&adapter->compare, 0);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
- shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
-}
-
-/**
- * igb_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_tx_buffer structure
- *
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we only
- * allow only one such packet into the queue.
- */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
- struct igb_tx_buffer *buffer_info)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- struct skb_shared_hwtstamps shhwtstamps;
- u64 regval;
-
- /* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
- !(E1000_READ_REG(hw, E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
- return;
-
- regval = E1000_READ_REG(hw, E1000_TXSTMPL);
- regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
-
- igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
-}
-
-#endif
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc, *eop_desc;
+ union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
tx_desc = IGB_TX_DESC(tx_ring, i);
i -= tx_ring->count;
- for (; budget; budget--) {
- eop_desc = tx_buffer->next_to_watch;
-
- /* prevent any other reads prior to eop_desc */
- rmb();
+ do {
+ union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
break;
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
-#ifdef HAVE_HW_TIME_STAMP
- /* retrieve hardware timestamp */
- igb_tx_hwtstamp(q_vector, tx_buffer);
-
-#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, 0);
}
- }
-#ifdef CONFIG_BQL
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
-#endif /* CONFIG_BQL */
i += tx_ring->count;
tx_ring->next_to_clean = i;
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+#ifdef DEBUG
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) &&
+ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) {
+#else
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+#endif
struct e1000_hw *hw = &adapter->hw;
- eop_desc = tx_buffer->next_to_watch;
-
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
- if (eop_desc &&
+ if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ))
&& !(E1000_READ_REG(hw, E1000_STATUS) &
E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
+#ifdef DEBUG
+ adapter->tx_hang_detected = TRUE;
+ if (adapter->disable_hw_reset) {
+ DPRINTK(DRV, WARNING,
+ "Deactivating netdev watchdog timer\n");
+ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer))
+ dev_put(netdev_ring(tx_ring));
+#ifndef HAVE_NET_DEVICE_OPS
+ netdev_ring(tx_ring)->tx_timeout = NULL;
+#endif
+ }
+#endif /* DEBUG */
dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
tx_ring->next_to_use,
tx_ring->next_to_clean,
tx_buffer->time_stamp,
- eop_desc,
+ tx_buffer->next_to_watch,
jiffies,
- eop_desc->wb.status);
+ tx_buffer->next_to_watch->wb.status);
if (netif_is_multiqueue(netdev_ring(tx_ring)))
netif_stop_subqueue(netdev_ring(tx_ring),
ring_queue_index(tx_ring));
}
}
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
netif_carrier_ok(netdev_ring(tx_ring)) &&
- igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
}
#endif /* HAVE_VLAN_RX_REGISTER */
-static inline void igb_rx_checksum(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
{
- skb_checksum_none_assert(skb);
+ struct igb_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- /* Ignore Checksum bit is set */
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
- return;
+ new_buff = &rx_ring->rx_buffer_info[nta];
- /* Rx checksum disabled via ethtool */
-#ifdef HAVE_NDO_SET_FEATURES
- if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
-#else
- if (!test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags))
-#endif
- return;
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* TCP/UDP checksum error bit is set */
- if (igb_test_staterr(rx_desc,
- E1000_RXDEXT_STATERR_TCPE |
- E1000_RXDEXT_STATERR_IPE)) {
- /*
- * work around errata with sctp packets where the TCPE aka
- * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
- * packets, (aka let the stack check the crc32c)
- */
- if (!((skb->len == 60) &&
- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)))
- ring->rx_stats.csum_err++;
+ /* transfer page from old buffer to new buffer */
+ memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
- /* let the stack verify checksum errors */
- return;
- }
- /* It must be a TCP or UDP packet with a valid checksum */
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
- E1000_RXD_STAT_UDPCS))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+ old_buff->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
}
-#ifdef NETIF_F_RXHASH
-static inline void igb_rx_hash(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ struct page *page,
+ unsigned int truesize)
{
- if (netdev_ring(ring)->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
-}
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
#endif
-#ifdef HAVE_HW_TIME_STAMP
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+
+ return true;
+}
+
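The reuse policy depends on page size; assuming IGB_RX_BUFSZ is 2048, the
offsets evolve as sketched here:

	/* 4K pages:  page_offset flips 0 <-> 2048 via the XOR, and the page
	 *            is reusable while the driver holds the only reference
	 * 64K pages: page_offset advances by truesize until fewer than
	 *            IGB_RX_BUFSZ bytes remain, then the page is released
	 */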
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- u64 regval;
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
+ if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- /*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
- *
- * If nothing went wrong, then it should have a skb_shared_tx that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if(!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+#ifdef HAVE_PTP_1588_CLOCK
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- regval = E1000_READ_REG(hw, E1000_RXSTMPL);
- regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
}
- igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
-#endif
-static void igb_rx_vlan(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid = 0;
- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
- else
- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
-#ifdef HAVE_VLAN_RX_REGISTER
- IGB_CB(skb)->vid = vid;
- } else {
- IGB_CB(skb)->vid = 0;
-#else
+ struct igb_rx_buffer *rx_buffer;
+ struct page *page;
-#ifdef HAVE_VLAN_PROTOCOL
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
-#else
- __vlan_hwaccel_put_tag(skb, vid);
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
#endif
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
#endif
+static inline void igb_rx_checksum(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+ return;
+
+ /* Rx checksum disabled via ethtool */
+ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (igb_test_staterr(rx_desc,
+ E1000_RXDEXT_STATERR_TCPE |
+ E1000_RXDEXT_STATERR_IPE)) {
+		/*
+		 * work around an errata with SCTP packets where the TCPE (aka
+		 * L4E) bit is set incorrectly on 64 byte (60 byte w/o CRC)
+		 * packets; let the stack verify the CRC32c instead
+		 */
+ if (!((skb->len == 60) &&
+ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)))
+ ring->rx_stats.csum_err++;
+
+ /* let the stack verify checksum errors */
+ return;
}
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+ E1000_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
+#ifdef NETIF_F_RXHASH
+static inline void igb_rx_hash(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- /* HW will not DMA in data larger than the given buffer, even if it
- * parses the (NFS, of course) header to be larger. In that case, it
- * fills the header buffer and spills the rest into the page.
- */
- u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IGB_RX_HDR_LEN)
- hlen = IGB_RX_HDR_LEN;
- return hlen;
+ if (netdev_ring(ring)->features & NETIF_F_RXHASH)
+ skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
#endif
#ifndef IGB_NO_LRO
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/**
* igb_merge_active_tail - merge active tail into lro skb
* @tail: pointer to active tail in frag_list
return true;
}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
* igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
* @adapter: board private structure
struct iphdr *iph = (struct iphdr *)skb->data;
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
- /* verify LRO is enabled */
- if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
- return false;
-
/* verify hardware indicates this is IPv4/TCP */
if((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) ||
!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))))
return false;
- /* verify the header is large enough for us to read IP/TCP fields */
+ /* .. and LRO is enabled */
+ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
+ return false;
+
+ /* .. and we are not in promiscuous mode */
+ if (netdev_ring(rx_ring)->flags & IFF_PROMISC)
+ return false;
+
+ /* .. and the header is large enough for us to read IP/TCP fields */
if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr)))
return false;
- /* verify there are no VLANs on packet */
+ /* .. and there are no VLANs on packet */
if (skb->protocol != __constant_htons(ETH_P_IP))
return false;
- /* ensure we are version 4 with no options */
+ /* .. and we are version 4 with no options */
if (*(u8 *)iph != 0x45)
return false;
static void igb_lro_flush(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
- struct igb_lro_list *lrolist = q_vector->lrolist;
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
__skb_unlink(skb, &lrolist->active);
if (IGB_CB(skb)->append_cnt) {
struct igb_lrohdr *lroh = igb_lro_hdr(skb);
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/* close any active lro contexts */
igb_close_active_frag_list(skb);
+#endif
/* incorporate ip header and re-calculate checksum */
lroh->iph.tot_len = ntohs(skb->len);
lroh->iph.check = 0;
lroh->ts[2] = IGB_CB(skb)->tsecr;
lroh->ts[1] = htonl(IGB_CB(skb)->tsval);
}
-#ifdef NETIF_F_TSO
+#ifdef NETIF_F_GSO
skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
#endif
}
static void igb_lro_flush_all(struct igb_q_vector *q_vector)
{
- struct igb_lro_list *lrolist = q_vector->lrolist;
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
struct sk_buff *skb, *tmp;
skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp)
struct igb_lrohdr *lroh = igb_lro_hdr(skb);
u16 opt_bytes, data_len;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
IGB_CB(skb)->tail = NULL;
+#endif
IGB_CB(skb)->tsecr = 0;
IGB_CB(skb)->append_cnt = 0;
IGB_CB(skb)->mss = 0;
(lroh->ts[2] == 0)) {
return;
}
-
+
IGB_CB(skb)->tsval = ntohl(lroh->ts[1]);
IGB_CB(skb)->tsecr = lroh->ts[2];
}
#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-static bool igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
+static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
{
- struct sk_buff *tail;
- struct skb_shared_info *tail_info;
+ struct skb_shared_info *sh_info;
struct skb_shared_info *new_skb_info;
- u16 data_len;
+ unsigned int data_len;
- /* header must be empty to pull frags into current skb */
- if (skb_headlen(new_skb))
- return false;
-
- if (IGB_CB(lro_skb)->tail)
- tail = IGB_CB(lro_skb)->tail;
- else
- tail = lro_skb;
-
- tail_info = skb_shinfo(tail);
+ sh_info = skb_shinfo(lro_skb);
new_skb_info = skb_shinfo(new_skb);
- /* make sure we have room in frags list */
- if (new_skb_info->nr_frags >= (MAX_SKB_FRAGS - tail_info->nr_frags))
- return false;
-
- /* bump append count */
- IGB_CB(lro_skb)->append_cnt++;
-
/* copy frags into the last skb */
- memcpy(tail_info->frags + tail_info->nr_frags,
+ memcpy(sh_info->frags + sh_info->nr_frags,
new_skb_info->frags,
new_skb_info->nr_frags * sizeof(skb_frag_t));
/* copy size data over */
- tail_info->nr_frags += new_skb_info->nr_frags;
+ sh_info->nr_frags += new_skb_info->nr_frags;
data_len = IGB_CB(new_skb)->mss;
- tail->len += data_len;
- tail->data_len += data_len;
- tail->truesize += data_len;
+ lro_skb->len += data_len;
+ lro_skb->data_len += data_len;
+ lro_skb->truesize += data_len;
/* wipe record of data from new_skb */
new_skb_info->nr_frags = 0;
new_skb->len = new_skb->data_len = 0;
- new_skb->truesize -= data_len;
- new_skb->data = new_skb->head + NET_SKB_PAD + NET_IP_ALIGN;
- skb_reset_tail_pointer(new_skb);
- new_skb->protocol = 0;
- new_skb->ip_summed = CHECKSUM_NONE;
-#ifdef HAVE_VLAN_RX_REGISTER
- IGB_CB(new_skb)->vid = 0;
-#else
- new_skb->vlan_tci = 0;
-#endif
-
- return true;
+ dev_kfree_skb_any(new_skb);
}
#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
- * igb_lro_queue - if able, queue skb into lro chain
+ * igb_lro_receive - if able, queue skb into lro chain
* @q_vector: structure containing interrupt and ring information
* @new_skb: pointer to current skb being checked
*
* fine chains it to the existing lro_skb based on flowid. If an LRO for
* the flow doesn't exist create one.
**/
-static struct sk_buff *igb_lro_queue(struct igb_q_vector *q_vector,
- struct sk_buff *new_skb)
+static void igb_lro_receive(struct igb_q_vector *q_vector,
+ struct sk_buff *new_skb)
{
struct sk_buff *lro_skb;
- struct igb_lro_list *lrolist = q_vector->lrolist;
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
struct igb_lrohdr *lroh = igb_lro_hdr(new_skb);
__be32 saddr = lroh->iph.saddr;
__be32 daddr = lroh->iph.daddr;
data_len = IGB_CB(new_skb)->mss;
- /*
- * malformed header, no tcp data, resultant packet would
- * be too large, or new skb is larger than our current mss.
+	/* Flush the saved context and start fresh if any of these hold:
+	 *   malformed header
+	 *   no tcp data
+	 *   resultant packet would be too large
+	 *   new skb is larger than our current mss
+	 *   data would remain in header
+	 *   we would consume more frags than the sk_buff contains
+	 *   ack sequence numbers changed
+	 *   window size has changed
+	 */
if (data_len == 0 ||
data_len > IGB_CB(lro_skb)->mss ||
- data_len > IGB_CB(lro_skb)->free) {
- igb_lro_flush(q_vector, lro_skb);
- break;
- }
-
- /* ack sequence numbers or window size has changed */
- if (igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
+ data_len > IGB_CB(lro_skb)->free ||
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data_len != new_skb->data_len ||
+ skb_shinfo(new_skb)->nr_frags >=
+ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) ||
+#endif
+ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
igb_lro_hdr(lro_skb)->th.window != lroh->th.window) {
igb_lro_flush(q_vector, lro_skb);
break;
IGB_CB(lro_skb)->next_seq += data_len;
IGB_CB(lro_skb)->free -= data_len;
+ /* update append_cnt */
+ IGB_CB(lro_skb)->append_cnt++;
+
#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
/* if header is empty pull pages into current skb */
- if (igb_merge_frags(lro_skb, new_skb)) {
- lrolist->stats.recycled++;
- } else {
-#endif
- /* chain this new skb in frag_list */
- igb_add_active_tail(lro_skb, new_skb);
- new_skb = NULL;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- }
+ igb_merge_frags(lro_skb, new_skb);
+#else
+ /* chain this new skb in frag_list */
+ igb_add_active_tail(lro_skb, new_skb);
#endif
- if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh) {
+ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh ||
+ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) {
igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh;
igb_lro_flush(q_vector, lro_skb);
}
lrolist->stats.coal++;
- return new_skb;
+ return;
}
if (IGB_CB(new_skb)->mss && !lroh->th.psh) {
__skb_queue_head(&lrolist->active, new_skb);
lrolist->stats.coal++;
- return NULL;
+ return;
}
/* packet not handled by any of the above, pass it to the stack */
#else
napi_gro_receive(&q_vector->napi, new_skb);
#endif
- return NULL;
}
#endif /* IGB_NO_LRO */
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+#ifdef NETIF_F_RXHASH
+ igb_rx_hash(rx_ring, rx_desc, skb);
+
+#endif
+ igb_rx_checksum(rx_ring, rx_desc, skb);
+
+ /* update packet type stats */
+ if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))
+ rx_ring->rx_stats.ipv4_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4_EX))
+ rx_ring->rx_stats.ipv4e_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6))
+ rx_ring->rx_stats.ipv6_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6_EX))
+ rx_ring->rx_stats.ipv6e_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP))
+ rx_ring->rx_stats.tcp_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_UDP))
+ rx_ring->rx_stats.udp_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_SCTP))
+ rx_ring->rx_stats.sctp_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_NFS))
+ rx_ring->rx_stats.nfs_packets++;
+
+#ifdef HAVE_PTP_1588_CLOCK
+ igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+#else
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+#endif
+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid = 0;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+#ifdef HAVE_VLAN_RX_REGISTER
+ IGB_CB(skb)->vid = vid;
+ } else {
+ IGB_CB(skb)->vid = 0;
+#else
+
+#ifdef HAVE_VLAN_PROTOCOL
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+#else
+		__vlan_hwaccel_put_tag(skb, vid);
+#endif
+
+
+#endif
+ }
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ skb->protocol = eth_type_trans(skb, dev);
+}
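A side note on the loopback VLAN handling above: when the LB status bit is set and the ring carries IGB_RING_FLAG_RX_LB_VLAN_BSWAP, the descriptor's VLAN field is decoded as big-endian instead of little-endian. A minimal user-space sketch of the two decodes on a little-endian host (the raw tag value is hypothetical; only the byte-order logic mirrors the code above):

#include <stdint.h>
#include <stdio.h>

/* stand-ins for le16_to_cpu/be16_to_cpu on a little-endian host */
static uint16_t sketch_le16_to_cpu(uint16_t v) { return v; }
static uint16_t sketch_be16_to_cpu(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t raw = 0x6400;     /* hypothetical wb.upper.vlan contents */
	int loopback_swapped = 1;  /* LB bit + RX_LB_VLAN_BSWAP ring flag */
	uint16_t vid = loopback_swapped ? sketch_be16_to_cpu(raw)
					: sketch_le16_to_cpu(raw);

	printf("vid = %u\n", vid); /* 100 when swapped, 25600 otherwise */
	return 0;
}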
+
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
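The compare-and-reset in igb_is_non_eop() is the usual branch-based ring wrap, cheaper than a modulo on the hot path. A tiny user-space sketch of the same idiom (RING_COUNT and the starting index are arbitrary illustration values):

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8 /* stands in for rx_ring->count */

/* same wrap idiom as igb_is_non_eop(): compare instead of modulo */
static uint16_t ring_advance(uint16_t ntc)
{
	ntc++;
	return (ntc < RING_COUNT) ? ntc : 0;
}

int main(void)
{
	uint16_t ntc = 6;
	int i;

	for (i = 0; i < 4; i++) {
		printf("next_to_clean = %u\n", ntc); /* 6 7 0 1 */
		ntc = ring_advance(ntc);
	}
	return 0;
}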
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+/* igb_clean_rx_irq - legacy (no packet split) */
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
- union e1000_adv_rx_desc *rx_desc;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- const int current_node = numa_node_id();
-#endif
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
- u16 i = rx_ring->next_to_clean;
- rx_desc = IGB_RX_DESC(rx_ring, i);
+ do {
+ struct igb_rx_buffer *rx_buffer;
+ union e1000_adv_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ u16 ntc;
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- struct sk_buff *skb = buffer_info->skb;
- union e1000_adv_rx_desc *next_rxd;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
- buffer_info->skb = NULL;
- prefetch(skb->data);
+ ntc = rx_ring->next_to_clean;
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ rx_buffer = &rx_ring->rx_buffer_info[ntc];
- i++;
- if (i == rx_ring->count)
- i = 0;
-
- next_rxd = IGB_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
/*
* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
*/
rmb();
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
- dma_unmap_single(rx_ring->dev, buffer_info->dma,
- rx_ring->rx_buffer_len,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
-
-#else
- if (!skb_is_nonlinear(skb)) {
- __skb_put(skb, igb_get_hlen(rx_desc));
- dma_unmap_single(rx_ring->dev, buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
+ skb = rx_buffer->skb;
- if (rx_desc->wb.upper.length) {
- u16 length = le16_to_cpu(rx_desc->wb.upper.length);
+ prefetch(skb->data);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
+ /* pull the header of the skb in */
+ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ /* clear skb reference in buffer info structure */
+ rx_buffer->skb = NULL;
- if ((page_count(buffer_info->page) != 1) ||
- (page_to_nid(buffer_info->page) != current_node))
- buffer_info->page = NULL;
- else
- get_page(buffer_info->page);
+ cleaned_count++;
- dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
+ BUG_ON(igb_is_non_eop(rx_ring, rx_desc));
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
- struct igb_rx_buffer *next_buffer;
- next_buffer = &rx_ring->rx_buffer_info[i];
- buffer_info->skb = next_buffer->skb;
- buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- goto next_desc;
- }
+ dma_unmap_single(rx_ring->dev, rx_buffer->dma,
+ rx_ring->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
if (igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
dev_kfree_skb_any(skb);
- goto next_desc;
+ continue;
}
-#ifdef HAVE_HW_TIME_STAMP
- igb_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif
-#ifdef NETIF_F_RXHASH
- igb_rx_hash(rx_ring, rx_desc, skb);
-#endif
- igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_rx_vlan(rx_ring, rx_desc, skb);
-
total_bytes += skb->len;
- total_packets++;
- skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring));
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
#ifndef IGB_NO_LRO
if (igb_can_lro(rx_ring, rx_desc, skb))
- buffer_info->skb = igb_lro_queue(q_vector, skb);
+ igb_lro_receive(q_vector, skb);
else
#endif
#ifdef HAVE_VLAN_RX_REGISTER
netdev_ring(rx_ring)->last_rx = jiffies;
#endif
- budget--;
-next_desc:
- cleaned_count++;
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
- if (!budget)
- break;
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifndef IGB_NO_LRO
+ igb_lro_flush_all(q_vector);
+
+#endif /* IGB_NO_LRO */
+ return (total_packets < budget);
+}
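The boolean return feeds the NAPI contract: true means the ring was drained within budget, so the caller may complete NAPI and re-arm interrupts; false means work remains and the kernel should poll again. A hedged sketch of the caller side (the driver's real igb_poll also handles Tx cleanup; igb_ring_irq_enable() is its interrupt re-arm helper, used here only to show the shape of the contract):

/* sketch only, not the driver's actual poll routine */
static int igb_poll_sketch(struct igb_q_vector *q_vector, int budget)
{
	bool clean_complete = igb_clean_rx_irq(q_vector, budget);

	if (!clean_complete)
		return budget;                 /* more work: keep polling */

	napi_complete(&q_vector->napi);        /* done: leave polled mode */
	igb_ring_irq_enable(q_vector);         /* re-arm the interrupt */
	return 0;
}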
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, and GRO offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol if header is present */
+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+ nexthdr = hdr.ipv4->protocol;
+#ifdef NETIF_F_TSO6
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
+#endif /* NETIF_F_TSO6 */
+ } else {
+ return hdr.network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
+ */
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
+}
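To make the arithmetic concrete: for a plain IPv4/TCP frame the walk adds 14 (Ethernet) + 20 (IP, ihl = 5) + 20 (TCP, doff = 5) and reports 54 bytes. A small user-space reproduction of just the length math (the hand-built frame is hypothetical; only the nibble extraction mirrors igb_get_headlen()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t frame[64] = {0};
	unsigned int off = 14;                 /* past the Ethernet header */
	unsigned int hlen;

	frame[12] = 0x08; frame[13] = 0x00;    /* ethertype 0x0800 (IPv4) */
	frame[14] = 0x45;                      /* IPv4, ihl = 5 (20 bytes) */
	frame[14 + 9] = 6;                     /* protocol = TCP */
	frame[14 + 20 + 12] = 0x50;            /* TCP doff = 5 (20 bytes) */

	hlen = (frame[off] & 0x0F) << 2;       /* IP header length: 20 */
	off += hlen;
	hlen = (frame[off + 12] & 0xF0) >> 2;  /* TCP header length: 20 */
	off += hlen;

	printf("headlen = %u\n", off);         /* prints 54 */
	return 0;
}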
+
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
+
+ /*
+ * it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ /* retrieve timestamp from buffer */
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+
+ /* update pointers to remove timestamp header */
+ skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+ frag->page_offset += IGB_TS_HDR_LEN;
+ skb->data_len -= IGB_TS_HDR_LEN;
+ skb->len -= IGB_TS_HDR_LEN;
+
+ /* move va to start of packet data */
+ va += IGB_TS_HDR_LEN;
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ /*
+ * the linear area needs to hold at least ETH_HLEN of headers,
+ * and enough that skb_pad() can bring frames shorter than
+ * 60 bytes up to the minimum
+ */
+ pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
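One detail worth calling out: the copy length is rounded up with ALIGN(pull_len, sizeof(long)) so the memcpy moves whole words, while the frag and skb accounting still use the exact pull_len; the few extra copied bytes land past skb->tail and are never counted. A worked check, assuming 8-byte longs:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long pull_len = 54; /* Ethernet + IPv4 + TCP headers */

	/* copy 56 bytes, but advance skb->tail by only 54 */
	printf("copy %lu, account %lu\n",
	       ALIGN_UP(pull_len, sizeof(long)), pull_len);
	return 0;
}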
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ igb_pull_tail(rx_ring, rx_desc, skb);
+
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
+
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
+
+ return false;
+}
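The 60-byte threshold is the minimum Ethernet frame size with the 4-byte FCS stripped (64 - 4 = 60, i.e. ETH_ZLEN); anything shorter must be padded to qualify as a valid frame, which is what the skb_pad() path above enforces.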
+
+/* igb_clean_rx_irq - packet split */
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ union e1000_adv_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
- }
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ /* retrieve a buffer from the ring */
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
+
+ cleaned_count++;
+
+ /* fetch next buffer in frame if non-eop */
+ if (igb_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+#ifndef IGB_NO_LRO
+ if (igb_can_lro(rx_ring, rx_desc, skb))
+ igb_lro_receive(q_vector, skb);
+ else
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+#ifndef NETIF_F_GRO
+
+ netdev_ring(rx_ring)->last_rx = jiffies;
+#endif
+
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
- rx_ring->next_to_clean = i;
rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;
q_vector->rx.total_packets += total_packets;
igb_alloc_rx_buffers(rx_ring, cleaned_count);
#ifndef IGB_NO_LRO
- if (netdev_ring(rx_ring)->features & NETIF_F_LRO)
- igb_lro_flush_all(q_vector);
+ igb_lro_flush_all(q_vector);
#endif /* IGB_NO_LRO */
- return !!budget;
+ return (total_packets < budget);
}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
return true;
if (likely(!skb)) {
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
rx_ring->rx_buffer_len);
-#else
- skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
- IGB_RX_HDR_LEN);
-#endif
bi->skb = skb;
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
skb_record_rx_queue(skb, ring_queue_index(rx_ring));
}
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- dma = dma_map_single(rx_ring->dev, skb->data,
- rx_ring->rx_buffer_len, DMA_FROM_DEVICE);
-#else
dma = dma_map_single(rx_ring->dev, skb->data,
- IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
-#endif
+ rx_ring->rx_buffer_len, DMA_FROM_DEVICE);
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
if (dma_mapping_error(rx_ring->dev, dma)) {
+ dev_kfree_skb_any(skb);
+ bi->skb = NULL;
+
rx_ring->rx_stats.alloc_failed++;
return false;
}
return true;
}
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t page_dma = bi->page_dma;
- unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+ dma_addr_t dma;
- if (page_dma)
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
return true;
- if (!page) {
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- bi->page = page;
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ /* alloc new page for storage */
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
}
- page_dma = dma_map_page(rx_ring->dev, page,
- page_offset, PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
- if (dma_mapping_error(rx_ring->dev, page_dma)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
- bi->page_dma = page_dma;
- bi->page_offset = page_offset;
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
return true;
}
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
- while (cleaned_count--) {
+ do {
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
if (!igb_alloc_mapped_skb(rx_ring, bi))
+#else
+ if (!igb_alloc_mapped_page(rx_ring, bi))
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
break;
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info. */
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
#else
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
- if (!igb_alloc_mapped_page(rx_ring, bi))
- break;
-
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+#endif
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
rx_desc++;
bi++;
i++;
/* clear the hdr_addr for the next_to_use descriptor */
rx_desc->read.hdr_addr = 0;
- }
+
+ cleaned_count--;
+ } while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
rx_ring->next_to_use = i;
- /* Force memory writes to complete before letting h/w
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+#endif
+ /*
+ * Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, rx_ring->tail);
}
return E1000_SUCCESS;
}
-#endif
-#ifdef HAVE_HW_TIME_STAMP
-/**
- * igb_hwtstamp_ioctl - control hardware time stamping
- * @netdev:
- * @ifreq:
- * @cmd:
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- **/
-static int igb_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
- u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
- u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_cfg = 0;
- bool is_l4 = false;
- bool is_l2 = false;
- u32 regval;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- tsync_tx_ctl = 0;
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- tsync_rx_ctl = 0;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = true;
- is_l4 = true;
- break;
- default:
- return -ERANGE;
- }
-
- if (hw->mac.type == e1000_82575) {
- if (tsync_rx_ctl | tsync_tx_ctl)
- return -EINVAL;
- return 0;
- }
-
-#ifdef IGB_PER_PKT_TIMESTAMP
- /*
- * Per-packet timestamping only works if all packets are
- * timestamped, so enable timestamping in all packets as
- * long as one rx filter was configured.
- */
- if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
- tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- }
-#endif
-
- /* enable/disable TX */
- regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
- regval &= ~E1000_TSYNCTXCTL_ENABLED;
- regval |= tsync_tx_ctl;
- E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
-
- /* enable/disable RX */
- regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
- regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
- regval |= tsync_rx_ctl;
- E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
-
- /* define which PTP packets are time stamped */
- E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
-
- /* define ethertype filter for timestamped packets */
- if (is_l2)
- E1000_WRITE_REG(hw, E1000_ETQF(3),
- (E1000_ETQF_FILTER_ENABLE | /* enable filter */
- E1000_ETQF_1588 | /* enable timestamping */
- ETH_P_1588)); /* 1588 eth protocol type */
- else
- E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
-
-#define PTP_PORT 319
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IPPROTO_UDP /* UDP */
- | E1000_FTQF_VF_BP /* VF not compared */
- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
- | E1000_FTQF_MASK); /* mask all inputs */
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
- E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_PORT));
- E1000_WRITE_REG(hw, E1000_IMIREXT(3),
- (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
- if (hw->mac.type == e1000_82576) {
- /* enable source port check */
- E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_PORT));
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- }
- E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
- } else {
- E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
- }
- E1000_WRITE_FLUSH(hw);
-
- adapter->hwtstamp_config = config;
-
- /* clear TX/RX time stamp registers, just to be sure */
- regval = E1000_READ_REG(hw, E1000_TXSTMPH);
- regval = E1000_READ_REG(hw, E1000_RXSTMPH);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
#endif
/**
* igb_ioctl -
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
#endif
-#ifdef HAVE_HW_TIME_STAMP
+#ifdef HAVE_PTP_1588_CLOCK
case SIOCSHWTSTAMP:
- return igb_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif
+ return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+#endif /* HAVE_PTP_1588_CLOCK */
#ifdef ETHTOOL_OPS_COMPAT
case SIOCETHTOOL:
return ethtool_ioctl(ifr);
if (!test_bit(__IGB_DOWN, &adapter->state))
igb_irq_enable(adapter);
+#else
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
#else
bool enable = !!(features & NETIF_F_HW_VLAN_RX);
+#endif
#endif
if (enable) {
#else
struct net_device *vnetdev;
vnetdev = adapter->vmdq_netdev[i-1];
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX);
+#else
enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX);
#endif
- igb_set_vf_vlan_strip(adapter,
+#endif
+ igb_set_vf_vlan_strip(adapter,
adapter->vfs_allocated_count + i,
enable);
}
#ifdef HAVE_VLAN_PROTOCOL
static int igb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+#else
static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
#else
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
#endif
#ifdef HAVE_VLAN_PROTOCOL
static int igb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+#else
static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
#else
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
#endif
for (vid = 0; vid < VLAN_N_VID; vid++) {
if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ igb_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
+#else
igb_vlan_rx_add_vid(adapter->netdev, vid);
+#endif
}
}
#else
igb_vlan_mode(adapter->netdev, adapter->netdev->features);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-#ifdef HAVE_VLAN_PROTOCOL
- igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ igb_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
#else
igb_vlan_rx_add_vid(adapter->netdev, vid);
#endif
-
#endif
}
mac->autoneg = 0;
- /* Fiber NIC's only allow 1000 gbps Full duplex */
- if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes ) &&
- spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- dev_err(pci_dev_to_dev(pdev),
- "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
+ /* SerDes devices do not support 10Mbps half/full duplex
+ * or 100Mbps half duplex
+ */
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ case SPEED_10 + DUPLEX_FULL:
+ case SPEED_100 + DUPLEX_HALF:
+ dev_err(pci_dev_to_dev(pdev),
+ "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ default:
+ break;
+ }
}
switch (spddplx) {
dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
return 0;
}
netif_device_detach(netdev);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
if (netif_running(netdev))
__igb_close(netdev, true);
return retval;
#endif
- status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_LU)
- wufc &= ~E1000_WUFC_LNKC;
-
if (wufc) {
igb_setup_rctl(adapter);
igb_set_rx_mode(netdev);
#ifdef CONFIG_PM
#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_suspend(struct device *dev)
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
int retval;
bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return 0;
}
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_resume(struct device *dev)
+#else
+static int igb_resume(struct pci_dev *pdev)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
-#ifdef CONFIG_PM_RUNTIME
- if (!rtnl_is_locked()) {
- /*
- * shut up ASSERT_RTNL() warning in
- * netif_set_real_num_tx/rx_queues.
- */
- rtnl_lock();
- err = igb_init_interrupt_scheme(adapter);
- rtnl_unlock();
- } else {
- err = igb_init_interrupt_scheme(adapter);
- }
- if (err) {
-#else
- if (igb_init_interrupt_scheme(adapter)) {
-#endif /* CONFIG_PM_RUNTIME */
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
return -ENOMEM;
}
E1000_WRITE_REG(hw, E1000_WUS, ~0);
if (netdev->flags & IFF_UP) {
+ rtnl_lock();
err = __igb_open(netdev, true);
+ rtnl_unlock();
if (err)
return err;
}
}
#ifdef CONFIG_PM_RUNTIME
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
{
return igb_resume(dev);
}
-#endif /* CONFIG_PM_RUNTIME */
#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */
#ifdef USE_REBOOT_NOTIFIER
goto skip_bad_vf_detection;
bdev = pdev->bus->self;
- while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
bdev = bdev->bus->self;
if (!bdev)
E1000_DEV_ID_82576_VF, vfdev);
}
/*
- * There's a slim chance the VF could have been hot plugged,
- * so if it is no longer present we don't need to issue the
- * VFLR. Just clean up the AER in that case.
- */
+ * There's a slim chance the VF could have been hot plugged,
+ * so if it is no longer present we don't need to issue the
+ * VFLR. Just clean up the AER in that case.
+ */
if (vfdev) {
dev_err(pci_dev_to_dev(pdev),
"Issuing VFLR to VF %d\n", vf);
}
/*
- * Even though the error may have occurred on the other port
- * we still need to increment the vf error reference count for
- * both ports because the I/O resume function will be called
- * for both of them.
- */
+ * Even though the error may have occurred on the other port
+ * we still need to increment the vf error reference count for
+ * both ports because the I/O resume function will be called
+ * for both of them.
+ */
adapter->vferr_refcount++;
return PCI_ERS_RESULT_RECOVERED;
return 100;
case SPEED_1000:
return 1000;
+ case SPEED_2500:
+ return 2500;
default:
return 0;
}
bool reset_rate = false;
/* VF TX rate limit was not set */
- if ((adapter->vf_rate_link_speed == 0) ||
+ if ((adapter->vf_rate_link_speed == 0) ||
(adapter->hw.mac.type != e1000_82576))
return;
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int actual_link_speed;
-
+
if (hw->mac.type != e1000_82576)
return -EOPNOTSUPP;
ivi->tx_rate = adapter->vf_data[vf].tx_rate;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
+#endif
return 0;
}
#endif
static void igb_vmm_control(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ int count;
u32 reg;
switch (hw->mac.type) {
reg |= E1000_RPLOLR_STRVLAN;
E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
case e1000_i350:
+ case e1000_i354:
/* none of the above registers are supported by i350 */
break;
}
/* Enable Malicious Driver Detection */
- if ((hw->mac.type == e1000_i350) && (adapter->vfs_allocated_count) &&
- (adapter->mdd))
- igb_enable_mdd(adapter);
-
- /* enable replication and loopback support */
- e1000_vmdq_set_loopback_pf(hw, adapter->vfs_allocated_count ||
- adapter->vmdq_pools);
-
- e1000_vmdq_set_anti_spoofing_pf(hw, adapter->vfs_allocated_count ||
- adapter->vmdq_pools,
- adapter->vfs_allocated_count);
+ if ((adapter->vfs_allocated_count) &&
+ (adapter->mdd)) {
+ if (hw->mac.type == e1000_i350)
+ igb_enable_mdd(adapter);
+ }
+
+ /* enable replication and loopback support */
+ count = adapter->vfs_allocated_count || adapter->vmdq_pools;
+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count)
+ e1000_vmdq_set_loopback_pf(hw, 1);
+ e1000_vmdq_set_anti_spoofing_pf(hw,
+ adapter->vfs_allocated_count || adapter->vmdq_pools,
+ adapter->vfs_allocated_count);
e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count ||
adapter->vmdq_pools);
}
-static void igb_init_fw(struct igb_adapter *adapter)
+static void igb_init_fw(struct igb_adapter *adapter)
{
struct e1000_fw_drv_info fw_cmd;
struct e1000_hw *hw = &adapter->hw;
int i;
u16 mask;
- mask = E1000_SWFW_PHY0_SM;
+ if (hw->mac.type == e1000_i210)
+ mask = E1000_SWFW_EEP_SM;
+ else
+ mask = E1000_SWFW_PHY0_SM;
+ /* i211 parts do not support this feature */
+ if (hw->mac.type == e1000_i211)
+ hw->mac.arc_subsystem_valid = false;
if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) {
for (i = 0; i <= FW_MAX_RETRIES; i++) {
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 status;
+
+ if (hw->mac.type == e1000_i211)
+ return;
if (hw->mac.type > e1000_82580) {
if (adapter->dmac != IGB_DMAC_DISABLE) {
& E1000_FCRTC_RTH_COAL_MASK);
E1000_WRITE_REG(hw, E1000_FCRTC, reg);
- /*
+ /*
* Set the DMA Coalescing Rx threshold to PBA - 2 * max
- * frame size, capping it at PBA - 10KB.
- */
+ * frame size, capping it at PBA - 10KB.
+ */
dmac_thr = pba - adapter->max_frame_size / 512;
if (dmac_thr < pba - 10)
dmac_thr = pba - 10;
/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
- /* watchdog timer= msec values in 32usec intervals */
- reg |= ((adapter->dmac) >> 5);
+ /* Check for a 2.5Gb backplane connection before
+ * configuring the watchdog timer: the timer field
+ * counts 12.8usec intervals on 2.5Gb links and
+ * 32usec intervals otherwise
+ */
+ if (hw->mac.type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ (!(status & E1000_STATUS_2P5_SKU_OVER)))
+ reg |= ((adapter->dmac * 5) >> 6);
+ else
+ reg |= ((adapter->dmac) >> 5);
+ } else {
+ reg |= ((adapter->dmac) >> 5);
+ }
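The shifts are unit conversions into those tick sizes, assuming adapter->dmac carries the coalescing watchdog in microseconds (the DMAC option values suggest as much): usec / 32 is >> 5, and usec / 12.8 = usec * 5 / 64 is (dmac * 5) >> 6. A quick check with a hypothetical 1000 usec setting:

#include <stdio.h>

int main(void)
{
	unsigned int dmac = 1000; /* hypothetical watchdog value, in usec */

	/* 31 ticks * 32.0 usec ~= 992 usec */
	printf("1Gb:   %u ticks\n", dmac >> 5);
	/* 78 ticks * 12.8 usec ~= 998 usec */
	printf("2.5Gb: %u ticks\n", (dmac * 5) >> 6);
	return 0;
}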
+
+ /*
+ * Disable BMC-to-OS Watchdog enable
+ * on devices that support OS-to-BMC
+ */
+ if (hw->mac.type != e1000_i354)
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
E1000_WRITE_REG(hw, E1000_DMACR, reg);
/* no lower threshold to disable coalescing (smart fifo) - UTRESH=0 */
E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
- /*
- * This sets the time to wait before requesting transition to
- * low power state to number of usecs needed to receive 1 512
- * byte frame at gigabit line rate
+ /* This sets the time to wait before requesting
+ * transition to low power state to number of usecs
+ * needed to receive 1 512 byte frame at gigabit
+ * line rate. On i350 device, time to make transition
+ * to Lx state is delayed by 4 usec with flush disable
+ * bit set to avoid losing mailbox interrupts
*/
- reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
+ reg = E1000_READ_REG(hw, E1000_DMCTLX);
+ if (hw->mac.type == e1000_i350)
+ reg |= IGB_DMCTLX_DCFLUSH_DIS;
+ /* on a 2.5Gb connection the TTLX unit is 0.4 usec,
+ * so the same 4 usec delay is 4 / 0.4 = 10 = 0xA
+ */
+ if (hw->mac.type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ (!(status & E1000_STATUS_2P5_SKU_OVER)))
+ reg |= 0xA;
+ else
+ reg |= 0x4;
+ } else {
+ reg |= 0x4;
+ }
E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
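For reference, the 4 usec target follows from the comment above it: one 512-byte frame at gigabit line rate takes 512 * 8 bits / 1 Gb/s ~= 4.1 usec, written as 0x4 in what are apparently 1-usec TTLX units; at the 2.5Gb SKU's 0.4 usec granularity the same delay becomes 4 / 0.4 = 10 = 0xA.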
/* free space in tx packet buffer to wake from DMA coal */
}
}
+#ifdef HAVE_I2C_SUPPORT
+/* igb_read_i2c_byte - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+ s32 status;
+ u16 swfw_mask = 0;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+
+ status = i2c_smbus_read_byte_data(this_client, byte_offset);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status < 0)
+ return E1000_ERR_I2C;
+ else {
+ *data = status;
+ return E1000_SUCCESS;
+ }
+}
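A hedged usage sketch for the helper above, from code that already has hw in scope; the 0x60 offset and 0xA0 device address are purely illustrative, not addresses this driver defines:

u8 val;
s32 ret;

ret = igb_read_i2c_byte(hw, 0x60 /* hypothetical offset */,
			0xA0 /* hypothetical dev addr */, &val);
if (ret == E1000_SUCCESS)
	printk(KERN_INFO "igb: i2c byte = 0x%02x\n", val);
else
	printk(KERN_ERR "igb: i2c read failed (%d)\n", ret);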
+
+/* igb_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+ s32 status;
+ u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status)
+ return E1000_ERR_I2C;
+ else
+ return E1000_SUCCESS;
+}
+#endif /* HAVE_I2C_SUPPORT */
/* igb_main.c */
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
-#endif /* NO_KNI */
+
err = -ENOMEM;
+#endif /* NO_KNI */
#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
IGB_MAX_TX_QUEUES);
if (!netdev)
goto err_alloc_etherdev;
-
SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
//pci_set_drvdata(pdev, netdev);
-
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
-#ifdef ETH_TP_MDI_X
- hw->phy.mdix = ETH_TP_MDI_INVALID;
-#else
hw->phy.mdix = AUTO_ALL_MODES;
-#endif /* ETH_TP_MDI_X */
hw->phy.disable_polarity_correction = FALSE;
hw->phy.ms_type = e1000_ms_hw_default;
}
#ifdef NETIF_F_RXHASH
NETIF_F_RXHASH |
#endif
-#ifdef HAVE_NDO_SET_FEATURES
NETIF_F_RXCSUM |
-#endif
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+#else
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_TX;
+#endif
+
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
#ifdef HAVE_NDO_SET_FEATURES
/* copy netdev features into list of user selectable features */
#endif
/* set this bit last since it cannot be part of hw_features */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#else
netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
#ifdef HAVE_NETDEV_VLAN_FEATURES
netdev->vlan_features |= NETIF_F_TSO |
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
-
#ifdef NO_KNI
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+#ifdef DEBUG
+ if (adapter->dmac != IGB_DMAC_DISABLE)
+ printk("%s: DMA Coalescing is enabled.\n", netdev->name);
+#endif
/* before reading the NVM, reset the controller to put the device in a
* known good starting state */
e1000_reset_hw(hw);
-#endif
+#endif /* NO_KNI */
/* make sure the NVM is good */
if (e1000_validate_nvm_checksum(hw) < 0) {
igb_rar_set(adapter, 0);
/* get firmware version for ethtool -i */
- e1000_read_nvm(&adapter->hw, 5, 1, &adapter->fw_version);
+ igb_set_fw_version(adapter);
+
+ /* Check if Media Autosense is enabled */
+ if (hw->mac.type == e1000_82580)
+ igb_init_mas(adapter);
+
#ifdef NO_KNI
setup_timer(&adapter->watchdog_timer, &igb_watchdog,
(unsigned long) adapter);
e1000_validate_mdi_setting(hw);
- /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
- * enable the ACPI Magic Packet filter
- */
-
+ /* By default, support wake on port A */
if (hw->bus.func == 0)
- e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type >= e1000_82580)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support for non-port A ports */
+ if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
if (eeprom_data & IGB_EEPROM_APME)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
* lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82576_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
}
/* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
#ifdef NO_KNI
- device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol);
+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igb_reset(adapter);
+ adapter->devrc = 0;
+
+#ifdef HAVE_I2C_SUPPORT
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+#endif /* HAVE_I2C_SUPPORT */
/* let the f/w know that the h/w is now under the control of the
* driver. */
}
#endif
-#ifdef HAVE_HW_TIME_STAMP
+#ifdef HAVE_PTP_1588_CLOCK
/* do hw tstamp init after resetting */
- igb_init_hw_timer(adapter);
-
-#endif
+ igb_ptp_init(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
#endif /* NO_KNI */
dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
(hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
"unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4\n" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2\n" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1\n" :
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
"unknown"));
dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
for (i = 0; i < 6; i++)
dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
pba_str);
+
/* Initialize the thermal sensor on i350 devices. */
- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
- u16 ets_word;
+ if (hw->mac.type == e1000_i350) {
+ if (hw->bus.func == 0) {
+ u16 ets_word;
- /*
- * Read the NVM to determine if this i350 device supports an
- * external thermal sensor.
- */
- e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
- if (ets_word != 0x0000 && ets_word != 0xFFFF)
- adapter->ets = true;
- else
- adapter->ets = false;
+ /*
+ * Read the NVM to determine if this i350 device
+ * supports an external thermal sensor.
+ */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ }
#ifdef NO_KNI
-#ifdef IGB_SYSFS
+#ifdef IGB_HWMON
+
igb_sysfs_init(adapter);
#else
#ifdef IGB_PROCFS
+
igb_procfs_init(adapter);
#endif /* IGB_PROCFS */
-#endif /* IGB_SYSFS */
+#endif /* IGB_HWMON */
#endif /* NO_KNI */
} else {
adapter->ets = false;
}
- switch (hw->mac.type) {
- case e1000_i350:
- /* Enable EEE for internal copper PHY devices */
- if (hw->phy.media_type == e1000_media_type_copper)
- e1000_set_eee_i350(hw);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = e1000_set_eee_i350(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ break;
+ case e1000_i354:
+ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
+ (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = e1000_set_eee_i354(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ }
+ break;
+ default:
+ break;
+ }
+ }
- /* send driver version info to firmware */
+ /* send driver version info to firmware */
+ if (hw->mac.type >= e1000_i350)
igb_init_fw(adapter);
- break;
- default:
- break;
- }
+
#ifndef IGB_NO_LRO
if (netdev->features & NETIF_F_LRO)
dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled \n");
return 0;
//err_register:
- //igb_release_hw_control(adapter);
+// igb_release_hw_control(adapter);
+#ifdef HAVE_I2C_SUPPORT
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+#endif /* HAVE_I2C_SUPPORT */
err_eeprom:
- //if (!e1000_check_reset_block(hw))
- // e1000_phy_hw_reset(hw);
+// if (!e1000_check_reset_block(hw))
+// e1000_phy_hw_reset(hw);
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
- //igb_clear_interrupt_scheme(adapter);
- //igb_reset_sriov_capability(adapter);
+// igb_clear_interrupt_scheme(adapter);
+// igb_reset_sriov_capability(adapter);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- //pci_release_selected_regions(pdev,
- // pci_select_bars(pdev, IORESOURCE_MEM));
+// pci_release_selected_regions(pdev,
+// pci_select_bars(pdev, IORESOURCE_MEM));
//err_pci_reg:
//err_dma:
pci_disable_device(pdev);
{
pci_disable_device(pdev);
}
-
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
*/
#define IGB_PARAM(X, desc) \
- static const int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
MODULE_PARM_DESC(X, desc);
#else
#define IGB_PARAM(X, desc) \
- static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
*
* Default Value: 1
*/
-IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1=number of cpus");
+IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
#define DEFAULT_RSS 1
#define MAX_RSS 8
IGB_PARAM(MDD, "Malicious Driver Detection (0/1), default 1 = enabled. "
"Only available when max_vfs is greater than 0");
+#ifdef DEBUG
+
+/* Disable Hardware Reset on Tx Hang
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled, i.e. h/w will reset)
+ */
+IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
+
+/* Dump Transmit and Receive buffers
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
+
+#endif /* DEBUG */
/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
*
*
* Default Value: 1
*/
-IGB_PARAM(QueuePairs, "Enable TX/RX queue pairs for interrupt handling (0,1), default 1=on");
+IGB_PARAM(QueuePairs, "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
#define DEFAULT_QUEUE_PAIRS 1
#define MAX_QUEUE_PAIRS 1
} arg;
};
-static int __devinit igb_validate_option(unsigned int *value,
- struct igb_option *opt,
- struct igb_adapter *adapter)
+static int igb_validate_option(unsigned int *value,
+ struct igb_option *opt,
+ struct igb_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
* in a variable in the adapter structure.
**/
-void __devinit igb_check_options(struct igb_adapter *adapter)
+void igb_check_options(struct igb_adapter *adapter)
{
int bd = adapter->bd_number;
struct e1000_hw *hw = &adapter->hw;
case 0:
DPRINTK(PROBE, INFO, "%s turned off\n",
opt.name);
- if(hw->mac.type >= e1000_i350)
+ if (hw->mac.type >= e1000_i350)
adapter->dmac = IGB_DMAC_DISABLE;
adapter->rx_itr_setting = itr;
break;
switch (hw->mac.type) {
case e1000_82575:
case e1000_82580:
+ case e1000_i210:
+ case e1000_i211:
+ case e1000_i354:
adapter->vfs_allocated_count = 0;
DPRINTK(PROBE, INFO, "SR-IOV option max_vfs not supported.\n");
default:
.arg = { .r = { .min = MIN_VMDQ,
.max = (MAX_VMDQ - adapter->vfs_allocated_count) } }
};
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211)) {
#ifdef module_param_array
if (num_VMDQ > bd) {
#endif
adapter->vmdq_pools = 0;
}
#endif
+
+ } else {
+ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n");
+ adapter->vmdq_pools = opt.def;
+ }
}
{ /* RSS - Enable RSS multiqueue receives */
struct igb_option opt = {
.max = MAX_RSS } }
};
- if (adapter->vmdq_pools) {
- switch (hw->mac.type) {
-#ifndef CONFIG_IGB_VMDQ_NETDEV
- case e1000_82576:
- opt.arg.r.max = 2;
- break;
- case e1000_82575:
- if (adapter->vmdq_pools == 2)
- opt.arg.r.max = 3;
- if (adapter->vmdq_pools <= 2)
- break;
-#endif
- default:
- opt.arg.r.max = 1;
- break;
- }
- }
-
switch (hw->mac.type) {
case e1000_82575:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools) {
+ if (adapter->vmdq_pools <= 2) {
+ if (adapter->vmdq_pools == 2)
+ opt.arg.r.max = 3;
+ } else {
+ opt.arg.r.max = 1;
+ }
+ } else {
+ opt.arg.r.max = 4;
+ }
+#else
+ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ break;
+ case e1000_i210:
opt.arg.r.max = 4;
break;
+ case e1000_i211:
+ opt.arg.r.max = 2;
+ break;
+ case e1000_82576:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 2;
+ break;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
default:
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 1;
break;
}
+ if (adapter->int_mode != IGB_INT_MODE_MSIX) {
+ DPRINTK(PROBE, INFO, "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n",
+ opt.err);
+ opt.arg.r.max = 1;
+ }
+
#ifdef module_param_array
if (num_RSS > bd) {
#endif
}
#endif
}
- { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */
+ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
struct igb_option opt = {
.type = enable_option,
- .name = "QueuePairs - TX/RX queue pairs for interrupt handling",
+ .name = "QueuePairs - Tx/Rx queue pairs for interrupt handling",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
#endif
unsigned int qp = QueuePairs[bd];
/*
- * we must enable queue pairs if the number of queues
- * exceeds the number of avaialble interrupts. We are
- * limited to 10, or 3 per unallocated vf.
+ * We must enable queue pairs if the number of queues
+ * exceeds the number of available interrupts. We are
+ * limited to 10, or 3 per unallocated vf. On I210 and
+ * I211 devices, we are limited to 5 interrupts.
+ * However, since I211 only supports 2 queues, we do not
+ * need to check and override the user option.
*/
- if ((adapter->rss_queues > 4) ||
- (adapter->vmdq_pools > 4) ||
- ((adapter->rss_queues > 1) &&
- ((adapter->vmdq_pools > 3) ||
- (adapter->vfs_allocated_count > 6)))) {
- if (qp == OPTION_DISABLED) {
+ if (qp == OPTION_DISABLED) {
+ if (adapter->rss_queues > 4)
qp = OPTION_ENABLED;
- DPRINTK(PROBE, INFO,
- "Number of queues exceeds available interrupts, %s\n",opt.err);
- }
+
+ if (adapter->vmdq_pools > 4)
+ qp = OPTION_ENABLED;
+
+ if (adapter->rss_queues > 1 &&
+ (adapter->vmdq_pools > 3 ||
+ adapter->vfs_allocated_count > 6))
+ qp = OPTION_ENABLED;
+
+ if (hw->mac.type == e1000_i210 &&
+ adapter->rss_queues > 2)
+ qp = OPTION_ENABLED;
+
+ if (qp == OPTION_ENABLED)
+ DPRINTK(PROBE, INFO, "Number of queues exceeds available interrupts, %s\n",
+ opt.err);
}
igb_validate_option(&qp, &opt, adapter);
adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
#endif
}
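The overrides above are straight vector accounting against the limits named in the comment (10 MSI-X vectors, 5 on I210/I211): with pairing disabled every queue wants separate Tx and Rx vectors, so for example 8 RSS queues would ask for 16 vectors against a budget of 10 and pairing is forced on (the rss_queues > 4 test), while the same 8 queues fit once paired; on I210 the same arithmetic against 5 vectors forces pairing beyond 2 queues (the rss_queues > 2 test).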
#endif /* IGB_NO_LRO */
- { /* Node assignment */
- static struct igb_option opt = {
- .type = range_option,
- .name = "Node to start on",
- .err = "defaulting to -1",
-#ifdef HAVE_EARLY_VMALLOC_NODE
- .def = 0,
-#else
- .def = -1,
-#endif
- .arg = { .r = { .min = 0,
- .max = (MAX_NUMNODES - 1)}}
- };
- int node_param = opt.def;
-
- /* if the default was zero then we need to set the
- * default value to an online node, which is not
- * necessarily zero, and the constant initializer
- * above can't take first_online_node */
- if (node_param == 0)
- /* must set opt.def for validate */
- opt.def = node_param = first_online_node;
-
-#ifdef module_param_array
- if (num_Node > bd) {
-#endif
- node_param = Node[bd];
- igb_validate_option((uint *)&node_param, &opt, adapter);
-
- if (node_param != OPTION_UNSET) {
- DPRINTK(PROBE, INFO, "node set to %d\n", node_param);
- }
-#ifdef module_param_array
- }
-#endif
-
- /* check sanity of the value */
- if (node_param != -1 && !node_online(node_param)) {
- DPRINTK(PROBE, INFO,
- "ignoring node set to invalid value %d\n",
- node_param);
- node_param = opt.def;
- }
-
- adapter->node = node_param;
- }
{ /* MDD - Enable Malicious Driver Detection. Only available when
SR-IOV is enabled. */
struct igb_option opt = {
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include "e1000_hw.h"
#ifdef IGB_PROCFS
-#ifndef IGB_SYSFS
+#ifndef IGB_HWMON
#include <linux/module.h>
#include <linux/types.h>
static struct proc_dir_entry *igb_top_dir = NULL;
-static struct net_device_stats *procfs_get_stats(struct net_device *netdev)
-{
-#ifndef HAVE_NETDEV_STATS_IN_NETDEV
- struct igb_adapter *adapter;
-#endif
- if (netdev == NULL)
- return NULL;
-
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- /* only return the current stats */
- return &netdev->stats;
-#else
- adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
-}
bool igb_thermal_present(struct igb_adapter *adapter)
{
return true;
}
-static int igb_fwbanner(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- return snprintf(page, count, "%d.%d-%d\n",
- (adapter->fw_version & 0xF000) >> 12,
- (adapter->fw_version & 0x0FF0) >> 4,
- adapter->fw_version & 0x000F);
-}
-
-static int igb_numeports(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- int ports;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- ports = 4;
-
- return snprintf(page, count, "%d\n", ports);
-}
-
-static int igb_porttype(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- return snprintf(page, count, "%d\n",
- test_bit(__IGB_DOWN, &adapter->state));
-}
-
-static int igb_portspeed(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- int speed = 0;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- switch (adapter->link_speed) {
- case E1000_STATUS_SPEED_10:
- speed = 10;
- break;
- case E1000_STATUS_SPEED_100:
- speed = 100;
- break;
- case E1000_STATUS_SPEED_1000:
- speed = 1000;
- break;
- }
- return snprintf(page, count, "%d\n", speed);
-}
-
-static int igb_wqlflag(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- return snprintf(page, count, "%d\n", adapter->wol);
-}
-
-static int igb_xflowctl(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n", hw->fc.current_mode);
-}
-
-static int igb_rxdrops(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->rx_dropped);
-}
-
-static int igb_rxerrors(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n", net_stats->rx_errors);
-}
-
-static int igb_rxupacks(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n", E1000_READ_REG(hw, E1000_TPR));
-}
-
-static int igb_rxmpacks(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n",
- E1000_READ_REG(hw, E1000_MPRC));
-}
-
-static int igb_rxbpacks(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n",
- E1000_READ_REG(hw, E1000_BPRC));
-}
-
-static int igb_txupacks(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n", E1000_READ_REG(hw, E1000_TPT));
-}
-
-static int igb_txmpacks(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n",
- E1000_READ_REG(hw, E1000_MPTC));
-}
-
-static int igb_txbpacks(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "%d\n",
- E1000_READ_REG(hw, E1000_BPTC));
-
-}
-
-static int igb_txerrors(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->tx_errors);
-}
-
-static int igb_txdrops(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->tx_dropped);
-}
-
-static int igb_rxframes(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->rx_packets);
-}
-
-static int igb_rxbytes(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->rx_bytes);
-}
-
-static int igb_txframes(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->tx_packets);
-}
-
-static int igb_txbytes(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device_stats *net_stats;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- net_stats = procfs_get_stats(adapter->netdev);
- if (net_stats == NULL)
- return snprintf(page, count, "error: no net stats\n");
-
- return snprintf(page, count, "%lu\n",
- net_stats->tx_bytes);
-}
-
-static int igb_linkstat(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- int bitmask = 0;
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- if (test_bit(__IGB_DOWN, &adapter->state))
- bitmask |= 1;
-
- if (igb_has_link(adapter))
- bitmask |= 2;
- return snprintf(page, count, "0x%X\n", bitmask);
-}
-
-static int igb_funcid(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device* netdev;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- netdev = adapter->netdev;
- if (netdev == NULL)
- return snprintf(page, count, "error: no net device\n");
-
- return snprintf(page, count, "0x%lX\n", netdev->base_addr);
-}
-
-static int igb_funcvers(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device* netdev;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- netdev = adapter->netdev;
- if (netdev == NULL)
- return snprintf(page, count, "error: no net device\n");
-
- return snprintf(page, count, "%s\n", igb_driver_version);
-}
static int igb_macburn(char *page, char **start, off_t off, int count,
int *eof, void *data)
if (hw == NULL)
return snprintf(page, count, "error: no hw data\n");
- return snprintf(page, count, "0x%X%X%X%X%X%X\n",
+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
(unsigned int)hw->mac.perm_addr[0],
(unsigned int)hw->mac.perm_addr[1],
(unsigned int)hw->mac.perm_addr[2],
if (hw == NULL)
return snprintf(page, count, "error: no hw data\n");
- return snprintf(page, count, "0x%X%X%X%X%X%X\n",
+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
(unsigned int)hw->mac.addr[0],
(unsigned int)hw->mac.addr[1],
(unsigned int)hw->mac.addr[2],
(unsigned int)hw->mac.addr[5]);
}
-static int igb_maclla1(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int igb_numeports(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
{
struct e1000_hw *hw;
- u16 eeprom_buff[6];
- int first_word = 0x37;
- int word_count = 6;
- int rc;
-
+ int ports;
struct igb_adapter *adapter = (struct igb_adapter *)data;
if (adapter == NULL)
return snprintf(page, count, "error: no adapter\n");
if (hw == NULL)
return snprintf(page, count, "error: no hw data\n");
- rc = e1000_read_nvm(hw, first_word, word_count,
- eeprom_buff);
- if (rc != E1000_SUCCESS)
- return 0;
-
- switch (hw->bus.func) {
- case 0:
- return snprintf(page, count, "0x%04X%04X%04X\n",
- eeprom_buff[0],
- eeprom_buff[1],
- eeprom_buff[2]);
- case 1:
- return snprintf(page, count, "0x%04X%04X%04X\n",
- eeprom_buff[3],
- eeprom_buff[4],
- eeprom_buff[5]);
- }
- return snprintf(page, count, "unexpected port %d\n", hw->bus.func);
-}
-
-static int igb_mtusize(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device* netdev;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- netdev = adapter->netdev;
- if (netdev == NULL)
- return snprintf(page, count, "error: no net device\n");
-
- return snprintf(page, count, "%d\n", netdev->mtu);
-}
-
-static int igb_featflag(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- int bitmask = 0;
-#ifndef HAVE_NDO_SET_FEATURES
- struct igb_ring *ring;
-#endif
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device *netdev;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- netdev = adapter->netdev;
- if (netdev == NULL)
- return snprintf(page, count, "error: no net device\n");
-
-#ifndef HAVE_NDO_SET_FEATURES
- /* igb_get_rx_csum(netdev) doesn't compile so hard code */
- ring = adapter->rx_ring[0];
- bitmask = test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
- return snprintf(page, count, "%d\n", bitmask);
-#else
- if (netdev->features & NETIF_F_RXCSUM)
- bitmask |= 1;
- return snprintf(page, count, "%d\n", bitmask);
-#endif
-}
-
-static int igb_lsominct(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- return snprintf(page, count, "%d\n", 1);
-}
-
-static int igb_prommode(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct net_device *netdev;
-
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
- netdev = adapter->netdev;
- if (netdev == NULL)
- return snprintf(page, count, "error: no net device\n");
-
- return snprintf(page, count, "%d\n",
- netdev->flags & IFF_PROMISC);
-}
-
-static int igb_txdscqsz(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count);
-}
-
-static int igb_rxdscqsz(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count);
-}
-
-static int igb_rxqavg(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- int index;
- int totaldiff = 0;
- u16 ntc;
- u16 ntu;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- if (adapter->num_rx_queues <= 0)
- return snprintf(page, count,
- "can't calculate, number of queues %d\n",
- adapter->num_rx_queues);
-
- for (index = 0; index < adapter->num_rx_queues; index++) {
- ntc = adapter->rx_ring[index]->next_to_clean;
- ntu = adapter->rx_ring[index]->next_to_use;
-
- if (ntc >= ntu)
- totaldiff += (ntc - ntu);
- else
- totaldiff += (adapter->rx_ring[index]->count
- - ntu + ntc);
- }
- if (adapter->num_rx_queues <= 0)
- return snprintf(page, count,
- "can't calculate, number of queues %d\n",
- adapter->num_rx_queues);
- return snprintf(page, count, "%d\n", totaldiff/adapter->num_rx_queues);
-}
-
-static int igb_txqavg(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- int index;
- int totaldiff = 0;
- u16 ntc;
- u16 ntu;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- if (adapter->num_tx_queues <= 0)
- return snprintf(page, count,
- "can't calculate, number of queues %d\n",
- adapter->num_tx_queues);
-
- for (index = 0; index < adapter->num_tx_queues; index++) {
- ntc = adapter->tx_ring[index]->next_to_clean;
- ntu = adapter->tx_ring[index]->next_to_use;
-
- if (ntc >= ntu)
- totaldiff += (ntc - ntu);
- else
- totaldiff += (adapter->tx_ring[index]->count
- - ntu + ntc);
- }
- if (adapter->num_tx_queues <= 0)
- return snprintf(page, count,
- "can't calculate, number of queues %d\n",
- adapter->num_tx_queues);
- return snprintf(page, count, "%d\n",
- totaldiff/adapter->num_tx_queues);
-}
+ ports = 4;
-static int igb_iovotype(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- return snprintf(page, count, "2\n");
+ return snprintf(page, count, "%d\n", ports);
}
-static int igb_funcnbr(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int igb_porttype(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
{
struct igb_adapter *adapter = (struct igb_adapter *)data;
if (adapter == NULL)
return snprintf(page, count, "error: no adapter\n");
- return snprintf(page, count, "%d\n", adapter->vfs_allocated_count);
+ return snprintf(page, count, "%d\n",
+ test_bit(__IGB_DOWN, &adapter->state));
}
static int igb_therm_location(char *page, char **start, off_t off,
};
struct igb_proc_type igb_proc_entries[] = {
- {"fwbanner", &igb_fwbanner},
{"numeports", &igb_numeports},
{"porttype", &igb_porttype},
- {"portspeed", &igb_portspeed},
- {"wqlflag", &igb_wqlflag},
- {"xflowctl", &igb_xflowctl},
- {"rxdrops", &igb_rxdrops},
- {"rxerrors", &igb_rxerrors},
- {"rxupacks", &igb_rxupacks},
- {"rxmpacks", &igb_rxmpacks},
- {"rxbpacks", &igb_rxbpacks},
- {"txdrops", &igb_txdrops},
- {"txerrors", &igb_txerrors},
- {"txupacks", &igb_txupacks},
- {"txmpacks", &igb_txmpacks},
- {"txbpacks", &igb_txbpacks},
- {"rxframes", &igb_rxframes},
- {"rxbytes", &igb_rxbytes},
- {"txframes", &igb_txframes},
- {"txbytes", &igb_txbytes},
- {"linkstat", &igb_linkstat},
- {"funcid", &igb_funcid},
- {"funcvers", &igb_funcvers},
{"macburn", &igb_macburn},
{"macadmn", &igb_macadmn},
- {"maclla1", &igb_maclla1},
- {"mtusize", &igb_mtusize},
- {"featflag", &igb_featflag},
- {"lsominct", &igb_lsominct},
- {"prommode", &igb_prommode},
- {"txdscqsz", &igb_txdscqsz},
- {"rxdscqsz", &igb_rxdscqsz},
- {"txqavg", &igb_txqavg},
- {"rxqavg", &igb_rxqavg},
- {"iovotype", &igb_iovotype},
- {"funcnbr", &igb_funcnbr},
{"", NULL}
};
void igb_procfs_topdir_exit(void)
{
-// remove_proc_entry("driver", proc_root_driver);
remove_proc_entry("driver/igb", NULL);
}
return rc;
}
-#endif /* !IGB_SYSFS */
+#endif /* !IGB_HWMON */
#endif /* IGB_PROCFS */
--- /dev/null
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright(c) 2011 Richard Cochran <richardcochran@gmail.com> for some of the
+ 82576 and 82580 code
+******************************************************************************/
+
+#include "igb.h"
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+
+#define INCVALUE_MASK 0x7fffffff
+#define ISGN 0x80000000
+
+/*
+ * The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+ * nanoseconds time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits. As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
+ *
+ *
+ * SYSTIMH SYSTIML
+ * +--------------+ +---+---+------+
+ * 82576 | 32 | | 8 | 5 | 19 |
+ * +--------------+ +---+---+------+
+ * \________ 45 bits _______/ fract
+ *
+ * +----------+---+ +--------------+
+ * 82580 | 24 | 8 | | 32 |
+ * +----------+---+ +--------------+
+ * reserved \______ 40 bits _____/
+ *
+ *
+ * The 45 bit 82576 SYSTIM overflows every
+ * 2^45 * 10^-9 / 3600 = 9.77 hours.
+ *
+ * The 40 bit 82580 SYSTIM overflows every
+ * 2^40 * 10^-9 / 60 = 18.3 minutes.
+ */
+
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_PTP_TX_TIMEOUT (HZ * 15)
+#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580 40
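+
+/*
+ * Note: the overflow check period of 9 minutes is less than half of the
+ * 18.3 minute 82580 SYSTIM wrap documented above, so the delayed work
+ * reads the timecounter often enough to never miss a wrap.
+ */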
+
+/*
+ * SYSTIM read access for the 82576
+ */
+
+static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
+{
+ struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+ struct e1000_hw *hw = &igb->hw;
+ u64 val;
+ u32 lo, hi;
+
+ lo = E1000_READ_REG(hw, E1000_SYSTIML);
+ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ val = ((u64) hi) << 32;
+ val |= lo;
+
+ return val;
+}
+
+/*
+ * SYSTIM read access for the 82580
+ */
+
+static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
+{
+ struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+ struct e1000_hw *hw = &igb->hw;
+ u64 val;
+ u32 lo, hi;
+
+ /* The timestamp latches on lowest register read. For the 82580
+ * the lowest register is SYSTIMR instead of SYSTIML. However, we only
+ * need to provide nanosecond resolution, so we just ignore it.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ lo = E1000_READ_REG(hw, E1000_SYSTIML);
+ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ val = ((u64) hi) << 32;
+ val |= lo;
+
+ return val;
+}
+
+/*
+ * SYSTIM read access for I210/I211
+ */
+
+static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 sec, nsec;
+
+ /* The timestamp latches on lowest register read. For I210/I211, the
+ * lowest register is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore it.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ nsec = E1000_READ_REG(hw, E1000_SYSTIML);
+ sec = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void igb_ptp_write_i210(struct igb_adapter *adapter,
+ const struct timespec *ts)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * Writing the SYSTIMR register is not necessary as it only provides
+ * sub-nanosecond resolution.
+ */
+ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec);
+ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec);
+}
+
+/**
+ * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, we have extended the system time with an overflow
+ * counter in software.
+ **/
+static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ unsigned long flags;
+ u64 ns;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ /* Upper 32 bits contain seconds, lower 32 bits contain nanoseconds. */
+ hwtstamps->hwtstamp = ktime_set(systim >> 32,
+ systim & 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 incvalue;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
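+ /*
+ * Scale ppb into increment units: the nominal increment is
+ * 16 << 19 = 2^23, so the correction is ppb * 2^23 / 10^9, computed
+ * below as (ppb << 14) / 1953125 because 10^9 = 1953125 << 9.
+ * For example, ppb = 1000 (1 ppm) gives rate = 8, and 8 / 2^23 of
+ * the nominal increment is indeed ~1 ppm.
+ */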
+ rate = ppb;
+ rate <<= 14;
+ rate = div_u64(rate, 1953125);
+
+ incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+
+ if (neg_adj)
+ incvalue -= rate;
+ else
+ incvalue += rate;
+
+ E1000_WRITE_REG(hw, E1000_TIMINCA,
+ INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
+
+ return 0;
+}
+
+static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 inca;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
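+ /*
+ * The 82580 family adds 8 ns every 8 ns, and TIMINCA holds a 32 bit
+ * fractional nanosecond correction per period (see the comment at the
+ * top of this file), so the adjustment is ppb * 8 * 2^32 / 10^9,
+ * computed below as (ppb << 26) / 1953125 because 10^9 = 1953125 << 9.
+ */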
+ rate = ppb;
+ rate <<= 26;
+ rate = div_u64(rate, 1953125);
+
+ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x
+ * as quickly. Account for this by dividing the adjustment by 2.5.
+ */
+ if (hw->mac.type == e1000_i354) {
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ rate <<= 1;
+ rate = div_u64(rate, 5);
+ }
+ }
+
+ inca = rate & INCVALUE_MASK;
+ if (neg_adj)
+ inca |= ISGN;
+
+ E1000_WRITE_REG(hw, E1000_TIMINCA, inca);
+
+ return 0;
+}
+
+static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ s64 now;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ now = timecounter_read(&igb->tc);
+ now += delta;
+ timecounter_init(&igb->tc, &igb->cc, now);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ struct timespec now, then = ns_to_timespec(delta);
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, &now);
+ now = timespec_add(now, then);
+ igb_ptp_write_i210(igb, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ u64 ns;
+ u32 remainder;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ ns = timecounter_read(&igb->tc);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ timecounter_init(&igb->tc, &igb->cc, ns);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_write_i210(igb, ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * igb_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igb_ptp_tx_work(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work, struct igb_adapter,
+ ptp_tx_work);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
+ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
+ igb_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to check later */
+ schedule_work(&adapter->ptp_tx_work);
+}
+
+static void igb_ptp_overflow_check(struct work_struct *work)
+{
+ struct igb_adapter *igb =
+ container_of(work, struct igb_adapter, ptp_overflow_work.work);
+ struct timespec ts;
+
+ igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
+
+ pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+ schedule_delayed_work(&igb->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+}
+
+/**
+ * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
+ *
+ * This watchdog task is scheduled to detect the error case where hardware
+ * has dropped an Rx packet that was timestamped while the ring was full.
+ * The error is rare but leaves the device unable to timestamp any future
+ * packets.
+ */
+void igb_ptp_rx_hang(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *rx_ring;
+ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
+
+ if (hw->mac.type != e1000_82576)
+ return;
+
+ /* If we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ E1000_READ_REG(hw, E1000_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
+ adapter->rx_hwtstamp_cleared++;
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ }
+}
+
+/**
+ * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure.
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we
+ * allow only one such packet into the queue.
+ */
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 regval;
+
+ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
+ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+}
+
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting on
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+{
+ __le64 *regval = (__le64 *)va;
+
+ /*
+ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+ igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[1]));
+}
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u64 regval;
+
+ /*
+ * If this bit is set, then the RX registers contain the time stamp. No
+ * other packet will be time stamped until we read these registers, so
+ * read the registers to make them available again. Because only one
+ * packet can be time stamped at a time, we know that the register
+ * values must belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared tx_flags that we
+ * can turn into a skb_shared_hwtstamps.
+ */
+ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
+ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: pointer to the net device structure
+ * @ifr: pointer to the interface request holding the hwtstamp_config
+ * @cmd: ioctl command number
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
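+ /* fall through */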
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * 82576 cannot timestamp all packets, which it needs to do to
+ * support both V1 Sync and Delay_Req messages
+ */
+ if (hw->mac.type != e1000_82576) {
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ /* fall through */
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ if (hw->mac.type == e1000_82575) {
+ if (tsync_rx_ctl | tsync_tx_ctl)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as
+ * long as one rx filter was configured.
+ */
+ if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
+
+ if ((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ regval = E1000_READ_REG(hw, E1000_RXPBS);
+ regval |= E1000_RXPBS_CFG_TS_EN;
+ E1000_WRITE_REG(hw, E1000_RXPBS, regval);
+ }
+ }
+
+ /* enable/disable TX */
+ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ E1000_WRITE_REG(hw, E1000_ETQF(3),
+ (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+ E1000_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
+
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | E1000_FTQF_VF_BP /* VF not compared */
+ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+ | E1000_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(3),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ if (hw->mac.type == e1000_82576) {
+ /* enable source port check */
+ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT));
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
+ } else {
+ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
+ }
+ E1000_WRITE_FLUSH(hw);
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
+ regval = E1000_READ_REG(hw, E1000_TXSTMPH);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPH);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
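+
+/*
+ * For reference, a minimal userspace sketch of driving this ioctl (an
+ * illustration only; the socket fd and "eth0" are assumptions, and the
+ * types come from <linux/net_tstamp.h> and <linux/sockios.h>):
+ *
+ *   struct hwtstamp_config cfg = {0};
+ *   struct ifreq ifr = {0};
+ *
+ *   cfg.tx_type = HWTSTAMP_TX_ON;
+ *   cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *   ifr.ifr_data = (void *)&cfg;
+ *   if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
+ *           perror("SIOCSHWTSTAMP");
+ */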
+
+void igb_ptp_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 999999881;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82576;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
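+ /* timecounter converts via ns = (cycles * mult) >> shift, so
+ * mult = 1 with shift = IGB_82576_TSYNC_SHIFT (19) drops the
+ * 19 fractional bits described at the top of this file.
+ */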
+ /* Dial the nominal frequency. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82580;
+ adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = 0;
+ /* Enable the timer functions by clearing bit 31. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
+ adapter->ptp_caps.settime = igb_ptp_settime_i210;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ /* Enable the timer functions by clearing bit 31. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ break;
+ default:
+ adapter->ptp_clock = NULL;
+ return;
+ }
+
+ E1000_WRITE_FLUSH(hw);
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+ /* Initialize the clock and overflow work for devices that need it. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
+
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+ igb_ptp_overflow_check);
+
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+ }
+
+ /* Initialize the time sync interrupts for devices that support it. */
+ if (hw->mac.type >= e1000_82580) {
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ }
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ } else {
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->flags |= IGB_FLAG_PTP;
+ }
+}
+
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ /* No delayed work to cancel. */
+ break;
+ default:
+ return;
+ }
+
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+ adapter->netdev->name);
+ adapter->flags &= ~IGB_FLAG_PTP;
+ }
+}
+
+/**
+ * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
+ *
+ * This function handles the reset work required to re-enable the PTP device.
+ **/
+void igb_ptp_reset(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags & IGB_FLAG_PTP))
+ return;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* Dial the nominal frequency. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable the timer functions and interrupts. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+ return;
+ }
+
+ /* Re-initialize the timer. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
+
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+}
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
+/* i210 reg test */
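+/* Each entry below follows struct igb_reg_test: register, spacing between
+ * array elements, element count, test type, and the test mask/write values.
+ */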
+static struct igb_reg_test reg_test_i210[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i210, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x900FFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
/* i350 reg test */
static struct igb_reg_test reg_test_i350[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+++ /dev/null
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "igb.h"
-#include "e1000_82575.h"
-#include "e1000_hw.h"
-#ifdef IGB_SYSFS
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-
-static struct net_device_stats *sysfs_get_stats(struct net_device *netdev)
-{
-#ifndef HAVE_NETDEV_STATS_IN_NETDEV
- struct igb_adapter *adapter;
-#endif
- if (netdev == NULL)
- return NULL;
-
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- /* only return the current stats */
- return &netdev->stats;
-#else
- adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
-}
-
-struct net_device *igb_get_netdev(struct kobject *kobj)
-{
- struct net_device *netdev;
- struct kobject *parent = kobj->parent;
- struct device *device_info_kobj;
-
- if (kobj == NULL)
- return NULL;
-
- device_info_kobj = container_of(parent, struct device, kobj);
- if (device_info_kobj == NULL)
- return NULL;
-
- netdev = container_of(device_info_kobj, struct net_device, dev);
- return netdev;
-}
-struct igb_adapter *igb_get_adapter(struct kobject *kobj)
-{
- struct igb_adapter *adapter;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return NULL;
- adapter = netdev_priv(netdev);
- return adapter;
-}
-
-bool igb_thermal_present(struct kobject *kobj)
-{
- s32 status;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
-
- if (adapter == NULL)
- return false;
-
- /*
- * Only set I2C bit-bang mode if an external thermal sensor is
- * supported on this device.
- */
- if (adapter->ets) {
- status = e1000_set_i2c_bb(&(adapter->hw));
- if (status != E1000_SUCCESS)
- return false;
- }
-
- status = e1000_init_thermal_sensor_thresh(&(adapter->hw));
- if (status != E1000_SUCCESS)
- return false;
-
- return true;
-}
-
-/*
- * Convert the directory to the sensor offset.
- *
- * Note: We know the name will be in the form of 'sensor_n' where 'n' is 0
- * - 'IGB_MAX_SENSORS'. E1000_MAX_SENSORS < 10.
- */
-static int igb_name_to_idx(const char *c) {
-
- /* find first digit */
- while (*c < '0' || *c > '9') {
- if (*c == '\n')
- return -1;
- c++;
- }
-
- return ((int)(*c - '0'));
-}
-
-/*
- * We are a statistics entry; we do not take in data-this should be the
- * same for all attributes
- */
-static ssize_t igb_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- return -1;
-}
-
-static ssize_t igb_fwbanner(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- u16 nvm_ver;
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
- nvm_ver = adapter->fw_version;
-
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", nvm_ver);
-}
-
-static ssize_t igb_numeports(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- int ports = 0;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- /* CMW taking the original out so and assigning ports generally
- * by mac type for now. Want to have the daemon handle this some
- * other way due to the variability of the 1GB parts.
- */
- switch (hw->mac.type) {
- case e1000_82575:
- ports = 2;
- break;
- case e1000_82576:
- ports = 2;
- break;
- case e1000_82580:
- case e1000_i350:
- ports = 4;
- break;
- default:
- break;
- }
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ports);
-}
-
-static ssize_t igb_porttype(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- test_bit(__IGB_DOWN, &adapter->state));
-}
-
-static ssize_t igb_portspeed(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- int speed = 0;
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- switch (adapter->link_speed) {
- case E1000_STATUS_SPEED_10:
- speed = 10;
- break;;
- case E1000_STATUS_SPEED_100:
- speed = 100;
- break;
- case E1000_STATUS_SPEED_1000:
- speed = 1000;
- break;
- }
- return snprintf(buf, PAGE_SIZE, "%d\n", speed);
-}
-
-static ssize_t igb_wqlflag(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", adapter->wol);
-}
-
-static ssize_t igb_xflowctl(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", hw->fc.current_mode);
-}
-
-static ssize_t igb_rxdrops(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
-
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->rx_dropped);
-}
-
-static ssize_t igb_rxerrors(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
- return snprintf(buf, PAGE_SIZE, "%lu\n", net_stats->rx_errors);
-}
-
-static ssize_t igb_rxupacks(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_TPR));
-}
-
-static ssize_t igb_rxmpacks(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_MPRC));
-}
-
-static ssize_t igb_rxbpacks(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_BPRC));
-}
-
-static ssize_t igb_txupacks(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_TPT));
-}
-
-static ssize_t igb_txmpacks(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_MPTC));
-}
-
-static ssize_t igb_txbpacks(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_BPTC));
-
-}
-
-static ssize_t igb_txerrors(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
-
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->tx_errors);
-}
-
-static ssize_t igb_txdrops(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->tx_dropped);
-}
-
-static ssize_t igb_rxframes(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
-
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->rx_packets);
-}
-
-static ssize_t igb_rxbytes(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
-
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->rx_bytes);
-}
-
-static ssize_t igb_txframes(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
-
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->tx_packets);
-}
-
-static ssize_t igb_txbytes(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device_stats *net_stats;
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- net_stats = sysfs_get_stats(netdev);
- if (net_stats == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net stats\n");
-
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- net_stats->tx_bytes);
-}
-
-static ssize_t igb_linkstat(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- bool link_up = false;
- int bitmask = 0;
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- if (test_bit(__IGB_DOWN, &adapter->state))
- bitmask |= 1;
-
- if (hw->mac.ops.check_for_link) {
- hw->mac.ops.check_for_link(hw);
- }
- else {
- /* always assume link is up, if no check link function */
- link_up = true;
- }
- if (link_up)
- bitmask |= 2;
- return snprintf(buf, PAGE_SIZE, "0x%X\n", bitmask);
-}
-
-static ssize_t igb_funcid(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- return snprintf(buf, PAGE_SIZE, "0x%lX\n", netdev->base_addr);
-}
-
-static ssize_t igb_funcvers(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", igb_driver_version);
-}
-
-static ssize_t igb_macburn(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "0x%X%X%X%X%X%X\n",
- (unsigned int)hw->mac.perm_addr[0],
- (unsigned int)hw->mac.perm_addr[1],
- (unsigned int)hw->mac.perm_addr[2],
- (unsigned int)hw->mac.perm_addr[3],
- (unsigned int)hw->mac.perm_addr[4],
- (unsigned int)hw->mac.perm_addr[5]);
-}
-
-static ssize_t igb_macadmn(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- struct e1000_hw *hw;
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return snprintf(buf, PAGE_SIZE, "0x%X%X%X%X%X%X\n",
- (unsigned int)hw->mac.addr[0],
- (unsigned int)hw->mac.addr[1],
- (unsigned int)hw->mac.addr[2],
- (unsigned int)hw->mac.addr[3],
- (unsigned int)hw->mac.addr[4],
- (unsigned int)hw->mac.addr[5]);
-}
-
-static ssize_t igb_maclla1(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct e1000_hw *hw;
- u16 eeprom_buff[6];
- int first_word = 0x37;
- int word_count = 6;
- int rc;
-
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
-
- return 0;
-
- rc = e1000_read_nvm(hw, first_word, word_count,
- eeprom_buff);
- if (rc != E1000_SUCCESS)
- return 0;
-
- switch (hw->bus.func) {
- case 0:
- return snprintf(buf, PAGE_SIZE, "0x%04X%04X%04X\n",
- eeprom_buff[0], eeprom_buff[1], eeprom_buff[2]);
- case 1:
- return snprintf(buf, PAGE_SIZE, "0x%04X%04X%04X\n",
- eeprom_buff[3], eeprom_buff[4], eeprom_buff[5]);
- }
- return snprintf(buf, PAGE_SIZE, "unexpected port %d\n", hw->bus.func);
-}
-
-static ssize_t igb_mtusize(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", netdev->mtu);
-}
-
-static ssize_t igb_featflag(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
-#ifdef HAVE_NDO_SET_FEATURES
- int bitmask = 0;
-#endif
- struct net_device *netdev = igb_get_netdev(kobj);
- struct igb_adapter *adapter = igb_get_adapter(kobj);
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
-#ifndef HAVE_NDO_SET_FEATURES
- /* igb_get_rx_csum(netdev) doesn't compile so hard code */
- return snprintf(buf, PAGE_SIZE, "%d\n",
- test_bit(IGB_RING_FLAG_RX_CSUM,
- &adapter->rx_ring[0]->flags));
-#else
- if (netdev->features & NETIF_F_RXCSUM)
- bitmask |= 1;
- return snprintf(buf, PAGE_SIZE, "%d\n", bitmask);
-#endif
-}
-
-static ssize_t igb_lsominct(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", 1);
-}
-
-static ssize_t igb_prommode(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct net_device *netdev = igb_get_netdev(kobj);
- if (netdev == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no net device\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- netdev->flags & IFF_PROMISC);
-}
-
-static ssize_t igb_txdscqsz(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", adapter->tx_ring[0]->count);
-}
-
-static ssize_t igb_rxdscqsz(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", adapter->rx_ring[0]->count);
-}
-
-static ssize_t igb_rxqavg(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int index;
- int diff = 0;
- u16 ntc;
- u16 ntu;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- for (index = 0; index < adapter->num_rx_queues; index++) {
- ntc = adapter->rx_ring[index]->next_to_clean;
- ntu = adapter->rx_ring[index]->next_to_use;
-
- if (ntc >= ntu)
- diff += (ntc - ntu);
- else
- diff += (adapter->rx_ring[index]->count - ntu + ntc);
- }
- if (adapter->num_rx_queues <= 0)
- return snprintf(buf, PAGE_SIZE,
- "can't calculate, number of queues %d\n",
- adapter->num_rx_queues);
- return snprintf(buf, PAGE_SIZE, "%d\n", diff/adapter->num_rx_queues);
-}
-
-static ssize_t igb_txqavg(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int index;
- int diff = 0;
- u16 ntc;
- u16 ntu;
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- for (index = 0; index < adapter->num_tx_queues; index++) {
- ntc = adapter->tx_ring[index]->next_to_clean;
- ntu = adapter->tx_ring[index]->next_to_use;
-
- if (ntc >= ntu)
- diff += (ntc - ntu);
- else
- diff += (adapter->tx_ring[index]->count - ntu + ntc);
- }
- if (adapter->num_tx_queues <= 0)
- return snprintf(buf, PAGE_SIZE,
- "can't calculate, number of queues %d\n",
- adapter->num_tx_queues);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- diff/adapter->num_tx_queues);
-}
-
-static ssize_t igb_iovotype(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "2\n");
-}
-
-static ssize_t igb_funcnbr(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj);
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- return snprintf(buf, PAGE_SIZE, "%d\n", adapter->vfs_allocated_count);
-}
-
-s32 igb_sysfs_get_thermal_data(struct kobject *kobj, char *buf)
-{
- s32 status;
- struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
-
- if (adapter == NULL) {
- snprintf(buf, PAGE_SIZE, "error: missing adapter\n");
- return 0;
- }
-
- if (&adapter->hw == NULL) {
- snprintf(buf, PAGE_SIZE, "error: missing hw\n");
- return 0;
- }
-
- status = e1000_get_thermal_sensor_data_generic(&(adapter->hw));
- if (status != E1000_SUCCESS)
- snprintf(buf, PAGE_SIZE, "error: status %d returned\n", status);
-
- return status;
-}
-
-static ssize_t igb_sysfs_location(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
- int idx;
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- idx = igb_name_to_idx(kobj->name);
- if (idx == -1)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- adapter->hw.mac.thermal_sensor_data.sensor[idx].location);
-}
-
-static ssize_t igb_sysfs_temp(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
- int idx;
-
- s32 status = igb_sysfs_get_thermal_data(kobj, buf);
-
- if (status != E1000_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "error: status %d returned",
- status);
-
- idx = igb_name_to_idx(kobj->name);
- if (idx == -1)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- adapter->hw.mac.thermal_sensor_data.sensor[idx].temp);
-}
-
-static ssize_t igb_sysfs_maxopthresh(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
- int idx;
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- idx = igb_name_to_idx(kobj->name);
- if (idx == -1)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- adapter->hw.mac.thermal_sensor_data.sensor[idx].max_op_thresh);
-}
-
-static ssize_t igb_sysfs_cautionthresh(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct igb_adapter *adapter = igb_get_adapter(kobj->parent);
- int idx;
-
- if (adapter == NULL)
- return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
-
- idx = igb_name_to_idx(kobj->name);
- if (idx == -1)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- adapter->hw.mac.thermal_sensor_data.sensor[0].caution_thresh);
-}
-
-/* Initialize the attributes */
-static struct kobj_attribute igb_sysfs_location_attr =
- __ATTR(location, 0444, igb_sysfs_location, igb_store);
-static struct kobj_attribute igb_sysfs_temp_attr =
- __ATTR(temp, 0444, igb_sysfs_temp, igb_store);
-static struct kobj_attribute igb_sysfs_cautionthresh_attr =
- __ATTR(cautionthresh, 0444, igb_sysfs_cautionthresh, igb_store);
-static struct kobj_attribute igb_sysfs_maxopthresh_attr =
- __ATTR(maxopthresh, 0444, igb_sysfs_maxopthresh, igb_store);
-
-static struct kobj_attribute igb_sysfs_fwbanner_attr =
- __ATTR(fwbanner, 0444, igb_fwbanner, igb_store);
-static struct kobj_attribute igb_sysfs_numeports_attr =
- __ATTR(numeports, 0444, igb_numeports, igb_store);
-static struct kobj_attribute igb_sysfs_porttype_attr =
- __ATTR(porttype, 0444, igb_porttype, igb_store);
-static struct kobj_attribute igb_sysfs_portspeed_attr =
- __ATTR(portspeed, 0444, igb_portspeed, igb_store);
-static struct kobj_attribute igb_sysfs_wqlflag_attr =
- __ATTR(wqlflag, 0444, igb_wqlflag, igb_store);
-static struct kobj_attribute igb_sysfs_xflowctl_attr =
- __ATTR(xflowctl, 0444, igb_xflowctl, igb_store);
-static struct kobj_attribute igb_sysfs_rxdrops_attr =
- __ATTR(rxdrops, 0444, igb_rxdrops, igb_store);
-static struct kobj_attribute igb_sysfs_rxerrors_attr =
- __ATTR(rxerrors, 0444, igb_rxerrors, igb_store);
-static struct kobj_attribute igb_sysfs_rxupacks_attr =
- __ATTR(rxupacks, 0444, igb_rxupacks, igb_store);
-static struct kobj_attribute igb_sysfs_rxmpacks_attr =
- __ATTR(rxmpacks, 0444, igb_rxmpacks, igb_store);
-static struct kobj_attribute igb_sysfs_rxbpacks_attr =
- __ATTR(rxbpacks, 0444, igb_rxbpacks, igb_store);
-static struct kobj_attribute igb_sysfs_txupacks_attr =
- __ATTR(txupacks, 0444, igb_txupacks, igb_store);
-static struct kobj_attribute igb_sysfs_txmpacks_attr =
- __ATTR(txmpacks, 0444, igb_txmpacks, igb_store);
-static struct kobj_attribute igb_sysfs_txbpacks_attr =
- __ATTR(txbpacks, 0444, igb_txbpacks, igb_store);
-static struct kobj_attribute igb_sysfs_txerrors_attr =
- __ATTR(txerrors, 0444, igb_txerrors, igb_store);
-static struct kobj_attribute igb_sysfs_txdrops_attr =
- __ATTR(txdrops, 0444, igb_txdrops, igb_store);
-static struct kobj_attribute igb_sysfs_rxframes_attr =
- __ATTR(rxframes, 0444, igb_rxframes, igb_store);
-static struct kobj_attribute igb_sysfs_rxbytes_attr =
- __ATTR(rxbytes, 0444, igb_rxbytes, igb_store);
-static struct kobj_attribute igb_sysfs_txframes_attr =
- __ATTR(txframes, 0444, igb_txframes, igb_store);
-static struct kobj_attribute igb_sysfs_txbytes_attr =
- __ATTR(txbytes, 0444, igb_txbytes, igb_store);
-static struct kobj_attribute igb_sysfs_linkstat_attr =
- __ATTR(linkstat, 0444, igb_linkstat, igb_store);
-static struct kobj_attribute igb_sysfs_funcid_attr =
- __ATTR(funcid, 0444, igb_funcid, igb_store);
-static struct kobj_attribute igb_sysfs_funvers_attr =
- __ATTR(funcvers, 0444, igb_funcvers, igb_store);
-static struct kobj_attribute igb_sysfs_macburn_attr =
- __ATTR(macburn, 0444, igb_macburn, igb_store);
-static struct kobj_attribute igb_sysfs_macadmn_attr =
- __ATTR(macadmn, 0444, igb_macadmn, igb_store);
-static struct kobj_attribute igb_sysfs_maclla1_attr =
- __ATTR(maclla1, 0444, igb_maclla1, igb_store);
-static struct kobj_attribute igb_sysfs_mtusize_attr =
- __ATTR(mtusize, 0444, igb_mtusize, igb_store);
-static struct kobj_attribute igb_sysfs_featflag_attr =
- __ATTR(featflag, 0444, igb_featflag, igb_store);
-static struct kobj_attribute igb_sysfs_lsominct_attr =
- __ATTR(lsominct, 0444, igb_lsominct, igb_store);
-static struct kobj_attribute igb_sysfs_prommode_attr =
- __ATTR(prommode, 0444, igb_prommode, igb_store);
-static struct kobj_attribute igb_sysfs_txdscqsz_attr =
- __ATTR(txdscqsz, 0444, igb_txdscqsz, igb_store);
-static struct kobj_attribute igb_sysfs_rxdscqsz_attr =
- __ATTR(rxdscqsz, 0444, igb_rxdscqsz, igb_store);
-static struct kobj_attribute igb_sysfs_txqavg_attr =
- __ATTR(txqavg, 0444, igb_txqavg, igb_store);
-static struct kobj_attribute igb_sysfs_rxqavg_attr =
- __ATTR(rxqavg, 0444, igb_rxqavg, igb_store);
-static struct kobj_attribute igb_sysfs_iovotype_attr =
- __ATTR(iovotype, 0444, igb_iovotype, igb_store);
-static struct kobj_attribute igb_sysfs_funcnbr_attr =
- __ATTR(funcnbr, 0444, igb_funcnbr, igb_store);
-
-/* Add the attributes into an array, to be added to a group */
-static struct attribute *therm_attrs[] = {
- &igb_sysfs_location_attr.attr,
- &igb_sysfs_temp_attr.attr,
- &igb_sysfs_cautionthresh_attr.attr,
- &igb_sysfs_maxopthresh_attr.attr,
- NULL
-};
-
-static struct attribute *attrs[] = {
- &igb_sysfs_fwbanner_attr.attr,
- &igb_sysfs_numeports_attr.attr,
- &igb_sysfs_porttype_attr.attr,
- &igb_sysfs_portspeed_attr.attr,
- &igb_sysfs_wqlflag_attr.attr,
- &igb_sysfs_xflowctl_attr.attr,
- &igb_sysfs_rxdrops_attr.attr,
- &igb_sysfs_rxerrors_attr.attr,
- &igb_sysfs_rxupacks_attr.attr,
- &igb_sysfs_rxmpacks_attr.attr,
- &igb_sysfs_rxbpacks_attr.attr,
- &igb_sysfs_txdrops_attr.attr,
- &igb_sysfs_txerrors_attr.attr,
- &igb_sysfs_txupacks_attr.attr,
- &igb_sysfs_txmpacks_attr.attr,
- &igb_sysfs_txbpacks_attr.attr,
- &igb_sysfs_rxframes_attr.attr,
- &igb_sysfs_rxbytes_attr.attr,
- &igb_sysfs_txframes_attr.attr,
- &igb_sysfs_txbytes_attr.attr,
- &igb_sysfs_linkstat_attr.attr,
- &igb_sysfs_funcid_attr.attr,
- &igb_sysfs_funvers_attr.attr,
- &igb_sysfs_macburn_attr.attr,
- &igb_sysfs_macadmn_attr.attr,
- &igb_sysfs_maclla1_attr.attr,
- &igb_sysfs_mtusize_attr.attr,
- &igb_sysfs_featflag_attr.attr,
- &igb_sysfs_lsominct_attr.attr,
- &igb_sysfs_prommode_attr.attr,
- &igb_sysfs_txdscqsz_attr.attr,
- &igb_sysfs_rxdscqsz_attr.attr,
- &igb_sysfs_txqavg_attr.attr,
- &igb_sysfs_rxqavg_attr.attr,
- &igb_sysfs_iovotype_attr.attr,
- &igb_sysfs_funcnbr_attr.attr,
- NULL
-};
-
-/* add attributes to a group */
-static struct attribute_group therm_attr_group = {
- .attrs = therm_attrs,
-};
-
-/* add attributes to a group */
-static struct attribute_group attr_group = {
- .attrs = attrs,
-};
-
-void igb_del_adapter(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < E1000_MAX_SENSORS; i++) {
- if (adapter->therm_kobj[i] == NULL)
- continue;
- sysfs_remove_group(adapter->therm_kobj[i], &therm_attr_group);
- kobject_put(adapter->therm_kobj[i]);
- }
- if (adapter->info_kobj != NULL) {
- sysfs_remove_group(adapter->info_kobj, &attr_group);
- kobject_put(adapter->info_kobj);
- }
-}
-
-/* cleanup goes here */
-void igb_sysfs_exit(struct igb_adapter *adapter)
-{
- igb_del_adapter(adapter);
-}
-
-int igb_sysfs_init(struct igb_adapter *adapter)
-{
- struct net_device *netdev;
- int rc = 0;
- int i;
- char buf[16];
-
- if ( adapter == NULL )
- goto del_adapter;
- netdev = adapter->netdev;
- if (netdev == NULL)
- goto del_adapter;
-
- adapter->info_kobj = NULL;
- for (i = 0; i < E1000_MAX_SENSORS; i++)
- adapter->therm_kobj[i] = NULL;
-
- /* create stats kobj and attribute listings in kobj */
- adapter->info_kobj = kobject_create_and_add("info",
- &(netdev->dev.kobj));
- if (adapter->info_kobj == NULL)
- goto del_adapter;
- if (sysfs_create_group(adapter->info_kobj, &attr_group))
- goto del_adapter;
-
- /* Don't create thermal subkobjs if no data present */
- if (igb_thermal_present(adapter->info_kobj) != true)
- goto exit;
-
- for (i = 0; i < E1000_MAX_SENSORS; i++) {
-
- /*
- * Likewise only create individual kobjs that have
- * meaningful data.
- */
- if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
- continue;
-
- /* directory named after sensor offset */
- snprintf(buf, sizeof(buf), "sensor_%d", i);
- adapter->therm_kobj[i] =
- kobject_create_and_add(buf, adapter->info_kobj);
- if (adapter->therm_kobj[i] == NULL)
- goto del_adapter;
- if (sysfs_create_group(adapter->therm_kobj[i],
- &therm_attr_group))
- goto del_adapter;
- }
-
- goto exit;
-
-del_adapter:
- igb_del_adapter(adapter);
- rc = -1;
-exit:
- return rc;
-}
-
-#endif /* IGB_SYSFS */
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/* get the precision */
precision = -1;
if (*fmt == '.') {
- ++fmt;
+ ++fmt;
if (isdigit(*fmt))
precision = skip_atoi(&fmt);
else if (*fmt == '*') {
return ret;
}
+#ifndef do_div
+#if BITS_PER_LONG == 32
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+ uint64_t rem = *n;
+ uint64_t b = base;
+ uint64_t res, d = 1;
+ uint32_t high = rem >> 32;
+
+ /* Reduce the thing a bit first */
+ res = 0;
+ if (high >= base) {
+ high /= base;
+ res = (uint64_t) high << 32;
+ rem -= (uint64_t) (high*base) << 32;
+ }
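+	/*
+	 * Shift-and-subtract long division: double the divisor (b) and
+	 * its quotient weight (d) until b would overtake the remainder,
+	 * then walk both back down, subtracting b and accumulating d
+	 * into the result wherever it still fits.
+	 */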
+
+ while ((int64_t)b > 0 && b < rem) {
+ b = b+b;
+ d = d+d;
+ }
+
+ do {
+ if (rem >= b) {
+ rem -= b;
+ res += d;
+ }
+ b >>= 1;
+ d >>= 1;
+ } while (d);
+
+ *n = res;
+ return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
#endif /* 2.6.0 => 2.4.6 */
/*****************************************************************************/
int _kc_skb_pad(struct sk_buff *skb, int pad)
{
int ntail;
-
+
/* If the skbuff is non linear tailroom is always zero.. */
if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
memset(skb->data+skb->len, 0, pad);
return 0;
}
-
+
ntail = skb->data_len + pad - (skb->end - skb->tail);
if (likely(skb_cloned(skb) || ntail > 0)) {
	if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
free_skb:
kfree_skb(skb);
return -ENOMEM;
-}
+}
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
int _kc_pci_save_state(struct pci_dev *pdev)
}
#endif /* <= 2.6.19 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
+{
+ return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
+}
+#endif /* < 2.6.21 */
+
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
/* hexdump code taken from lib/hexdump.c */
}
}
}
+
+#ifdef HAVE_I2C_SUPPORT
+struct i2c_client *
+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+{
+ struct i2c_client *client;
+ int status;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->adapter = adap;
+
+ client->dev.platform_data = info->platform_data;
+
+ client->flags = info->flags;
+ client->addr = info->addr;
+
+ strlcpy(client->name, info->type, sizeof(client->name));
+
+	/* Check that the address is not already in use */
+ status = i2c_check_addr(adap, client->addr);
+ if (status)
+ goto out_err;
+
+ client->dev.parent = &client->adapter->dev;
+ client->dev.bus = &i2c_bus_type;
+
+ status = i2c_attach_client(client);
+ if (status)
+ goto out_err;
+
+ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
+ client->name, dev_name(&client->dev));
+
+ return client;
+
+out_err:
+ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
+ "(%d)\n", client->name, client->addr, status);
+ kfree(client);
+ return NULL;
+}
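+
+/*
+ * Usage sketch (illustrative only; names are made up): the backport is
+ * driven exactly like the modern i2c_new_device() API:
+ *
+ *	struct i2c_board_info info = {
+ *		I2C_BOARD_INFO("example_eeprom", 0x50),
+ *	};
+ *	struct i2c_client *client = i2c_new_device(adap, &info);
+ *	if (!client)
+ *		return -ENODEV;
+ */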
+#endif /* HAVE_I2C_SUPPORT */
#endif /* < 2.6.22 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
#ifdef NAPI
-struct net_device *napi_to_poll_dev(struct napi_struct *napi)
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
{
struct adapter_q_vector *q_vector = container_of(napi,
struct adapter_q_vector,
out:
return err;
}
+#endif /* < 2.6.28 */
-void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size)
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
{
- skb_fill_page_desc(skb, i, page, off, size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
+ u16 old_cmd, cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+ if (enable)
+ cmd = old_cmd | PCI_COMMAND_MASTER;
+ else
+ cmd = old_cmd & ~PCI_COMMAND_MASTER;
+ if (cmd != old_cmd) {
+ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+ enable ? "enabling" : "disabling");
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
+ pdev->is_busmaster = enable;
+#endif
}
-#endif /* < 2.6.28 */
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+ __kc_pci_set_master(dev, false);
+}
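+
+/*
+ * With the pci_clear_master() #define in kcompat.h, a teardown path can
+ * call pci_clear_master(pdev) unconditionally and land here on
+ * pre-2.6.29 kernels.
+ */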
+#endif /* < 2.6.29 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev *dev)
+{
+ int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *vfdev;
+
+ /* loop through all ethernet devices starting at PF dev */
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == dev)
+ num_vf++;
+
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+ }
+
+#endif
+ return num_vf;
+}
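+
+/*
+ * Illustrative call site: a PF driver can use the count to warn before
+ * teardown while VFs still exist, e.g.
+ *
+ *	if (pci_num_vf(adapter->pdev))
+ *		dev_warn(pci_dev_to_dev(adapter->pdev),
+ *			 "VFs are still allocated\n");
+ *
+ * adapter->pdev is a placeholder for the caller's PF device.
+ */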
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
for (i = txq; i < dev->num_tx_queues; i++) {
qdisc = netdev_get_tx_queue(dev, i)->qdisc;
if (qdisc) {
- spin_lock_bh(qdisc_lock(qdisc));
+ spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
}
}
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
#endif /* HAVE_TX_MQ */
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ loff_t pos = *ppos;
+ size_t res;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !count)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ res = copy_from_user(to + pos, from, count);
+ if (res == count)
+ return -EFAULT;
+ count -= res;
+ *ppos = pos + count;
+ return count;
+}
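+
+/*
+ * Typical caller (sketch; foo_write is a made-up name): a debugfs
+ * write handler copying user data into a fixed kernel buffer:
+ *
+ *	static ssize_t foo_write(struct file *f, const char __user *ubuf,
+ *				 size_t count, loff_t *ppos)
+ *	{
+ *		char buf[32] = {0};
+ *		return simple_write_to_buffer(buf, sizeof(buf) - 1,
+ *					      ppos, ubuf, count);
+ *	}
+ */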
+
#endif /* < 2.6.35 */
/*****************************************************************************/
}
#endif /* < 2.6.36 */
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
-#ifdef HAVE_NETDEV_SELECT_QUEUE
-#include <net/ip.h>
-static u32 _kc_simple_tx_hashrnd;
-static u32 _kc_simple_tx_hashrnd_initialized;
-
-u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
- u16 num_tx_queues)
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+
+
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size, unsigned int truesize)
{
- u32 hash;
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+}
- if (skb_rx_queue_recorded(skb)) {
- hash = skb_get_rx_queue(skb);
- while (unlikely(hash >= num_tx_queues))
- hash -= num_tx_queues;
- return hash;
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
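+
+/*
+ * Intended pairing (illustrative; foo_* are placeholder names):
+ * simple_open() slots straight into file_operations so that the data
+ * pointer handed to debugfs_create_file() surfaces as
+ * file->private_data:
+ *
+ *	static const struct file_operations foo_fops = {
+ *		.owner = THIS_MODULE,
+ *		.open  = simple_open,
+ *		.read  = foo_read,
+ *	};
+ */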
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+ return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+ return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+ int pos;
+ u16 pcie_flags_reg;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ (type == PCI_EXP_TYPE_DOWNSTREAM &&
+ pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+ if (!pci_is_pcie(dev))
+ return false;
+
+ switch (pos) {
+ case PCI_EXP_FLAGS_TYPE:
+ return true;
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVSTA:
+ return __kc_pcie_cap_has_devctl(dev);
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKSTA:
+ return __kc_pcie_cap_has_lnkctl(dev);
+ case PCI_EXP_SLTCAP:
+ case PCI_EXP_SLTCTL:
+ case PCI_EXP_SLTSTA:
+ return __kc_pcie_cap_has_sltctl(dev);
+ case PCI_EXP_RTCTL:
+ case PCI_EXP_RTCAP:
+ case PCI_EXP_RTSTA:
+ return __kc_pcie_cap_has_rtctl(dev);
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_LNKSTA2:
+ return __kc_pcie_cap_version(dev) > 1;
+ default:
+ return false;
}
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+ int ret;
- if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
- get_random_bytes(&_kc_simple_tx_hashrnd, 4);
- _kc_simple_tx_hashrnd_initialized = 1;
+ *val = 0;
+ if (pos & 1)
+ return -EINVAL;
+
+ if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_word() fails; it may
+		 * have been written as 0xFFFF if a hardware error occurred
+		 * during the config read.
+		 */
+ if (ret)
+ *val = 0;
+ return ret;
}
- if (skb->sk && skb->sk->sk_hash)
- hash = skb->sk->sk_hash;
- else
-#ifdef NETIF_F_RXHASH
- hash = (__force u16) skb->protocol ^ skb->rxhash;
-#else
- hash = skb->protocol;
-#endif
+ /*
+ * For Functions that do not implement the Slot Capabilities,
+ * Slot Status, and Slot Control registers, these spaces must
+ * be hardwired to 0b, with the exception of the Presence Detect
+ * State bit in the Slot Status register of Downstream Ports,
+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
+ */
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
- hash = jhash_1word(hash, _kc_simple_tx_hashrnd);
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!__kc_pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
+{
+ int ret;
+ u16 val;
+
+ ret = __kc_pcie_capability_read_word(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = __kc_pcie_capability_write_word(dev, pos, val);
+ }
- return (u16) (((u64) hash * num_tx_queues) >> 32);
+ return ret;
}
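+
+/*
+ * Example (sketch): atomically clearing the ASPM control bits in Link
+ * Control with the backported read-modify-write helper:
+ *
+ *	__kc_pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+ *						PCI_EXP_LNKCTL_ASPMC, 0);
+ *
+ * pdev is whatever struct pci_dev the caller holds.
+ */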
-#endif /* HAVE_NETDEV_SELECT_QUEUE */
-#endif /* < 2.6.38 */
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+#endif /* < 3.7.0 */
/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
-#endif /* < 2.6.39 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+#endif /* 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+ int pos;
+ struct pci_dev *vfdev;
+ unsigned short dev_id;
+
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
+
+ /* find SR-IOV capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+
+	/*
+	 * Determine the device ID for the VFs; the vendor ID will be the
+	 * same as the PF's, so there is no need to check it.
+	 */
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+ /*
+ * It is considered assigned if it is a virtual function with
+ * our dev as the physical function and the assigned bit is set
+ */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+ }
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+ return vfs_assigned;
+}
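+
+/*
+ * Sketch of the intended call site: refuse to tear down SR-IOV while a
+ * guest still owns a VF, e.g.
+ *
+ *	if (__kc_pci_vfs_assigned(adapter->pdev))
+ *		return -EBUSY;
+ *
+ * adapter->pdev stands in for the PF's struct pci_dev.
+ */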
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
#define dca_get_tag(b) 0
#define dca_add_requester(a) -1
-#define dca_remove_requester(b) do { } while(0)
+#define dca_remove_requester(b) do { } while(0)
#define DCA_PROVIDER_ADD 0x0001
#define DCA_PROVIDER_REMOVE 0x0002
#endif
#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
#endif
-#ifndef NETIF_F_HW_VLAN_TX
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option) \
+ (config_enabled(option) || config_enabled(option##_MODULE))
+
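+/*
+ * With this in place, IS_ENABLED(CONFIG_HWMON) evaluates to 1 for both
+ * CONFIG_HWMON=y and CONFIG_HWMON=m (via CONFIG_HWMON_MODULE) and to 0
+ * otherwise, which is exactly what the IGB_HWMON gate below relies on.
+ */
+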
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
struct _kc_vlan_ethhdr {
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
#define vlan_tx_tag_present(_skb) 0
#define vlan_tx_tag_get(_skb) 0
#endif
-#endif
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
#ifndef VLAN_PRIO_SHIFT
#define VLAN_PRIO_SHIFT 13
#define __GFP_COLD 0
#endif
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
/*****************************************************************************/
/* Installations with ethtool version without eeprom, adapter id, or statistics
* support */
#define ETHTOOL_BUSINFO_LEN 32
#endif
-#ifndef RHEL_RELEASE_CODE
-/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
-#define RHEL_RELEASE_CODE 0
-#endif
#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+
#ifndef AX_RELEASE_CODE
#define AX_RELEASE_CODE 0
#endif
-#ifndef AX_RELEASE_VERSION
-#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
#endif
/* SuSE version macro is the same as Linux kernel version */
#ifndef SLE_VERSION
#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
#endif
-#ifndef SLE_VERSION_CODE
#ifdef CONFIG_SUSE_KERNEL
-/* SLES11 GA is 2.6.27 based */
#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
/* SLES11 SP1 is 2.6.32 based */
#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
-#else
-#define SLE_VERSION_CODE 0
-#endif
-#else /* CONFIG_SUSE_KERNEL */
-#define SLE_VERSION_CODE 0
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)))
+/* SLES11 SP3 is at least 3.0.61+ based */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
#endif /* SLE_VERSION_CODE */
#ifdef __KLOCWORK__
#endif
#else
- /* For Kernel 3.8 these are not defined - so undefine all */
- #undef __devexit_p
- #undef __devexit
- #undef __devinit
- #undef __devinitdata
- #define __devexit_p(x) &(x)
- #define __devexit
- #define __devinit
- #define __devinitdata
+ /* For Kernel 3.8 these are not defined - so undefine all */
+ #undef __devexit_p
+ #undef __devexit
+ #undef __devinit
+ #undef __devinitdata
+ #define __devexit_p(x) &(x)
+ #define __devexit
+ #define __devinit
+ #define __devinitdata
+
#endif /* 2.4.17 => 2.4.13 */
/*****************************************************************************/
/* 2.4.22 => 2.4.17 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
#define pci_name(x) ((x)->slot_name)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#endif
#endif
/*****************************************************************************/
#define ETHTOOL_OPS_COMPAT
#endif /* 2.6.4 => 2.6.0 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
+#define __user
+#endif /* < 2.4.27 */
+
/*****************************************************************************/
/* 2.5.71 => 2.4.x */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
#define dev_warn(dev, fmt, args...) \
pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
/* NOTE: dangerous! we ignore the 'gfp' argument */
#define dma_alloc_coherent(dev,sz,dma,gfp) \
#define dma_unmap_single(dev,a,b,c) \
pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_map_sg(dev, sg, nents, dir) \
+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
#define dma_sync_single(dev,a,b,c) \
pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
h->next = NULL;
h->pprev = NULL;
}
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
-
-#define hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
#ifndef might_sleep
#define might_sleep()
/* 2.5.28 => 2.4.23 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
-static inline void _kc_synchronize_irq(void)
-{
- synchronize_irq();
-}
-#undef synchronize_irq
-#define synchronize_irq(X) _kc_synchronize_irq()
-
#include <linux/tqueue.h>
#define work_struct tq_struct
#undef INIT_WORK
/*****************************************************************************/
/* 2.6.0 => 2.5.28 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
#undef get_cpu
#define get_cpu() smp_processor_id()
#undef put_cpu
extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
#endif /* strlcpy */
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+#elif BITS_PER_LONG == 32
+extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = _kc__div64_32(&(n), __base); \
+ __rem; \
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
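+
+/*
+ * Usage reminder (illustrative): do_div() divides in place and returns
+ * the remainder, so splitting a 64-bit nanosecond count looks like
+ *
+ *	u64 ns = stamp;
+ *	u32 rem = do_div(ns, NSEC_PER_SEC);
+ *
+ * leaving whole seconds in ns; stamp is a placeholder value.
+ */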
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \
+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9)))
+#define HAVE_I2C_SUPPORT
+#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */
+
#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
/*****************************************************************************/
/* 2.6.4 => 2.6.0 */
#ifndef PCI_EXP_DEVCTL_CERE
#define PCI_EXP_DEVCTL_CERE 0x0001
#endif
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
schedule_timeout((x * HZ)/1000 + 2); \
} while (0)
extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
#undef node_online_map
#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
#endif /* < 2.6.10 */
/*****************************************************************************/
return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
#endif
}
+
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
#endif /* < 2.6.11 */
/*****************************************************************************/
/* Advertisement control register. */
#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
+#define LPA_PAUSE_ASYM		0x0800	/* Can pause asymmetrically */
/* 1000BASE-T Control register */
#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+
#ifndef is_zero_ether_addr
#define is_zero_ether_addr _kc_is_zero_ether_addr
static inline int _kc_is_zero_ether_addr(const u8 *addr)
#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
#define ADVERTISED_Pause (1 << 13)
#define ADVERTISED_Asym_Pause (1 << 14)
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
#ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_device(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
#endif
#endif
#endif /* < 2.6.14 */
#else /* 2.6.16 and above */
#undef HAVE_PCI_ERS
#define HAVE_PCI_ERS
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
#endif /* < 2.6.16 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) \
+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
#ifndef first_online_node
#define first_online_node 0
#endif
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* >= RHEL 5.0 */
+
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
#if (!((RHEL_RELEASE_CODE && \
((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
- (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \
- (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
#endif
#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
PCI_ANY_ID, PCI_ANY_ID, 0, 0
#endif
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
#ifndef round_jiffies
#define round_jiffies(x) x
#endif
#define pci_request_selected_regions(pdev, bars, name) \
pci_request_regions(pdev, name)
#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev) \
+ pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#else
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+ return &netdev->dev;
+}
+
#endif /* < 2.6.21 */
/*****************************************************************************/
do { \
skb->tail = skb->data; \
} while (0)
+#define skb_set_tail_pointer(skb, offset) \
+ do { \
+ skb->tail = skb->data + offset; \
+ } while (0)
#define skb_copy_to_linear_data(skb, from, len) \
memcpy(skb->data, from, len)
#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
const void *buf, size_t len, bool ascii);
#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
_kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#endif
+
+#ifdef HAVE_I2C_SUPPORT
+#include <linux/i2c.h>
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+struct i2c_board_info {
+ char driver_name[KOBJ_NAME_LEN];
+ char type[I2C_NAME_SIZE];
+ unsigned short flags;
+ unsigned short addr;
+ void *platform_data;
+};
+#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\
+ .addr = (dev_addr)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info)
+extern struct i2c_client *
+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
+#endif /* HAVE_I2C_SUPPORT */
+
#else /* 2.6.22 */
#define ETH_TYPE_TRANS_SETS_DEV
#define HAVE_NETDEV_STATS_IN_NETDEV
#ifdef NAPI
extern int __kc_adapter_clean(struct net_device *, int *);
-extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
+extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
#define netif_napi_add(_netdev, _napi, _poll, _weight) \
do { \
struct napi_struct *__napi = (_napi); \
} while (0)
#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+ struct net_device *dev = napi_to_poll_dev(n);
+
+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+ /* No hurry. */
+ msleep(1);
+ }
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
#ifndef NETIF_F_GRO
#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
#ifndef KERN_CONT
#define KERN_CONT ""
#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+ printk(KERN_ERR fmt, ##arg)
+#endif
#else /* < 2.6.24 */
#define HAVE_ETHTOOL_GET_SSET_COUNT
#define HAVE_NETDEV_NAPI_LIST
#endif /* DEFINE_PCI_DEVICE_TABLE */
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
#ifndef IGB_PROCFS
#define IGB_PROCFS
#endif /* IGB_PROCFS */
+#endif /* >= 2.6.0 */
#else /* < 2.6.25 */
-#ifndef IGB_SYSFS
-#define IGB_SYSFS
-#endif /* IGB_SYSFS */
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef IGB_HWMON
+#define IGB_HWMON
+#endif /* IGB_HWMON */
+#endif /* CONFIG_HWMON */
#endif /* < 2.6.25 */
#else /* < 2.6.26 */
#include <linux/pci-aspm.h>
#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
#endif /* < 2.6.26 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
unlikely(__ret_warn_on); \
})
#endif /* WARN */
+#undef HAVE_IXGBE_DEBUG_FS
+#undef HAVE_IGB_DEBUG_FS
#else /* < 2.6.27 */
#define HAVE_TX_MQ
#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_IXGBE_DEBUG_FS
+#define HAVE_IGB_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
#endif /* < 2.6.27 */
/*****************************************************************************/
}
#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
#endif
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
-#endif
+
+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+
#endif /* < 2.6.28 */
/*****************************************************************************/
#ifndef pcie_aspm_enabled
#define pcie_aspm_enabled() (1)
#endif /* pcie_aspm_enabled */
+
+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
+
+#ifndef pci_clear_master
+extern void _kc_pci_clear_master(struct pci_dev *dev);
+#define pci_clear_master(dev) _kc_pci_clear_master(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
+#endif
#else /* < 2.6.29 */
#ifndef HAVE_NET_DEVICE_OPS
#define HAVE_NET_DEVICE_OPS
#define skb_get_rx_queue(a) 0
#define skb_record_rx_queue(a, b) do {} while (0)
#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
-#ifdef IXGBE_FCOE
-#undef CONFIG_FCOE
-#undef CONFIG_FCOE_MODULE
-#endif /* IXGBE_FCOE */
#ifndef CONFIG_PCI_IOV
#undef pci_enable_sriov
#define pci_enable_sriov(a, b) -ENOTSUPP
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
#endif /* pr_cont */
-#else
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+ synchronize_irq();
+#else /* < 2.5.28 */
+ synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+
+#else /* < 2.6.30 */
#define HAVE_ASPM_QUIRKS
#endif /* < 2.6.30 */
#define netdev_for_each_uc_addr(uclist, dev) \
for (uclist = dev->uc_list; uclist; uclist = uclist->next)
#endif
-#else
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full (1 << 18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full (1 << 18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#endif
+
+#else /* < 2.6.31 */
#ifndef HAVE_NETDEV_STORAGE_ADDRESS
#define HAVE_NETDEV_STORAGE_ADDRESS
#endif
#ifndef HAVE_TRANS_START_IN_QUEUE
#define HAVE_TRANS_START_IN_QUEUE
#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
#endif /* < 2.6.31 */
/*****************************************************************************/
#endif
#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync()
+{
+ return 1;
+}
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device *dev)
+{
+ return 1;
+}
#ifndef pm_runtime_get_sync
-#define pm_runtime_get_sync(dev) do {} while (0)
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
#endif
+#endif /* 2.6.0 => 2.6.32 */
#ifndef pm_runtime_put
#define pm_runtime_put(dev) do {} while (0)
#endif
#ifndef __percpu
#define __percpu
#endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
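+/* Illustrative use (an assumption, not part of the original patch): the
+ * fallback above lets shared code test PCIe-ness the modern way, e.g.
+ *
+ *	if (pci_is_pcie(pdev))
+ *		pos = pci_pcie_cap(pdev);
+ *
+ * instead of open-coding pci_find_capability(pdev, PCI_CAP_ID_EXP).
+ */
+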
+#ifndef __always_unused
+#define __always_unused __attribute__((__unused__))
+#endif
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
#else /* < 2.6.33 */
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
#define HAVE_NETDEV_OPS_FCOE_GETWWN
#endif
#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
#endif /* < 2.6.33 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
#ifndef ETH_FLAG_NTUPLE
#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
#endif
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
#define netdev_printk(level, netdev, format, args...) \
do { \
- struct adapter_struct *kc_adapter = netdev_priv(netdev);\
- struct pci_dev *pdev = kc_adapter->pdev; \
- printk("%s %s: " format, level, pci_name(pdev), \
- ##args); \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ printk(level "%s: " format, pci_name(pdev), ##args); \
} while(0)
#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
#define netdev_printk(level, netdev, format, args...) \
do { \
- struct adapter_struct *kc_adapter = netdev_priv(netdev);\
- struct pci_dev *pdev = kc_adapter->pdev; \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
struct device *dev = pci_dev_to_dev(pdev); \
dev_printk(level, dev, "%s: " format, \
netdev_name(netdev), ##args); \
#define netdev_info(dev, format, args...) \
netdev_printk(KERN_INFO, dev, format, ##args)
#undef netdev_dbg
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
#undef netif_info
#define netif_info(priv, type, dev, fmt, args...) \
netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...) \
+ netif_level(dbg, priv, type, dev, fmt, ##args)
#ifdef SET_SYSTEM_SLEEP_PM_OPS
#define HAVE_SYSTEM_SLEEP_PM_OPS
#endif /* for_each_set_bit */
#ifndef DEFINE_DMA_UNMAP_ADDR
-#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
#define dma_unmap_addr pci_unmap_addr
#define dma_unmap_addr_set pci_unmap_addr_set
#define dma_unmap_len pci_unmap_len
#define dma_unmap_len_set pci_unmap_len_set
#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr) \
+ do { \
+ static struct lock_class_key __key; \
+ (attr)->key = &__key; \
+ } while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended()
+{
+ return false;
+}
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device *dev)
+{
+ return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
#else /* < 2.6.34 */
#define HAVE_SYSTEM_SLEEP_PM_OPS
#ifndef HAVE_SET_RX_MODE
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
#ifndef numa_node_id
#define numa_node_id() 0
#endif
#ifdef HAVE_TX_MQ
#include <net/sch_generic.h>
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
#define netif_set_real_num_tx_queues(_netdev, _count) \
do { \
(_netdev)->egress_subqueue_count = _count; \
} while (0)
#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
-#else
+#else /* HAVE_TX_MQ */
#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
#endif /* HAVE_TX_MQ */
#ifndef ETH_FLAG_RXHASH
#define ETH_FLAG_RXHASH (1<<28)
#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
#else /* < 2.6.35 */
#define HAVE_PM_QOS_REQUEST_LIST
#define HAVE_IRQ_AFFINITY_HINT
} while (0)
#undef usleep_range
-#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#define u64_stats_update_begin(a) do { } while(0)
+#define u64_stats_update_end(a) do { } while(0)
+#define u64_stats_fetch_begin(a) do { } while(0)
+#define u64_stats_fetch_retry_bh(a) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
+#define HAVE_8021P_SUPPORT
+#endif
#else /* < 2.6.36 */
+
+
#define HAVE_PM_QOS_REQUEST_ACTIVE
#define HAVE_8021P_SUPPORT
#define HAVE_NDO_GET_STATS64
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
+ unsigned int rxq)
+{
+ return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+ __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
#endif
#define SKBTX_IN_PROGRESS (1 << 2)
#define SKB_SHARED_TX_IS_UNION
#endif
+
+#ifndef device_wakeup_enable
+#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
+#endif
+
#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
#ifndef HAVE_VLAN_RX_REGISTER
#define HAVE_VLAN_RX_REGISTER
#define DCB_CAP_DCBX_STATIC 0x10
#endif
#endif /* CONFIG_DCB */
-extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
-#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
-#else /* < 2.6.38 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
+#define CONFIG_XPS
+#endif /* RHEL_RELEASE_VERSION(6,2) */
#endif /* < 2.6.38 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM (1 << 29)
+#endif
#ifndef skb_queue_reverse_walk_safe
#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
for (skb = (queue)->prev, tmp = skb->prev; \
/* use < 2.6.40 because of a Fedora 15 kernel update where they
* updated the kernel version to 2.6.40.x and they back-ported 3.0 features
* like set_phys_id for ethtool.
- */
+ */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
#ifdef ETHTOOL_GRXRINGS
#ifndef FLOW_EXT
#define HAVE_ETHTOOL_SET_PHYS_ID
#endif /* < 2.6.40 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
+#define USE_LEGACY_PM_SUPPORT
+#endif /* < 3.0.0 */
+
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
#ifndef __netdev_alloc_skb_ip_align
#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
#define dcb_ieee_delapp(dev, app) 0
#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER 0x0800
+#define CTL1000_ENABLE_MASTER 0x1000
+
#else /* < 3.1.0 */
#ifndef HAVE_DCBNL_IEEE_DELAPP
#define HAVE_DCBNL_IEEE_DELAPP
#endif /* skb_frag_address */
#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
#define skb_frag_dma_map(dev,frag,offset,size,dir) \
_kc_skb_frag_dma_map(dev,frag,offset,size,dir)
static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
put_page(skb_frag_page(frag));
}
#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
#else /* < 3.2.0 */
#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
#define HAVE_PCI_DEV_FLAGS_ASSIGNED
#undef ixgbe_get_netdev_tc_txq
#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
#endif
-
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
typedef u32 netdev_features_t;
+#undef PCI_EXP_TYPE_RC_EC
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+#ifndef CONFIG_BQL
+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
+#define netdev_completed_queue(_n, _p, _b) do {} while (0)
+#define netdev_tx_sent_queue(_q, _b) do {} while (0)
+#define netdev_sent_queue(_n, _b) do {} while (0)
+#define netdev_tx_reset_queue(_q) do {} while (0)
+#define netdev_reset_queue(_n) do {} while (0)
+#endif
#else /* ! < 3.3.0 */
#define HAVE_INT_NDO_VLAN_RX_ADD_VID
#ifdef ETHTOOL_SRXNTUPLE
#define NETIF_F_RXALL 0
#endif /* NETIF_F_RXALL */
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
#define NUMTCS_RETURNS_U8
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
-#endif /* < 3.4.0 */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
+ int, int, unsigned int);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N) do { \
+ random_ether_addr(N->dev_addr); \
+ N->addr_assign_type |= NET_ADDR_RANDOM; \
+ } while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) random_ether_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
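+
+/* Usage sketch (an assumption, not from this patch): drivers that read an
+ * invalid MAC address from NVM can fall back to a random locally
+ * administered one:
+ *
+ *	if (!is_valid_ether_addr(netdev->dev_addr))
+ *		eth_hw_addr_random(netdev);
+ */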
+#else /* < 3.4.0 */
+#include <linux/kconfig.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#else
+#error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel
+#endif /* >= 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#define skb_tx_timestamp(skb) do {} while (0)
#else
#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
#endif /* < 3.5.0 */
/*****************************************************************************/
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) )
-#define NETIF_F_HW_VLAN_TX NETIF_F_HW_VLAN_CTAG_TX
-#define NETIF_F_HW_VLAN_RX NETIF_F_HW_VLAN_CTAG_RX
-#define NETIF_F_HW_VLAN_FILTER NETIF_F_HW_VLAN_CTAG_FILTER
-#define HAVE_VLAN_PROTOCOL
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
+#endif
+#else /* < 3.6.0 */
+#include <linux/mdio.h>
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on any one define
+ */
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#define ADVERTISED_40000baseKR4_Full (1 << 23)
+#define ADVERTISED_40000baseCR4_Full (1 << 24)
+#define ADVERTISED_40000baseSR4_Full (1 << 25)
+#define ADVERTISED_40000baseLR4_Full (1 << 26)
+#endif
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+ u32 supported = 0;
+
+ if (eee_cap & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_cap & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_cap & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_cap & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_cap & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_cap & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+
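+/* For instance (illustrative, not part of the original patch), a PHY whose
+ * EEE capability register reads MDIO_EEE_100TX | MDIO_EEE_1000T translates
+ * to SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full for ethtool.
+ */
+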
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+
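+/* A hedged usage sketch (assumption, not part of the original patch): a
+ * driver that has read the MMD EEE Link Partner Ability register (7.61)
+ * into lp_reg can report it via ethtool as
+ *
+ *	edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lp_reg);
+ *
+ * and turn a user-requested mask back into register format with
+ * ethtool_adv_to_mmd_eee_adv_t(edata->advertised).
+ */
+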
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ BUG();
+	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x) (x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
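+/* Sketch under stated assumptions (not in the original patch): the fallback
+ * above lets shared code ask for the PCIe port type on older kernels, e.g.
+ *
+ *	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
+ *		apply_endpoint_quirks(pdev);
+ *
+ * where apply_endpoint_quirks() is a hypothetical driver helper.
+ */
+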
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d,p,c,s) \
+ __kc_pcie_capability_clear_and_set_word(d,p,c,s)
+
+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+
+static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
+ u16 clear)
+{
+ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
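+/* A minimal sketch (assumption, not part of the original patch) tying the
+ * shims above to the ASPM bits defined earlier: disabling both L0s and L1
+ * on a device reduces to
+ *
+ *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+ *				   PCI_EXP_LNKCTL_ASPMC);
+ *
+ * which read-modify-writes Link Control via
+ * __kc_pcie_capability_clear_and_set_word() on kernels lacking the API.
+ */
+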
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define USE_CONST_DEV_UC_CHAR
+#endif
+
+#else /* >= 3.7.0 */
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+static inline bool is_link_local_ether_addr(const u8 *addr)
+{
+ __be16 *a = (__be16 *)addr;
+ static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+ static const __be16 m = cpu_to_be16(0xfff0);
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
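+/* For reference (illustrative, not from the original patch): the 0xfff0 mask
+ * matches 01:80:c2:00:00:00 through 01:80:c2:00:00:0f, so e.g. the LLDP
+ * multicast address 01:80:c2:00:00:0e is treated as link-local while
+ * 01:80:c2:00:00:10 is not.
+ */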
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+
+#undef hlist_entry
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#undef hlist_entry_safe
+#define hlist_entry_safe(ptr, type, member) \
+	((ptr) ? hlist_entry(ptr, type, member) : NULL)
+
+#undef hlist_for_each_entry
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
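+/* Hedged usage sketch of the backported 4-argument iterator above, assuming
+ * a hypothetical struct entry { struct hlist_node node; } chained on "head"
+ * and a hypothetical should_remove() predicate:
+ *
+ *	struct entry *e;
+ *	struct hlist_node *n;
+ *
+ *	hlist_for_each_entry_safe(e, n, head, node)
+ *		if (should_remove(e))
+ *			hlist_del(&e->node);
+ */
+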
+#ifdef CONFIG_XPS
+extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
+#else /* CONFIG_XPS */
+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
+#endif /* CONFIG_XPS */
+
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
+extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+#define __netdev_pick_tx __kc_netdev_pick_tx
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else
+#define HAVE_BRIDGE_FILTER
+#define USE_DEFAULT_FDB_DEL_DUMP
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+ u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+ vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+ skb->vlan_tci = vlan_tci;
+ return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+ __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
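+/* Hedged note (illustrative, not part of the original patch): 3.10 added a
+ * VLAN protocol argument to __vlan_hwaccel_put_tag(); the wrapper above
+ * drops it, so callers can uniformly write
+ *
+ *	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ *
+ * on both old and new kernels.
+ */
+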
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
#endif /* >= 3.10.0 */
#endif /* _KCOMPAT_H_ */
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
}
#endif /* < 2.4.8 */
+
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+ int pos;
+ struct pci_dev *vfdev;
+ unsigned short dev_id;
+
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
+
+ /* find SR-IOV capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+
+	/*
+	 * determine the device ID for the VFs, the vendor ID will be the
+	 * same as the PF so there is no need to check for that one
+	 */
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+		/*
+		 * It is considered assigned if it is a virtual function with
+		 * our dev as the physical function and the assigned bit is set
+		 */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+ }
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+ return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
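+
+/* Typical use (an illustrative assumption, not part of this patch): a PF
+ * driver can refuse to tear down SR-IOV while a guest still owns a VF:
+ *
+ *	if (pci_vfs_assigned(pdev)) {
+ *		dev_warn(&pdev->dev,
+ *			 "VFs are assigned - not disabling SR-IOV\n");
+ *		return -EPERM;
+ *	}
+ *	pci_disable_sriov(pdev);
+ */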
+
+
+
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
out:
return err;
}
+#endif /* < 2.6.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-			 int off, int size)
+			 int off, int size, unsigned int truesize)
{
	skb->data_len += size;
-	skb->truesize += size;
+	skb->truesize += truesize;
}
-#endif /* < 2.6.28 */
+#endif /* < 3.4.0 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
}
#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
#endif
+#endif /* < 2.6.28 */
+
#ifndef skb_add_rx_frag
#define skb_add_rx_frag _kc_skb_add_rx_frag
extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
#endif
-#endif /* < 2.6.28 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
#define NETIF_F_HW_VLAN_FILTER NETIF_F_HW_VLAN_CTAG_FILTER
#endif /* >= 3.10.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#endif
#endif /* _KCOMPAT_H_ */