#include <stdint.h>
#include <stdarg.h>
+#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#define IGB_DEFAULT_TX_HTHRESH 1
#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
-#define IGB_HKEY_MAX_INDEX 10
-
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define E1000_VET_VET_EXT 0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT 16
+/* MSI-X "other" interrupt vector: carries link status change interrupts */
+#define IGB_MSIX_OTHER_INTR_VEC 0
+
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
+static int eth_igb_reset(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);
-static void igb_intr_disable(struct e1000_hw *hw);
+static void igb_intr_disable(struct rte_eth_dev *dev);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_igb_rar_set(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
- struct ether_addr *addr);
+static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr);
static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
- struct ether_addr *addr);
+static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
.dev_set_link_up = eth_igb_dev_set_link_up,
.dev_set_link_down = eth_igb_dev_set_link_down,
.dev_close = eth_igb_close,
+ .dev_reset = eth_igb_reset,
.promiscuous_enable = eth_igb_promiscuous_enable,
.promiscuous_disable = eth_igb_promiscuous_disable,
.allmulticast_enable = eth_igb_allmulticast_enable,
.get_eeprom_length = eth_igb_get_eeprom_length,
.get_eeprom = eth_igb_get_eeprom,
.set_eeprom = eth_igb_set_eeprom,
+ .get_module_info = eth_igb_get_module_info,
+ .get_module_eeprom = eth_igb_get_module_eeprom,
.timesync_adjust_time = igb_timesync_adjust_time,
.timesync_read_time = igb_timesync_read_time,
.timesync_write_time = igb_timesync_write_time,
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
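+ /* Unmask the MSI-X vector that carries "other" (link status change)
+ * interrupts; it is auto-masked via EIAM, so it must be re-armed here.
+ */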
+ if (rte_intr_allow_others(intr_handle) &&
+ dev->data->dev_conf.intr_conf.lsc != 0) {
+ E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
+ }
E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
E1000_WRITE_FLUSH(hw);
}
static void
-igb_intr_disable(struct e1000_hw *hw)
+igb_intr_disable(struct rte_eth_dev *dev)
{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
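+ /* Mask the MSI-X vector that carries "other" (link status) interrupts */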
+ if (rte_intr_allow_others(intr_handle) &&
+ dev->data->dev_conf.intr_conf.lsc != 0) {
+ E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
+ }
+
E1000_WRITE_REG(hw, E1000_IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
}
/* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]);
+ ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+ ð_dev->data->mac_addrs[0]);
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
/* Reset any pending lock */
igb_reset_swfw_lock(hw);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
/* uninitialize PF if max_vfs not zero */
igb_pf_host_uninit(eth_dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
int diag;
- struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+ struct rte_ether_addr *perm_addr =
+ (struct rte_ether_addr *)hw->mac.perm_addr;
PMD_INIT_FUNC_TRACE();
return diag;
}
/* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+ ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
ð_dev->data->mac_addrs[0]);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
/* disable uio intr before callback unregister */
rte_intr_disable(&pci_dev->intr_handle);
rte_intr_callback_unregister(&pci_dev->intr_handle,
eth_igb_rxtx_control(dev, false);
- igb_intr_disable(hw);
+ igb_intr_disable(dev);
/* disable intr eventfd mapping */
rte_intr_disable(intr_handle);
rte_eth_linkstatus_set(dev, &link);
}
+/*
+ * Reset PF device.
+ */
+static int
+eth_igb_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begins to reset its PF port, it should notify
+ * all of its VFs so that they can align with it. The detailed
+ * notification mechanism is PMD specific and is currently not
+ * implemented. To avoid unexpected behavior in a VF, resetting a PF
+ * with SR-IOV active is currently not supported. It might be
+ * supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_igb_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_igb_dev_init(dev);
+
+ return ret;
+}
+
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
/* Note: limit checked in rte_eth_xstats_names() */
for (i = 0; i < IGB_NB_XSTATS; i++) {
- snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
- "%s", rte_igb_stats_strings[i].name);
+ strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name,
+ sizeof(xstats_names[i].name));
}
return IGB_NB_XSTATS;
return IGB_NB_XSTATS;
for (i = 0; i < IGB_NB_XSTATS; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
- "%s", rte_igb_stats_strings[i].name);
+ strlcpy(xstats_names[i].name,
+ rte_igb_stats_strings[i].name,
+ sizeof(xstats_names[i].name));
return IGB_NB_XSTATS;
if (xstats_names != NULL)
for (i = 0; i < IGBVF_NB_XSTATS; i++) {
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name), "%s",
- rte_igbvf_stats_strings[i].name);
+ strlcpy(xstats_names[i].name,
+ rte_igbvf_stats_strings[i].name,
+ sizeof(xstats_names[i].name));
}
return IGBVF_NB_XSTATS;
}
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
switch (hw->mac.type) {
case e1000_82575:
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.hthresh = IGB_DEFAULT_TX_HTHRESH,
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
- .txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G;
+
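+ /* Largest usable MTU: the RLPML frame-size limit minus L2 overhead */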
+ dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
+ dev_info->min_mtu = ETHER_MIN_MTU;
}
static const uint32_t *
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
break;
}
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = IGB_DEFAULT_RX_PTHRESH,
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.hthresh = IGB_DEFAULT_TX_HTHRESH,
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
- .txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
VLAN_TAG_SIZE);
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE);
static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
if(mask & ETH_VLAN_EXTEND_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
uint32_t mask, regval;
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
struct rte_eth_dev_info dev_info;
memset(&dev_info, 0, sizeof(dev_info));
eth_igb_infos_get(dev, &dev_info);
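+ /* Rx queue vectors follow the "other" interrupt vector, so shift the
+ * queue mask up by one when that vector is in use.
+ */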
- mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
+ mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
regval = E1000_READ_REG(hw, E1000_EIMS);
E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- igb_intr_disable(hw);
+ igb_intr_disable(dev);
/* read-on-clear nic registers here */
icr = E1000_READ_REG(hw, E1000_ICR);
#define E1000_RAH_POOLSEL_SHIFT (18)
static int
-eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
e1000_rar_set(hw, addr, index);
}
-static void
+static int
eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
- struct ether_addr *addr)
+ struct rte_ether_addr *addr)
{
eth_igb_rar_clear(dev, 0);
-
eth_igb_rar_set(dev, (void *)addr, 0, 0);
+
+ return 0;
}
/*
* Virtual Function operations
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}
#endif
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct ether_addr addr;
+ struct rte_ether_addr addr;
PMD_INIT_FUNC_TRACE();
return 0;
}
-static void
-igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+static int
+igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* index is not used by rar_set() */
hw->mac.ops.rar_set(hw, (void *)addr, 0);
+ return 0;
}
uint32_t rctl;
struct e1000_hw *hw;
struct rte_eth_dev_info dev_info;
- uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
- VLAN_TAG_SIZE);
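+ /* E1000_ETH_OVERHEAD accounts for the Ethernet header, CRC and VLAN
+ * tag bytes added on top of the MTU.
+ */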
+ uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
static int
eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
struct e1000_hw *hw;
return nvm->ops.write(hw, first, length, data);
}
+static int
+eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
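+ /* Module EEPROM is only accessible on SFP-based (non-copper) ports */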
+ if (hw->phy.media_type == e1000_media_type_copper ||
+ hw->phy.media_type == e1000_media_type_unknown)
+ return -EOPNOTSUPP;
+
+ /* Check whether we support SFF-8472 or not */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+ if (status)
+ return -EIO;
+
+ /* Read the addressing mode from the SFF-8472 swap register */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+ if (status)
+ return -EIO;
+
+ /* Page swap (address change) required but not supported */
+ if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.");
+ page_swap = true;
+ }
+
+ if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP which supports a revision of SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
+ u16 first_word, last_word;
+ int i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ first_word = info->offset >> 1;
+ last_word = (info->offset + info->length - 1) >> 1;
+
+ /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
+ if (status) {
+ /* Error occurred while reading module */
+ return -EIO;
+ }
+
+ dataword[i] = rte_be_to_cpu_16(dataword[i]);
+ }
+
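+ /* Reads are word-aligned; skip the leading byte when the requested
+ * offset is odd.
+ */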
+ memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
+
+ return 0;
+}
+
static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
E1000_GPIE_NSICR);
intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
misc_shift;
+
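+ /* Include the link-status ("other") vector in the auto-clear (EIAC)
+ * mask as well.
+ */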
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
+
regval = E1000_READ_REG(hw, E1000_EIAC);
E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
/* enable msix_other interrupt */
regval = E1000_READ_REG(hw, E1000_EIMS);
E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
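+ /* Route the "other" causes (e.g. link status change) to the dedicated
+ * MSI-X vector via IVAR_MISC.
+ */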
- tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
+ tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
}
*/
intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
misc_shift;
+
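+ /* Include the link-status ("other") vector in the auto-mask (EIAM) too */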
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
+
regval = E1000_READ_REG(hw, E1000_EIAM);
E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(e1000_init_log)
+{
+ e1000_igb_init_log();
+}