drivers: advertise kmod dependencies in pmdinfo
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index e3f50cc..08f2a68 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
  * Default values for port configuration
  */
 #define IGB_DEFAULT_RX_FREE_THRESH  32
-#define IGB_DEFAULT_RX_PTHRESH      8
+
+#define IGB_DEFAULT_RX_PTHRESH      ((hw->mac.type == e1000_i354) ? 12 : 8)
 #define IGB_DEFAULT_RX_HTHRESH      8
-#define IGB_DEFAULT_RX_WTHRESH      0
+#define IGB_DEFAULT_RX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 4)
 
-#define IGB_DEFAULT_TX_PTHRESH      32
-#define IGB_DEFAULT_TX_HTHRESH      0
-#define IGB_DEFAULT_TX_WTHRESH      0
+#define IGB_DEFAULT_TX_PTHRESH      ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_DEFAULT_TX_HTHRESH      1
+#define IGB_DEFAULT_TX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 16)
 
 #define IGB_HKEY_MAX_INDEX 10
 
 #define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
 #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
 
+#define E1000_VTIVAR_MISC                0x01740
+#define E1000_VTIVAR_MISC_MASK           0xFF
+#define E1000_VTIVAR_VALID               0x80
+#define E1000_VTIVAR_MISC_MAILBOX        0
+#define E1000_VTIVAR_MISC_INTR_MASK      0x3
+
+/* External VLAN Enable bit mask */
+#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)
+
+/* External VLAN Ether Type bit mask and shift */
+#define E1000_VET_VET_EXT            0xFFFF0000
+#define E1000_VET_VET_EXT_SHIFT      16
+
 static int  eth_igb_configure(struct rte_eth_dev *dev);
 static int  eth_igb_start(struct rte_eth_dev *dev);
 static void eth_igb_stop(struct rte_eth_dev *dev);
+static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
+static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
 static void eth_igb_close(struct rte_eth_dev *dev);
 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
@@ -98,11 +114,15 @@ static int  eth_igb_link_update(struct rte_eth_dev *dev,
 static void eth_igb_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
 static int eth_igb_xstats_get(struct rte_eth_dev *dev,
-                             struct rte_eth_xstats *xstats, unsigned n);
+                             struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
+                                   struct rte_eth_xstat_name *xstats_names,
+                                   unsigned limit);
 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
 static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
 static void eth_igb_infos_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
+static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
 static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
@@ -162,7 +182,10 @@ static int eth_igbvf_link_update(struct e1000_hw *hw);
 static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
 static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
-                               struct rte_eth_xstats *xstats, unsigned n);
+                               struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
+                                     struct rte_eth_xstat_name *xstats_names,
+                                     unsigned limit);
 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
@@ -257,6 +280,9 @@ static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
                               uint8_t index, uint8_t offset);
 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
+static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
+                                       void *param);
+static void igbvf_mbx_process(struct rte_eth_dev *dev);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -280,22 +306,57 @@ static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_igb_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
+
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
+
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
+       { .vendor_id = 0, /* sentinel */ },
 };
 
 /*
  * The set of PCI devices this driver supports (for 82576&I350 VF)
  */
 static const struct rte_pci_id pci_id_igbvf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
+       { .vendor_id = 0, /* sentinel */ },
 };
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -314,6 +375,8 @@ static const struct eth_dev_ops eth_igb_ops = {
        .dev_configure        = eth_igb_configure,
        .dev_start            = eth_igb_start,
        .dev_stop             = eth_igb_stop,
+       .dev_set_link_up      = eth_igb_dev_set_link_up,
+       .dev_set_link_down    = eth_igb_dev_set_link_down,
        .dev_close            = eth_igb_close,
        .promiscuous_enable   = eth_igb_promiscuous_enable,
        .promiscuous_disable  = eth_igb_promiscuous_disable,
@@ -322,9 +385,11 @@ static const struct eth_dev_ops eth_igb_ops = {
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igb_stats_get,
        .xstats_get           = eth_igb_xstats_get,
+       .xstats_get_names     = eth_igb_xstats_get_names,
        .stats_reset          = eth_igb_stats_reset,
        .xstats_reset         = eth_igb_xstats_reset,
        .dev_infos_get        = eth_igb_infos_get,
+       .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
        .mtu_set              = eth_igb_mtu_set,
        .vlan_filter_set      = eth_igb_vlan_filter_set,
        .vlan_tpid_set        = eth_igb_vlan_tpid_set,
@@ -356,7 +421,6 @@ static const struct eth_dev_ops eth_igb_ops = {
        .timesync_disable     = igb_timesync_disable,
        .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
-       .get_reg_length       = eth_igb_get_reg_length,
        .get_reg              = eth_igb_get_regs,
        .get_eeprom_length    = eth_igb_get_eeprom_length,
        .get_eeprom           = eth_igb_get_eeprom,
@@ -382,10 +446,12 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igbvf_stats_get,
        .xstats_get           = eth_igbvf_xstats_get,
+       .xstats_get_names     = eth_igbvf_xstats_get_names,
        .stats_reset          = eth_igbvf_stats_reset,
        .xstats_reset         = eth_igbvf_stats_reset,
        .vlan_filter_set      = igbvf_vlan_filter_set,
        .dev_infos_get        = eth_igbvf_infos_get,
+       .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
        .rx_queue_release     = eth_igb_rx_queue_release,
        .tx_queue_setup       = eth_igb_tx_queue_setup,
@@ -394,7 +460,6 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .rxq_info_get         = igb_rxq_info_get,
        .txq_info_get         = igb_txq_info_get,
        .mac_addr_set         = igbvf_default_mac_addr_set,
-       .get_reg_length       = igbvf_get_reg_length,
        .get_reg              = igbvf_get_regs,
 };
 
@@ -550,6 +615,41 @@ igb_intr_disable(struct e1000_hw *hw)
        E1000_WRITE_FLUSH(hw);
 }
 
+static inline void
+igbvf_intr_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* only for mailbox */
+       E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
+       E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
+       E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/* only for mailbox now. If RX/TX needed, should extend this function.  */
+static void
+igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
+{
+       uint32_t tmp = 0;
+
+       /* mailbox */
+       tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
+       tmp |= E1000_VTIVAR_VALID;
+       E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
+}
+
+static void
+eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Configure VF other cause ivar */
+       igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
+}
+
 static inline int32_t
 igb_pf_reset_hw(struct e1000_hw *hw)
 {
@@ -938,6 +1038,10 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "igb_mac_82576_vf");
 
+       rte_intr_callback_register(&pci_dev->intr_handle,
+                                  eth_igbvf_interrupt_handler,
+                                  (void *)eth_dev);
+
        return 0;
 }
 
@@ -946,6 +1050,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+       struct rte_pci_device *pci_dev = eth_dev->pci_dev;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -962,15 +1067,22 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
 
+       /* disable uio intr before callback unregister */
+       rte_intr_disable(&pci_dev->intr_handle);
+       rte_intr_callback_unregister(&pci_dev->intr_handle,
+                                    eth_igbvf_interrupt_handler,
+                                    (void *)eth_dev);
+
        return 0;
 }
 
 static struct eth_driver rte_igb_pmd = {
        .pci_drv = {
-               .name = "rte_igb_pmd",
                .id_table = pci_id_igb_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_igb_dev_init,
        .eth_dev_uninit = eth_igb_dev_uninit,
@@ -982,22 +1094,16 @@ static struct eth_driver rte_igb_pmd = {
  */
 static struct eth_driver rte_igbvf_pmd = {
        .pci_drv = {
-               .name = "rte_igbvf_pmd",
                .id_table = pci_id_igbvf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_igbvf_dev_init,
        .eth_dev_uninit = eth_igbvf_dev_uninit,
        .dev_private_size = sizeof(struct e1000_adapter),
 };
 
-static int
-rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-       rte_eth_driver_register(&rte_igb_pmd);
-       return 0;
-}
-
 static void
 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
@@ -1009,20 +1115,6 @@ igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 }
 
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
- */
-static int
-rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-
-       rte_eth_driver_register(&rte_igbvf_pmd);
-       return 0;
-}
-
 static int
 igb_check_mq_mode(struct rte_eth_dev *dev)
 {
@@ -1129,6 +1221,9 @@ eth_igb_start(struct rte_eth_dev *dev)
        int ret, mask;
        uint32_t intr_vector = 0;
        uint32_t ctrl_ext;
+       uint32_t *speeds;
+       int num_speeds;
+       bool autoneg;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1136,7 +1231,7 @@ eth_igb_start(struct rte_eth_dev *dev)
        rte_intr_disable(intr_handle);
 
        /* Power up the phy. Needed to make the link go Up */
-       e1000_power_up_phy(hw);
+       eth_igb_dev_set_link_up(dev);
 
        /*
         * Packet Buffer Allocation (PBA)
@@ -1229,48 +1324,58 @@ eth_igb_start(struct rte_eth_dev *dev)
        }
 
        /* Setup link speed and duplex */
-       switch (dev->data->dev_conf.link_speed) {
-       case ETH_LINK_SPEED_AUTONEG:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
-               else
-                       goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_10:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
-               else
-                       goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_100:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-                       hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-                       hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
-               else
+       speeds = &dev->data->dev_conf.link_speeds;
+       if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+               hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+               hw->mac.autoneg = 1;
+       } else {
+               num_speeds = 0;
+               autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+
+               /* Reset */
+               hw->phy.autoneg_advertised = 0;
+
+               if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+                               ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+                               ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+                       num_speeds = -1;
                        goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_1000:
-               if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
-                               (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
-                       hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
-               else
+               }
+               if (*speeds & ETH_LINK_SPEED_10M_HD) {
+                       hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
+                       num_speeds++;
+               }
+               if (*speeds & ETH_LINK_SPEED_10M) {
+                       hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
+                       num_speeds++;
+               }
+               if (*speeds & ETH_LINK_SPEED_100M_HD) {
+                       hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
+                       num_speeds++;
+               }
+               if (*speeds & ETH_LINK_SPEED_100M) {
+                       hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
+                       num_speeds++;
+               }
+               if (*speeds & ETH_LINK_SPEED_1G) {
+                       hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
+                       num_speeds++;
+               }
+               if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
                        goto error_invalid_config;
-               break;
-       case ETH_LINK_SPEED_10000:
-       default:
-               goto error_invalid_config;
+
+               /* Set/reset the mac.autoneg based on the link speed,
+                * fixed or not
+                */
+               if (!autoneg) {
+                       hw->mac.autoneg = 0;
+                       hw->mac.forced_speed_duplex =
+                                       hw->phy.autoneg_advertised;
+               } else {
+                       hw->mac.autoneg = 1;
+               }
        }
+
        e1000_setup_link(hw);
 
        if (rte_intr_allow_others(intr_handle)) {
@@ -1302,9 +1407,8 @@ eth_igb_start(struct rte_eth_dev *dev)
        return 0;
 
 error_invalid_config:
-       PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
-                    dev->data->dev_conf.link_speed,
-                    dev->data->dev_conf.link_duplex, dev->data->port_id);
+       PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
+                    dev->data->dev_conf.link_speeds, dev->data->port_id);
        igb_dev_clear_queues(dev);
        return -EINVAL;
 }
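
The hunk above replaces the old link_speed/link_duplex switch with the ETH_LINK_SPEED_* bitmap carried in dev_conf.link_speeds. As a purely illustrative, application-side sketch (not part of this patch; port id and queue counts are placeholders), requesting a single fixed speed is what makes the new code clear hw->mac.autoneg and program hw->mac.forced_speed_duplex:

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative only: configure 'port_id' with one fixed advertised speed.
 * eth_igb_start() above then disables autonegotiation and forces
 * 100 Mb/s full duplex. */
static int
configure_fixed_100m(uint8_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.link_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_FIXED;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
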
@@ -1345,10 +1449,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
        }
 
        /* Power down the phy. Needed to make the link go Down */
-       if (hw->phy.media_type == e1000_media_type_copper)
-               e1000_power_down_phy(hw);
-       else
-               e1000_shutdown_fiber_serdes_link(hw);
+       eth_igb_dev_set_link_down(dev);
 
        igb_dev_clear_queues(dev);
 
@@ -1395,6 +1496,32 @@ eth_igb_stop(struct rte_eth_dev *dev)
        }
 }
 
+static int
+eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->phy.media_type == e1000_media_type_copper)
+               e1000_power_up_phy(hw);
+       else
+               e1000_power_up_fiber_serdes_link(hw);
+
+       return 0;
+}
+
+static int
+eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->phy.media_type == e1000_media_type_copper)
+               e1000_power_down_phy(hw);
+       else
+               e1000_shutdown_fiber_serdes_link(hw);
+
+       return 0;
+}
+
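
With dev_set_link_up/dev_set_link_down now wired into eth_igb_ops, the generic ethdev calls can power the PHY (or the fiber/serdes link) down and back up without a full stop/start cycle. A rough usage sketch, not taken from this file:

#include <rte_ethdev.h>

/* Illustrative only: bounce the link on a started igb port. */
static void
bounce_link(uint8_t port_id)
{
	rte_eth_dev_set_link_down(port_id);	/* lands in eth_igb_dev_set_link_down() */
	rte_eth_dev_set_link_up(port_id);	/* lands in eth_igb_dev_set_link_up() */
}
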
 static void
 eth_igb_close(struct rte_eth_dev *dev)
 {
@@ -1650,7 +1777,6 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
        rte_stats->imissed = stats->mpc;
        rte_stats->ierrors = stats->crcerrs +
                             stats->rlec + stats->ruc + stats->roc +
-                            rte_stats->imissed +
                             stats->rxerrc + stats->algnerrc + stats->cexterr;
 
        /* Tx Errors */
@@ -1688,8 +1814,27 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev)
        memset(stats, 0, sizeof(*stats));
 }
 
+static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names,
+       __rte_unused unsigned limit)
+{
+       unsigned i;
+
+       if (xstats_names == NULL)
+               return IGB_NB_XSTATS;
+
+       /* Note: limit checked in rte_eth_xstats_get_names() */
+
+       for (i = 0; i < IGB_NB_XSTATS; i++) {
+               snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+                        "%s", rte_igb_stats_strings[i].name);
+       }
+
+       return IGB_NB_XSTATS;
+}
+
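
The xstats API is now split into a name table (xstats_get_names) and id/value pairs (xstats_get). A hedged sketch of the two-call pattern an application would use against these callbacks; error handling is kept minimal:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Illustrative only: fetch the xstat names once, then the values, and
 * pair them up through the new per-entry 'id' field. */
static void
dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int i, n;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[values[i].id].name, values[i].value);
	}
	free(names);
	free(values);
}
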
 static int
-eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                   unsigned n)
 {
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1710,8 +1855,7 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
 
        /* Extended stats */
        for (i = 0; i < IGB_NB_XSTATS; i++) {
-               snprintf(xstats[i].name, sizeof(xstats[i].name),
-                        "%s", rte_igb_stats_strings[i].name);
+               xstats[i].id = i;
                xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_igb_stats_strings[i].offset);
        }
@@ -1759,8 +1903,23 @@ igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
            hw_stats->last_gotlbc, hw_stats->gotlbc);
 }
 
+static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+                                    struct rte_eth_xstat_name *xstats_names,
+                                    __rte_unused unsigned limit)
+{
+       unsigned i;
+
+       if (xstats_names != NULL)
+               for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+                       snprintf(xstats_names[i].name,
+                               sizeof(xstats_names[i].name), "%s",
+                               rte_igbvf_stats_strings[i].name);
+               }
+       return IGBVF_NB_XSTATS;
+}
+
 static int
-eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned n)
 {
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1777,8 +1936,7 @@ eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
                return 0;
 
        for (i = 0; i < IGBVF_NB_XSTATS; i++) {
-               snprintf(xstats[i].name, sizeof(xstats[i].name), "%s",
-                        rte_igbvf_stats_strings[i].name);
+               xstats[i].id = i;
                xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_igbvf_stats_strings[i].offset);
        }
@@ -1802,11 +1960,6 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
        rte_stats->ibytes = hw_stats->gorc;
        rte_stats->opackets = hw_stats->gptc;
        rte_stats->obytes = hw_stats->gotc;
-       rte_stats->imcasts = hw_stats->mprc;
-       rte_stats->ilbpackets = hw_stats->gprlbc;
-       rte_stats->ilbbytes = hw_stats->gorlbc;
-       rte_stats->olbpackets = hw_stats->gptlbc;
-       rte_stats->olbbytes = hw_stats->gotlbc;
 }
 
 static void
@@ -1918,6 +2071,37 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;
+
+       dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+                       ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+                       ETH_LINK_SPEED_1G;
+}
+
+static const uint32_t *
+eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to igb_rxd_pkt_info_to_pkt_type() */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
+           dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
+               return ptypes;
+       return NULL;
 }
 
 static void
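
eth_igb_supported_ptypes_get() backs the generic rte_eth_dev_get_supported_ptypes() query. A small, purely illustrative example of asking an igb port which L4 packet types its RX paths report:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative only: returns how many outer-L4 ptypes the port
 * advertises (TCP, UDP and SCTP for the igb RX paths above). */
static int
count_l4_ptypes(uint8_t port_id)
{
	uint32_t ptypes[8];
	int n;

	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
					     ptypes, RTE_DIM(ptypes));
	return n < 0 ? 0 : n;
}
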
@@ -2027,13 +2211,20 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 
        /* Now we check if a transition has happened */
        if (link_check) {
-               hw->mac.ops.get_link_up_info(hw, &link.link_speed,
-                                         &link.link_duplex);
-               link.link_status = 1;
+               uint16_t duplex, speed;
+               hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+               link.link_duplex = (duplex == FULL_DUPLEX) ?
+                               ETH_LINK_FULL_DUPLEX :
+                               ETH_LINK_HALF_DUPLEX;
+               link.link_speed = speed;
+               link.link_status = ETH_LINK_UP;
+               link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+                               ETH_LINK_SPEED_FIXED);
        } else if (!link_check) {
                link.link_speed = 0;
-               link.link_duplex = 0;
-               link.link_status = 0;
+               link.link_duplex = ETH_LINK_HALF_DUPLEX;
+               link.link_status = ETH_LINK_DOWN;
+               link.link_autoneg = ETH_LINK_SPEED_FIXED;
        }
        rte_igb_dev_atomic_write_link_status(dev, &link);
 
@@ -2201,21 +2392,25 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t reg = ETHER_TYPE_VLAN;
-       int ret = 0;
+       uint32_t reg, qinq;
+
+       qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
-       switch (vlan_type) {
-       case ETH_VLAN_TYPE_INNER:
-               reg |= (tpid << 16);
+       /* only outer TPID of double VLAN can be configured*/
+       if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+               reg = E1000_READ_REG(hw, E1000_VET);
+               reg = (reg & (~E1000_VET_VET_EXT)) |
+                       ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
                E1000_WRITE_REG(hw, E1000_VET, reg);
-               break;
-       default:
-               ret = -EINVAL;
-               PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
-               break;
+
+               return 0;
        }
 
-       return ret;
+       /* all other TPID values are read-only*/
+       PMD_DRV_LOG(ERR, "Not supported");
+
+       return -ENOTSUP;
 }
 
 static void
@@ -2500,7 +2695,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_TCTL, tctl);
                E1000_WRITE_REG(hw, E1000_RCTL, rctl);
                E1000_WRITE_FLUSH(hw);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
 
        return 0;
@@ -2527,6 +2722,69 @@ eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
        eth_igb_interrupt_action(dev);
 }
 
+static int
+eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       uint32_t eicr;
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       igbvf_intr_disable(hw);
+
+       /* read-on-clear nic registers here */
+       eicr = E1000_READ_REG(hw, E1000_EICR);
+       intr->flags = 0;
+
+       if (eicr == E1000_VTIVAR_MISC_MAILBOX)
+               intr->flags |= E1000_FLAG_MAILBOX;
+
+       return 0;
+}
+
+void igbvf_mbx_process(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 in_msg = 0;
+
+       if (mbx->ops.read(hw, &in_msg, 1, 0))
+               return;
+
+       /* PF reset VF event */
+       if (in_msg == E1000_PF_CONTROL_MSG)
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+}
+
+static int
+eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       if (intr->flags & E1000_FLAG_MAILBOX) {
+               igbvf_mbx_process(dev);
+               intr->flags &= ~E1000_FLAG_MAILBOX;
+       }
+
+       igbvf_intr_enable(dev);
+       rte_intr_enable(&dev->pci_dev->intr_handle);
+
+       return 0;
+}
+
+static void
+eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                           void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+       eth_igbvf_interrupt_get_status(dev);
+       eth_igbvf_interrupt_action(dev);
+}
+
 static int
 eth_igb_led_on(struct rte_eth_dev *dev)
 {
@@ -2798,6 +3056,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(dev->data->dev_private);
        int ret;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t intr_vector = 0;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2817,12 +3077,41 @@ igbvf_dev_start(struct rte_eth_dev *dev)
                return ret;
        }
 
+       /* check and configure queue intr-vector mapping */
+       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+               intr_vector = dev->data->nb_rx_queues;
+               ret = rte_intr_efd_enable(intr_handle, intr_vector);
+               if (ret)
+                       return ret;
+       }
+
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   dev->data->nb_rx_queues * sizeof(int), 0);
+               if (!intr_handle->intr_vec) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                    " intr_vec\n", dev->data->nb_rx_queues);
+                       return -ENOMEM;
+               }
+       }
+
+       eth_igbvf_configure_msix_intr(dev);
+
+       /* enable uio/vfio intr/eventfd mapping */
+       rte_intr_enable(intr_handle);
+
+       /* resume enabled intr since hw reset */
+       igbvf_intr_enable(dev);
+
        return 0;
 }
 
 static void
 igbvf_dev_stop(struct rte_eth_dev *dev)
 {
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
        PMD_INIT_FUNC_TRACE();
 
        igbvf_stop_adapter(dev);
@@ -2834,6 +3123,16 @@ igbvf_dev_stop(struct rte_eth_dev *dev)
        igbvf_set_vfta_all(dev,0);
 
        igb_dev_clear_queues(dev);
+
+       /* disable intr eventfd mapping */
+       rte_intr_disable(intr_handle);
+
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }
 
 static void
@@ -4672,6 +4971,12 @@ eth_igb_get_regs(struct rte_eth_dev *dev,
        int count = 0;
        const struct reg_info *reg_group;
 
+       if (data == NULL) {
+               regs->length = eth_igb_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
@@ -4696,6 +5001,12 @@ igbvf_get_regs(struct rte_eth_dev *dev,
        int count = 0;
        const struct reg_info *reg_group;
 
+       if (data == NULL) {
+               regs->length = igbvf_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
@@ -4766,16 +5077,6 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
        return nvm->ops.write(hw,  first, length, data);
 }
 
-static struct rte_driver pmd_igb_drv = {
-       .type = PMD_PDEV,
-       .init = rte_igb_pmd_init,
-};
-
-static struct rte_driver pmd_igbvf_drv = {
-       .type = PMD_PDEV,
-       .init = rte_igbvf_pmd_init,
-};
-
 static int
 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -4937,5 +5238,9 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
        E1000_WRITE_FLUSH(hw);
 }
 
-PMD_REGISTER_DRIVER(pmd_igb_drv);
-PMD_REGISTER_DRIVER(pmd_igbvf_drv);
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio");
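
Driver registration now boils down to the three macros just above: RTE_PMD_REGISTER_PCI hooks the rte_pci_driver into the PCI bus scan, while RTE_PMD_REGISTER_PCI_TABLE and RTE_PMD_REGISTER_KMOD_DEP export the id table and, with this patch, the kernel-module requirement, so the pmdinfo tooling (pmdinfogen at build time, dpdk-pmdinfo.py on the resulting binary) can report them. A condensed sketch for a hypothetical net_foo PMD (the name and the 0x10c9 device id are placeholders), assuming the same DPDK 16.11-era headers used by this file:

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

static const struct rte_pci_id pci_id_foo_map[] = {
	{ RTE_PCI_DEVICE(0x8086, 0x10c9) },	/* placeholder device id */
	{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_foo_pmd = {
	.pci_drv = {
		.id_table = pci_id_foo_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	/* a real driver also fills .eth_dev_init, .eth_dev_uninit
	 * and .dev_private_size */
};

RTE_PMD_REGISTER_PCI(net_foo, rte_foo_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_foo, pci_id_foo_map);
RTE_PMD_REGISTER_KMOD_DEP(net_foo, "* igb_uio | uio_pci_generic | vfio");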