net: add rte prefix to ether defines
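
The rename is mechanical: struct ether_addr becomes struct rte_ether_addr,
ETHER_ADDR_LEN becomes RTE_ETHER_ADDR_LEN, ether_addr_copy() becomes
rte_ether_addr_copy(), and so on, with unchanged values and semantics. As an
illustrative sketch (not part of this patch), code built against the renamed
API looks like this:

    #include <stdint.h>
    #include <rte_ether.h>

    /* Illustrative only: the prefixed names used after this rename. */
    static int
    copy_mac_if_set(const struct rte_ether_addr *src,
                    struct rte_ether_addr *dst)
    {
            if (rte_is_zero_ether_addr(src))    /* was is_zero_ether_addr() */
                    return -1;

            rte_ether_addr_copy(src, dst);      /* was ether_addr_copy() */
            return RTE_ETHER_ADDR_LEN;          /* was ETHER_ADDR_LEN (6) */
    }
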
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 64dfe68..bd1c64c 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -8,6 +8,7 @@
 #include <stdint.h>
 #include <stdarg.h>
 
+#include <rte_string_fns.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_byteorder.h>
 #define E1000_VET_VET_EXT            0xFFFF0000
 #define E1000_VET_VET_EXT_SHIFT      16
 
+/* MSI-X other interrupt vector */
+#define IGB_MSIX_OTHER_INTR_VEC      0
+
 static int  eth_igb_configure(struct rte_eth_dev *dev);
 static int  eth_igb_start(struct rte_eth_dev *dev);
 static void eth_igb_stop(struct rte_eth_dev *dev);
 static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
 static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
 static void eth_igb_close(struct rte_eth_dev *dev);
+static int eth_igb_reset(struct rte_eth_dev *dev);
 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
@@ -137,14 +142,14 @@ static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 static int eth_igb_led_on(struct rte_eth_dev *dev);
 static int eth_igb_led_off(struct rte_eth_dev *dev);
 
-static void igb_intr_disable(struct e1000_hw *hw);
+static void igb_intr_disable(struct rte_eth_dev *dev);
 static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
 static int eth_igb_rar_set(struct rte_eth_dev *dev,
-                          struct ether_addr *mac_addr,
+                          struct rte_ether_addr *mac_addr,
                           uint32_t index, uint32_t pool);
 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
 static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
-               struct ether_addr *addr);
+               struct rte_ether_addr *addr);
 
 static void igbvf_intr_disable(struct e1000_hw *hw);
 static int igbvf_dev_configure(struct rte_eth_dev *dev);
@@ -169,7 +174,7 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
-               struct ether_addr *addr);
+               struct rte_ether_addr *addr);
 static int igbvf_get_reg_length(struct rte_eth_dev *dev);
 static int igbvf_get_regs(struct rte_eth_dev *dev,
                struct rte_dev_reg_info *regs);
@@ -226,7 +231,7 @@ static int eth_igb_get_module_info(struct rte_eth_dev *dev,
 static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
                                     struct rte_dev_eeprom_info *info);
 static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
-                                   struct ether_addr *mc_addr_set,
+                                   struct rte_ether_addr *mc_addr_set,
                                    uint32_t nb_mc_addr);
 static int igb_timesync_enable(struct rte_eth_dev *dev);
 static int igb_timesync_disable(struct rte_eth_dev *dev);
@@ -351,6 +356,7 @@ static const struct eth_dev_ops eth_igb_ops = {
        .dev_set_link_up      = eth_igb_dev_set_link_up,
        .dev_set_link_down    = eth_igb_dev_set_link_down,
        .dev_close            = eth_igb_close,
+       .dev_reset            = eth_igb_reset,
        .promiscuous_enable   = eth_igb_promiscuous_enable,
        .promiscuous_disable  = eth_igb_promiscuous_disable,
        .allmulticast_enable  = eth_igb_allmulticast_enable,
@@ -536,14 +542,31 @@ igb_intr_enable(struct rte_eth_dev *dev)
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+       if (rte_intr_allow_others(intr_handle) &&
+               dev->data->dev_conf.intr_conf.lsc != 0) {
+               E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
+       }
 
        E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
        E1000_WRITE_FLUSH(hw);
 }
 
 static void
-igb_intr_disable(struct e1000_hw *hw)
+igb_intr_disable(struct rte_eth_dev *dev)
 {
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+       if (rte_intr_allow_others(intr_handle) &&
+               dev->data->dev_conf.intr_conf.lsc != 0) {
+               E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
+       }
+
        E1000_WRITE_REG(hw, E1000_IMC, ~0);
        E1000_WRITE_FLUSH(hw);
 }
@@ -807,17 +830,18 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("e1000",
-               ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+               RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                                                "store MAC addresses",
-                               ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+                               RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                error = -ENOMEM;
                goto err_late;
        }
 
        /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+       rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+                       &eth_dev->data->mac_addrs[0]);
 
        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -915,9 +939,6 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
        /* Reset any pending lock */
        igb_reset_swfw_lock(hw);
 
-       rte_free(eth_dev->data->mac_addrs);
-       eth_dev->data->mac_addrs = NULL;
-
        /* uninitialize PF if max_vfs not zero */
        igb_pf_host_uninit(eth_dev);
 
@@ -963,7 +984,8 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;
-       struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+       struct rte_ether_addr *perm_addr =
+               (struct rte_ether_addr *)hw->mac.perm_addr;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1006,19 +1028,19 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
        diag = hw->mac.ops.reset_hw(hw);
 
        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
+       eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
                hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC "
                        "addresses",
-                       ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+                       RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                return -ENOMEM;
        }
 
        /* Generate a random MAC address, if none was assigned by PF. */
-       if (is_zero_ether_addr(perm_addr)) {
-               eth_random_addr(perm_addr->addr_bytes);
+       if (rte_is_zero_ether_addr(perm_addr)) {
+               rte_eth_random_addr(perm_addr->addr_bytes);
                PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
                PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
                             "%02x:%02x:%02x:%02x:%02x:%02x",
@@ -1037,7 +1059,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
                return diag;
        }
        /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+       rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
@@ -1071,9 +1093,6 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
 
-       rte_free(eth_dev->data->mac_addrs);
-       eth_dev->data->mac_addrs = NULL;
-
        /* disable uio intr before callback unregister */
        rte_intr_disable(&pci_dev->intr_handle);
        rte_intr_callback_unregister(&pci_dev->intr_handle,
@@ -1303,7 +1322,8 @@ eth_igb_start(struct rte_eth_dev *dev)
        }
        adapter->stopped = 0;
 
-       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+       E1000_WRITE_REG(hw, E1000_VET,
+                       RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
 
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
@@ -1490,7 +1510,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
 
        eth_igb_rxtx_control(dev, false);
 
-       igb_intr_disable(hw);
+       igb_intr_disable(dev);
 
        /* disable intr eventfd mapping */
        rte_intr_disable(intr_handle);
@@ -1593,6 +1613,33 @@ eth_igb_close(struct rte_eth_dev *dev)
        rte_eth_linkstatus_set(dev, &link);
 }
 
+/*
+ * Reset PF device.
+ */
+static int
+eth_igb_reset(struct rte_eth_dev *dev)
+{
+       int ret;
+
+       /* When a DPDK PMD PF begins to reset the PF port, it should notify
+        * all of its VFs so that they stay aligned with it. The detailed
+        * notification mechanism is PMD specific and is currently not
+        * implemented. To avoid unexpected behavior in the VFs, resetting
+        * the PF while SR-IOV is active is not supported; this may change.
+        */
+       if (dev->data->sriov.active)
+               return -ENOTSUP;
+
+       ret = eth_igb_dev_uninit(dev);
+       if (ret)
+               return ret;
+
+       ret = eth_igb_dev_init(dev);
+
+       return ret;
+}
+
+
 static int
 igb_get_rx_buffer_size(struct e1000_hw *hw)
 {
@@ -1643,7 +1690,7 @@ igb_hardware_init(struct e1000_hw *hw)
         */
        rx_buf_size = igb_get_rx_buffer_size(hw);
 
-       hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+       hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
        hw->fc.low_water = hw->fc.high_water - 1500;
        hw->fc.pause_time = IGB_FC_PAUSE_TIME;
        hw->fc.send_xon = 1;
@@ -1662,7 +1709,8 @@ igb_hardware_init(struct e1000_hw *hw)
        if (diag < 0)
                return diag;
 
-       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+       E1000_WRITE_REG(hw, E1000_VET,
+                       RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
        e1000_get_phy_info(hw);
        e1000_check_for_link(hw);
 
@@ -1726,10 +1774,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
        /* Workaround CRC bytes included in size, take away 4 bytes/packet */
        stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
        stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
-       stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
+       stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
        stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
        stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
-       stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
+       stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
 
        stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
        stats->ruc += E1000_READ_REG(hw, E1000_RUC);
@@ -1742,10 +1790,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
 
        stats->tor += E1000_READ_REG(hw, E1000_TORL);
        stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
-       stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
+       stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
        stats->tot += E1000_READ_REG(hw, E1000_TOTL);
        stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
-       stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
+       stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
 
        stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
        stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
@@ -1779,10 +1827,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
        stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
        stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
        stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
-       stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
+       stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
        stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
        stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
-       stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
+       stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
        stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
        stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
        stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
@@ -1861,8 +1909,8 @@ static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
        /* Note: limit checked in rte_eth_xstats_names() */
 
        for (i = 0; i < IGB_NB_XSTATS; i++) {
-               snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
-                        "%s", rte_igb_stats_strings[i].name);
+               strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name,
+                       sizeof(xstats_names[i].name));
        }
 
        return IGB_NB_XSTATS;
@@ -1879,9 +1927,9 @@ static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
                        return IGB_NB_XSTATS;
 
                for (i = 0; i < IGB_NB_XSTATS; i++)
-                       snprintf(xstats_names[i].name,
-                                       sizeof(xstats_names[i].name),
-                                       "%s", rte_igb_stats_strings[i].name);
+                       strlcpy(xstats_names[i].name,
+                               rte_igb_stats_strings[i].name,
+                               sizeof(xstats_names[i].name));
 
                return IGB_NB_XSTATS;
 
@@ -2028,9 +2076,9 @@ static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
        if (xstats_names != NULL)
                for (i = 0; i < IGBVF_NB_XSTATS; i++) {
-                       snprintf(xstats_names[i].name,
-                               sizeof(xstats_names[i].name), "%s",
-                               rte_igbvf_stats_strings[i].name);
+                       strlcpy(xstats_names[i].name,
+                               rte_igbvf_stats_strings[i].name,
+                               sizeof(xstats_names[i].name));
                }
        return IGBVF_NB_XSTATS;
 }
@@ -2240,6 +2288,10 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
                        ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
                        ETH_LINK_SPEED_1G;
+
+       dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
+       dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
 }
 
 static const uint32_t *
@@ -2745,12 +2797,15 @@ static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
        uint32_t mask, regval;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
        struct rte_eth_dev_info dev_info;
 
        memset(&dev_info, 0, sizeof(dev_info));
        eth_igb_infos_get(dev, &dev_info);
 
-       mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
+       mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
        regval = E1000_READ_REG(hw, E1000_EIMS);
        E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
 
@@ -2777,7 +2832,7 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-       igb_intr_disable(hw);
+       igb_intr_disable(dev);
 
        /* read-on-clear nic registers here */
        icr = E1000_READ_REG(hw, E1000_ICR);
@@ -3028,7 +3083,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
        /* At least reserve one Ethernet frame for watermark */
-       max_high_water = rx_buf_size - ETHER_MAX_LEN;
+       max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
        if ((fc_conf->high_water > max_high_water) ||
            (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@@ -3068,7 +3123,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 #define E1000_RAH_POOLSEL_SHIFT      (18)
 static int
-eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                uint32_t index, uint32_t pool)
 {
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3084,7 +3139,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 {
-       uint8_t addr[ETHER_ADDR_LEN];
+       uint8_t addr[RTE_ETHER_ADDR_LEN];
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        memset(addr, 0, sizeof(addr));
@@ -3094,7 +3149,7 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 
 static int
 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
-                               struct ether_addr *addr)
+                               struct rte_ether_addr *addr)
 {
        eth_igb_rar_clear(dev, 0);
        eth_igb_rar_set(dev, (void *)addr, 0, 0);
@@ -3197,14 +3252,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
         * Keep the persistent behavior the same as Host PF
         */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-       if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+       if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
                PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-               conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+               conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
        }
 #else
-       if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+       if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
                PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-               conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
        }
 #endif
 
@@ -3307,7 +3362,7 @@ igbvf_dev_close(struct rte_eth_dev *dev)
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(dev->data->dev_private);
-       struct ether_addr addr;
+       struct rte_ether_addr addr;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -3452,7 +3507,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 }
 
 static int
-igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4420,8 +4475,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        uint32_t rctl;
        struct e1000_hw *hw;
        struct rte_eth_dev_info dev_info;
-       uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
-                                    VLAN_TAG_SIZE);
+       uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -4433,8 +4487,8 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        eth_igb_infos_get(dev, &dev_info);
 
        /* check that mtu is within the allowed range */
-       if ((mtu < ETHER_MIN_MTU) ||
-           (frame_size > dev_info.max_rx_pktlen))
+       if (mtu < RTE_ETHER_MIN_MTU ||
+                       frame_size > dev_info.max_rx_pktlen)
                return -EINVAL;
 
        /* refuse mtu that requires the support of scattered packets when this
@@ -4446,7 +4500,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        rctl = E1000_READ_REG(hw, E1000_RCTL);
 
        /* switch to jumbo mode if needed */
-       if (frame_size > ETHER_MAX_LEN) {
+       if (frame_size > RTE_ETHER_MAX_LEN) {
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl |= E1000_RCTL_LPE;
@@ -4692,8 +4746,8 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
        uint32_t etqf = 0;
        int ret;
 
-       if (filter->ether_type == ETHER_TYPE_IPv4 ||
-               filter->ether_type == ETHER_TYPE_IPv6) {
+       if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+               filter->ether_type == RTE_ETHER_TYPE_IPv6) {
                PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
                        " ethertype filter.", filter->ether_type);
                return -EINVAL;
@@ -4855,7 +4909,7 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
 
 static int
 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
-                        struct ether_addr *mc_addr_set,
+                        struct rte_ether_addr *mc_addr_set,
                         uint32_t nb_mc_addr)
 {
        struct e1000_hw *hw;
@@ -5104,7 +5158,7 @@ igb_timesync_enable(struct rte_eth_dev *dev)
 
        /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
        E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
-                       (ETHER_TYPE_1588 |
+                       (RTE_ETHER_TYPE_1588 |
                         E1000_ETQF_FILTER_ENABLE |
                         E1000_ETQF_1588));
 
@@ -5560,13 +5614,17 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
                                        E1000_GPIE_NSICR);
                intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
                        misc_shift;
+
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
+                       intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
+
                regval = E1000_READ_REG(hw, E1000_EIAC);
                E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
 
                /* enable msix_other interrupt */
                regval = E1000_READ_REG(hw, E1000_EIMS);
                E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
-               tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
+               tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
                E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
        }
 
@@ -5575,6 +5633,10 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
         */
        intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
                misc_shift;
+
+       if (dev->data->dev_conf.intr_conf.lsc != 0)
+               intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
+
        regval = E1000_READ_REG(hw, E1000_EIAM);
        E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
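
For context, a minimal application-side sketch (not part of this patch) of how
the new .dev_reset callback is reached: rte_eth_dev_reset() dispatches to
eth_igb_reset() above and returns -ENOTSUP while SR-IOV is active. The queue
counts and the reconfiguration steps below are illustrative assumptions.

    #include <rte_ethdev.h>

    /* Illustrative only: reset an igb port and prepare to bring it back up. */
    static int
    reset_port(uint16_t port_id, const struct rte_eth_conf *conf)
    {
            int ret;

            rte_eth_dev_stop(port_id);

            ret = rte_eth_dev_reset(port_id);   /* invokes eth_igb_reset() */
            if (ret != 0)
                    return ret;

            /* One RX and one TX queue is an arbitrary example; real code
             * restores whatever configuration it used before the reset.
             */
            ret = rte_eth_dev_configure(port_id, 1, 1, conf);
            if (ret != 0)
                    return ret;

            /* Queues must be re-created with rte_eth_rx_queue_setup() /
             * rte_eth_tx_queue_setup() before rte_eth_dev_start() is
             * called again; omitted here for brevity.
             */
            return 0;
    }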