igb: avoid enabling interrupt with zero vector
[dpdk.git] / drivers / net / e1000 / igb_ethdev.c
index 2b081b1..76d2acc 100644 (file)
@@ -96,7 +96,10 @@ static int  eth_igb_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
 static void eth_igb_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
+static int eth_igb_xstats_get(struct rte_eth_dev *dev,
+                             struct rte_eth_xstats *xstats, unsigned n);
 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
+static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
 static void eth_igb_infos_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
@@ -149,7 +152,10 @@ static int igbvf_dev_start(struct rte_eth_dev *dev);
 static void igbvf_dev_stop(struct rte_eth_dev *dev);
 static void igbvf_dev_close(struct rte_eth_dev *dev);
 static int eth_igbvf_link_update(struct e1000_hw *hw);
-static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
+static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_stats *rte_stats);
+static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_xstats *xstats, unsigned n);
 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
@@ -246,11 +252,10 @@ static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
 #define UPDATE_VF_STAT(reg, last, cur)            \
 {                                                 \
        u32 latest = E1000_READ_REG(hw, reg);     \
-       cur += latest - last;                     \
+       cur += (latest - last) & UINT_MAX;        \
        last = latest;                            \
 }
 
-
 #define IGB_FC_PAUSE_TIME 0x0680
 #define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
@@ -281,6 +286,18 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {
 {0},
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+       .nb_max = E1000_MAX_RING_DESC,
+       .nb_min = E1000_MIN_RING_DESC,
+       .nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+       .nb_max = E1000_MAX_RING_DESC,
+       .nb_min = E1000_MIN_RING_DESC,
+       .nb_align = IGB_RXD_ALIGN,
+};
+
 static const struct eth_dev_ops eth_igb_ops = {
        .dev_configure        = eth_igb_configure,
        .dev_start            = eth_igb_start,
@@ -292,7 +309,9 @@ static const struct eth_dev_ops eth_igb_ops = {
        .allmulticast_disable = eth_igb_allmulticast_disable,
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igb_stats_get,
+       .xstats_get           = eth_igb_xstats_get,
        .stats_reset          = eth_igb_stats_reset,
+       .xstats_reset         = eth_igb_xstats_reset,
        .dev_infos_get        = eth_igb_infos_get,
        .mtu_set              = eth_igb_mtu_set,
        .vlan_filter_set      = eth_igb_vlan_filter_set,
@@ -319,6 +338,8 @@ static const struct eth_dev_ops eth_igb_ops = {
        .rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
        .filter_ctrl          = eth_igb_filter_ctrl,
        .set_mc_addr_list     = eth_igb_set_mc_addr_list,
+       .rxq_info_get         = igb_rxq_info_get,
+       .txq_info_get         = igb_txq_info_get,
        .timesync_enable      = igb_timesync_enable,
        .timesync_disable     = igb_timesync_disable,
        .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -341,7 +362,9 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .dev_close            = igbvf_dev_close,
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igbvf_stats_get,
+       .xstats_get           = eth_igbvf_xstats_get,
        .stats_reset          = eth_igbvf_stats_reset,
+       .xstats_reset         = eth_igbvf_stats_reset,
        .vlan_filter_set      = igbvf_vlan_filter_set,
        .dev_infos_get        = eth_igbvf_infos_get,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
@@ -349,11 +372,94 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .tx_queue_setup       = eth_igb_tx_queue_setup,
        .tx_queue_release     = eth_igb_tx_queue_release,
        .set_mc_addr_list     = eth_igb_set_mc_addr_list,
+       .rxq_info_get         = igb_rxq_info_get,
+       .txq_info_get         = igb_txq_info_get,
        .mac_addr_set         = igbvf_default_mac_addr_set,
        .get_reg_length       = igbvf_get_reg_length,
        .get_reg              = igbvf_get_regs,
 };
 
+/* store statistics names and its offset in stats structure */
+struct rte_igb_xstats_name_off {
+       char name[RTE_ETH_XSTATS_NAME_SIZE];
+       unsigned offset;
+};
+
+static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
+       {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
+       {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
+       {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
+       {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
+       {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
+       {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
+       {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
+               ecol)},
+       {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
+       {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
+       {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
+       {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
+       {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
+       {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
+       {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
+       {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
+       {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
+       {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
+       {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
+               fcruc)},
+       {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
+       {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
+       {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
+       {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
+       {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
+               prc1023)},
+       {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
+               prc1522)},
+       {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
+       {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
+       {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
+       {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
+       {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
+       {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
+       {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
+       {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
+       {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
+       {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
+       {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
+       {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
+       {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
+       {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
+       {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
+       {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
+       {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
+       {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
+               ptc1023)},
+       {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
+               ptc1522)},
+       {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
+       {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
+       {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
+       {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
+       {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
+       {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
+       {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},
+
+       {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
+};
+
+#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
+               sizeof(rte_igb_stats_strings[0]))
+
+static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
+       {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
+       {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
+       {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
+       {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
+       {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
+};
+
+#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
+               sizeof(rte_igbvf_stats_strings[0]))
+
 /**
  * Atomically reads the link status information from global
  * structure rte_eth_dev.
@@ -531,6 +637,9 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        uint32_t ctrl_ext;
 
        pci_dev = eth_dev->pci_dev;
+
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        eth_dev->dev_ops = &eth_igb_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
@@ -739,6 +848,8 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
 
        pci_dev = eth_dev->pci_dev;
 
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -865,17 +976,99 @@ rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
        return (0);
 }
 
+static int
+igb_check_mq_mode(struct rte_eth_dev *dev)
+{
+	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+	uint16_t nb_rx_q = dev->data->nb_rx_queues;
+	uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == ETH_MQ_TX_DCB ||
+	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
+		return -EINVAL;
+	}
+	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+		/* Check multi-queue mode.
+		 * To not break software we accept ETH_MQ_RX_NONE as this might
+		 * be used to turn off VLAN filter.
+		 */
+
+		if (rx_mq_mode == ETH_MQ_RX_NONE ||
+		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+		} else {
+			/* Only support one queue on VFs.
+			 * RSS together with SRIOV is not supported.
+			 */
+			PMD_INIT_LOG(ERR, "SRIOV is active,"
+					" wrong mq_mode rx %d.",
+					rx_mq_mode);
+			return -EINVAL;
+		}
+		/* TX mode is not used here, so mode might be ignored.*/
+		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+			/* SRIOV only works in VMDq enable mode */
+			PMD_INIT_LOG(WARNING, "SRIOV is active,"
+					" TX mode %d is not supported. "
+					" Driver will behave as %d mode.",
+					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+		}
+
+		/* check valid queue number */
+		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
+			PMD_INIT_LOG(ERR, "SRIOV is active,"
+					" only support one queue on VFs.");
+			return -EINVAL;
+		}
+	} else {
+		/* To not break software that sets an invalid mode, only
+		 * display a warning if an invalid mode is used.
+		 */
+		if (rx_mq_mode != ETH_MQ_RX_NONE &&
+		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != ETH_MQ_RX_RSS) {
+			/* RSS together with VMDq not supported*/
+			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
+				     rx_mq_mode);
+			return -EINVAL;
+		}
+
+		if (tx_mq_mode != ETH_MQ_TX_NONE &&
+		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
+					" Due to txmode is meaningless in this"
+					" driver, just ignore.",
+					tx_mq_mode);
+		}
+	}
+	return 0;
+}
+
 static int
 eth_igb_configure(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
+
+       /* multipe queue mode checking */
+       ret  = igb_check_mq_mode(dev);
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
+                           ret);
+               return ret;
+       }
+
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
        PMD_INIT_FUNC_TRACE();
 
-       return (0);
+       return 0;
 }
 
 static int
@@ -929,11 +1122,11 @@ eth_igb_start(struct rte_eth_dev *dev)
        igb_pf_host_configure(dev);
 
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0)
+       if (dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
-
-       if (rte_intr_efd_enable(intr_handle, intr_vector))
-               return -1;
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -1;
+       }
 
        if (rte_intr_dp_is_en(intr_handle)) {
                intr_handle->intr_vec =
@@ -1257,11 +1450,8 @@ igb_hardware_init(struct e1000_hw *hw)
 
 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
 static void
-eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_hw_stats *stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int pause_frames;
 
        if(hw->phy.media_type == e1000_media_type_copper ||
@@ -1367,6 +1557,16 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
        stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
        stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
        stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+}
+
+static void
+eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw_stats *stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       igb_read_stats_registers(hw, stats);
 
        if (rte_stats == NULL)
                return;
@@ -1409,12 +1609,52 @@ eth_igb_stats_reset(struct rte_eth_dev *dev)
 }
 
 static void
-eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+eth_igb_xstats_reset(struct rte_eth_dev *dev)
+{
+       struct e1000_hw_stats *stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       /* HW registers are cleared on read */
+       eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
+
+       /* Reset software totals */
+       memset(stats, 0, sizeof(*stats));
+}
+
+static int
+eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+                  unsigned n)
 {
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
-                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       struct e1000_hw_stats *hw_stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       unsigned i;
 
+       if (n < IGB_NB_XSTATS)
+               return IGB_NB_XSTATS;
+
+       igb_read_stats_registers(hw, hw_stats);
+
+       /* If this is a reset xstats is NULL, and we have cleared the
+        * registers by reading them.
+        */
+       if (!xstats)
+               return 0;
+
+       /* Extended stats */
+       for (i = 0; i < IGB_NB_XSTATS; i++) {
+               snprintf(xstats[i].name, sizeof(xstats[i].name),
+                        "%s", rte_igb_stats_strings[i].name);
+               xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+                       rte_igb_stats_strings[i].offset);
+       }
+
+       return IGB_NB_XSTATS;
+}
+
+static void
+igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
+{
        /* Good Rx packets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGPRC,
            hw_stats->last_gprc, hw_stats->gprc);
@@ -1450,6 +1690,43 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
        /* Good Tx loopback octets */
        UPDATE_VF_STAT(E1000_VFGOTLBC,
            hw_stats->last_gotlbc, hw_stats->gotlbc);
+}
+
+static int
+eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+                    unsigned n)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       unsigned i;
+
+       if (n < IGBVF_NB_XSTATS)
+               return IGBVF_NB_XSTATS;
+
+       igbvf_read_stats_registers(hw, hw_stats);
+
+       if (!xstats)
+               return 0;
+
+       for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+               snprintf(xstats[i].name, sizeof(xstats[i].name), "%s",
+                        rte_igbvf_stats_strings[i].name);
+               xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+                       rte_igbvf_stats_strings[i].offset);
+       }
+
+       return IGBVF_NB_XSTATS;
+}
+
+static void
+eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
+                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       igbvf_read_stats_registers(hw, hw_stats);
 
        if (rte_stats == NULL)
                return;
@@ -1463,7 +1740,6 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
        rte_stats->ilbbytes = hw_stats->gorlbc;
        rte_stats->olbpackets = hw_stats->gptlbc;
        rte_stats->olbbytes = hw_stats->gotlbc;
-
 }
 
 static void
@@ -1478,7 +1754,6 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev)
        /* reset HW current stats*/
        memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
               offsetof(struct e1000_vf_stats, gprc));
-
 }
 
 static void
@@ -1499,7 +1774,8 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
-               DEV_TX_OFFLOAD_SCTP_CKSUM;
+               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+               DEV_TX_OFFLOAD_TCP_TSO;
 
        switch (hw->mac.type) {
        case e1000_82575:
@@ -1572,6 +1848,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .txq_flags = 0,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 static void
@@ -1590,7 +1869,8 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                                DEV_TX_OFFLOAD_UDP_CKSUM   |
                                DEV_TX_OFFLOAD_TCP_CKSUM   |
-                               DEV_TX_OFFLOAD_SCTP_CKSUM;
+                               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+                               DEV_TX_OFFLOAD_TCP_TSO;
        switch (hw->mac.type) {
        case e1000_vfadapt:
                dev_info->max_rx_queues = 2;
@@ -1623,6 +1903,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .txq_flags = 0,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -4212,7 +4495,10 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
        uint32_t tmpval, regval, intr_mask;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t vec = 0;
+       uint32_t vec = E1000_MISC_VEC_ID;
+       uint32_t base = E1000_MISC_VEC_ID;
+       uint32_t misc_shift = 0;
+
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 
        /* won't configure msix register if no mapping is done
@@ -4221,6 +4507,11 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
        if (!rte_intr_dp_is_en(intr_handle))
                return;
 
+       if (rte_intr_allow_others(intr_handle)) {
+               vec = base = E1000_RX_VEC_START;
+               misc_shift = 1;
+       }
+
        /* set interrupt vector for other causes */
        if (hw->mac.type == e1000_82575) {
                tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -4249,8 +4540,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                        E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                        E1000_GPIE_NSICR);
-
-               intr_mask = (1 << intr_handle->max_intr) - 1;
+               intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
+                       misc_shift;
                regval = E1000_READ_REG(hw, E1000_EIAC);
                E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
 
@@ -4264,14 +4555,15 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
        /* use EIAM to auto-mask when MSI-X interrupt
         * is asserted, this saves a register write for every interrupt
         */
-       intr_mask = (1 << intr_handle->nb_efd) - 1;
+       intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
+               misc_shift;
        regval = E1000_READ_REG(hw, E1000_EIAM);
        E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
 
        for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
                eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
                intr_handle->intr_vec[queue_id] = vec;
-               if (vec < intr_handle->nb_efd - 1)
+               if (vec < base + intr_handle->nb_efd - 1)
                        vec++;
        }