ethdev: add new offload flag to keep CRC
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 72fc02d..997683d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -59,9 +59,6 @@
  */
 #define IXGBE_FC_LO    0x40
 
-/* Default minimum inter-interrupt interval for EITR configuration */
-#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
-
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
@@ -192,6 +189,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+                                                 int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
@@ -240,8 +240,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
-static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
+static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -249,6 +249,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
@@ -1606,7 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        ixgbevf_dev_stats_reset(eth_dev);
 
        /* Disable the interrupts for VF */
-       ixgbevf_intr_disable(hw);
+       ixgbevf_intr_disable(eth_dev);
 
        hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
        diag = hw->mac.ops.reset_hw(hw);
@@ -1675,7 +1676,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        rte_intr_callback_register(intr_handle,
                                   ixgbevf_dev_interrupt_handler, eth_dev);
        rte_intr_enable(intr_handle);
-       ixgbevf_intr_enable(hw);
+       ixgbevf_intr_enable(eth_dev);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -1708,7 +1709,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
        eth_dev->tx_pkt_burst = NULL;
 
        /* Disable the interrupts for VF */
-       ixgbevf_intr_disable(hw);
+       ixgbevf_intr_disable(eth_dev);
 
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
@@ -1977,10 +1978,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
        rxq = dev->data->rx_queues[queue];
 
-       if (on)
+       if (on) {
                rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-       else
+               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+       } else {
                rxq->vlan_flags = PKT_RX_VLAN;
+               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+       }
 }
 
 static void
@@ -2132,8 +2136,30 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
        }
 }
 
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+       uint16_t i;
+       struct rte_eth_rxmode *rxmode;
+       struct ixgbe_rx_queue *rxq;
+
+       if (mask & ETH_VLAN_STRIP_MASK) {
+               rxmode = &dev->data->dev_conf.rxmode;
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               rxq = dev->data->rx_queues[i];
+                               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+                       }
+               else
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               rxq = dev->data->rx_queues[i];
+                               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                       }
+       }
+}
+
 static int
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 {
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;
@@ -2159,6 +2185,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        return 0;
 }
 
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+       ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+       ixgbe_vlan_offload_config(dev, mask);
+
+       return 0;
+}
+
 static void
 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
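
Note: with the split above, ixgbe_vlan_offload_set() first mirrors the
port-level DEV_RX_OFFLOAD_VLAN_STRIP flag into each queue's rxq->offloads
and only then programs the hardware. At runtime an application drives this
through the generic ethdev call, which updates dev_conf.rxmode.offloads
before invoking the PMD op. A minimal sketch (port_id is assumed to be a
configured ixgbe port; error handling trimmed):

	int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	/* request Rx VLAN stripping on all queues of the port */
	vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
	if (rte_eth_dev_set_vlan_offload(port_id, vlan_offload) != 0)
		printf("cannot enable VLAN stripping on port %u\n", port_id);
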
@@ -2307,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;
 
-                       if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
-                               PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
-                                                IXGBE_DCB_NB_QUEUES);
-                               return -EINVAL;
-                       }
                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
@@ -2325,11 +2356,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;
 
-                       if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
-                               PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
-                                                IXGBE_DCB_NB_QUEUES);
-                               return -EINVAL;
-                       }
                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
@@ -2365,9 +2391,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
-       struct rte_eth_dev_info dev_info;
-       uint64_t rx_offloads;
-       uint64_t tx_offloads;
        int ret;
 
        PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2402,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
                return ret;
        }
 
-       ixgbe_dev_info_get(dev, &dev_info);
-       rx_offloads = dev->data->dev_conf.rxmode.offloads;
-       if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-               PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-                           rx_offloads, dev_info.rx_offload_capa);
-               return -ENOTSUP;
-       }
-       tx_offloads = dev->data->dev_conf.txmode.offloads;
-       if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-               PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-                           tx_offloads, dev_info.tx_offload_capa);
-               return -ENOTSUP;
-       }
-
        /* set flag to update link status after init */
        intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
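
Note: the capability check deleted here is not lost; from this release the
ethdev layer performs the equivalent validation for every PMD inside
rte_eth_dev_configure(). A sketch of the generic check (not the verbatim
library code):

	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	    dev_conf->rxmode.offloads) {
		/* some requested Rx offloads are not supported */
		return -EINVAL;
	}
	/* likewise for dev_conf->txmode.offloads vs tx_offload_capa */

The identical block removed from ixgbevf_dev_configure() below is covered
by the same generic check.
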
@@ -2599,7 +2606,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
-       err = ixgbe_vlan_offload_set(dev, mask);
+       err = ixgbe_vlan_offload_config(dev, mask);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
@@ -3113,9 +3120,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
        }
 
        /* Flow Director Stats registers */
-       hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-       hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+               hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+               hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+                                       IXGBE_FDIRUSTAT) & 0xFFFF;
+               hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+                                       IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+               hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+                                       IXGBE_FDIRFSTAT) & 0xFFFF;
+               hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+                                       IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+       }
        /* MACsec Stats registers */
        macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
        macsec_stats->out_pkts_encrypted +=
@@ -3748,6 +3764,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
                dev_info->speed_capa |= ETH_LINK_SPEED_5G;
        }
+
+       /* Driver-preferred Rx/Tx parameters */
+       dev_info->default_rxportconf.burst_size = 32;
+       dev_info->default_txportconf.burst_size = 32;
+       dev_info->default_rxportconf.nb_queues = 1;
+       dev_info->default_txportconf.nb_queues = 1;
+       dev_info->default_rxportconf.ring_size = 256;
+       dev_info->default_txportconf.ring_size = 256;
 }
 
 static const uint32_t *
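
Note: these values populate the rte_eth_dev_portconf fields reported
through rte_eth_dev_info_get(). Passing 0 descriptors to
rte_eth_rx_queue_setup() lets the ethdev layer substitute the PMD
preference automatically, or an application can read it explicitly. A
sketch (mb_pool is assumed to be an existing mempool; the 512 fallback is
illustrative):

	struct rte_eth_dev_info info;
	uint16_t nb_rxd;

	rte_eth_dev_info_get(port_id, &info);
	/* ring_size == 0 means the PMD reported no preference */
	nb_rxd = info.default_rxportconf.ring_size != 0 ?
		 info.default_rxportconf.ring_size : 512;
	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			       rte_eth_dev_socket_id(port_id), NULL, mb_pool);
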
@@ -4930,19 +4954,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
  * Virtual Function operations
  */
 static void
-ixgbevf_intr_disable(struct ixgbe_hw *hw)
+ixgbevf_intr_disable(struct rte_eth_dev *dev)
 {
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        PMD_INIT_FUNC_TRACE();
 
        /* Clear interrupt mask to stop interrupts from being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 
        IXGBE_WRITE_FLUSH(hw);
+
+       /* Clear mask value. */
+       intr->mask = 0;
 }
 
 static void
-ixgbevf_intr_enable(struct ixgbe_hw *hw)
+ixgbevf_intr_enable(struct rte_eth_dev *dev)
 {
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        PMD_INIT_FUNC_TRACE();
 
        /* VF enable interrupt autoclean */
@@ -4951,6 +4988,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
 
        IXGBE_WRITE_FLUSH(hw);
+
+       /* Save IXGBE_VTEIMS value to mask. */
+       intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
 }
 
 static int
@@ -4959,40 +4999,21 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        struct ixgbe_adapter *adapter =
                        (struct ixgbe_adapter *)dev->data->dev_private;
-       struct rte_eth_dev_info dev_info;
-       uint64_t rx_offloads;
-       uint64_t tx_offloads;
 
        PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
                     dev->data->port_id);
 
-       ixgbevf_dev_info_get(dev, &dev_info);
-       rx_offloads = dev->data->dev_conf.rxmode.offloads;
-       if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-               PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-                           rx_offloads, dev_info.rx_offload_capa);
-               return -ENOTSUP;
-       }
-       tx_offloads = dev->data->dev_conf.txmode.offloads;
-       if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-               PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-                           tx_offloads, dev_info.tx_offload_capa);
-               return -ENOTSUP;
-       }
-
        /*
         * VF has no ability to enable/disable HW CRC stripping,
         * so keep the behavior consistent with the host PF
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-       if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+       if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
                PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
                conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
        }
 #else
-       if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+       if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
                PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
                conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
        }
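
Note: rte_eth_dev_must_keep_crc() is the transitional helper introduced by
the ethdev patch this change belongs to. While DEV_RX_OFFLOAD_CRC_STRIP and
DEV_RX_OFFLOAD_KEEP_CRC coexist, it resolves the requested behavior roughly
as follows (sketched from the same series):

	static inline int
	rte_eth_dev_must_keep_crc(uint64_t rx_offloads)
	{
		if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
			return 0;

		/* no KEEP_CRC or CRC_STRIP offload flags means keep CRC */
		return 1;
	}
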
@@ -5047,7 +5068,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        /* Set HW strip */
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
-       err = ixgbevf_vlan_offload_set(dev, mask);
+       err = ixgbevf_vlan_offload_config(dev, mask);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
                ixgbe_dev_clear_queues(dev);
@@ -5093,7 +5114,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        rte_intr_enable(intr_handle);
 
        /* Re-enable interrupt for VF */
-       ixgbevf_intr_enable(hw);
+       ixgbevf_intr_enable(dev);
 
        return 0;
 }
@@ -5107,7 +5128,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       ixgbevf_intr_disable(hw);
+       ixgbevf_intr_disable(dev);
 
        hw->adapter_stopped = 1;
        ixgbe_stop_adapter(hw);
@@ -5245,7 +5266,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 }
 
 static int
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 {
        struct ixgbe_rx_queue *rxq;
        uint16_t i;
@@ -5263,6 +5284,16 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        return 0;
 }
 
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+       ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+       ixgbevf_vlan_offload_config(dev, mask);
+
+       return 0;
+}
+
 int
 ixgbe_vt_check(struct ixgbe_hw *hw)
 {
@@ -5599,17 +5630,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-       uint32_t mask;
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vec = IXGBE_MISC_VEC_ID;
 
-       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        if (rte_intr_allow_others(intr_handle))
                vec = IXGBE_RX_VEC_START;
-       mask |= (1 << vec);
+       intr->mask |= (1 << vec);
        RTE_SET_USED(queue_id);
-       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
        rte_intr_enable(intr_handle);
 
@@ -5619,19 +5650,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 static int
 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-       uint32_t mask;
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t vec = IXGBE_MISC_VEC_ID;
 
-       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        if (rte_intr_allow_others(intr_handle))
                vec = IXGBE_RX_VEC_START;
-       mask &= ~(1 << vec);
+       intr->mask &= ~(1 << vec);
        RTE_SET_USED(queue_id);
-       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
        return 0;
 }
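
Note: these two callbacks back the public Rx interrupt control API; the
driver now tracks the enable mask in intr->mask instead of reading
IXGBE_VTEIMS back before each update. Typical application usage, following
the l3fwd-power pattern (a sketch; error handling omitted):

	struct rte_epoll_event event;

	/* bind the Rx queue interrupt to this lcore's epoll instance */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
				  RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	rte_eth_dev_rx_intr_enable(port_id, queue_id);	/* arm interrupt */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1 /* block */);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);	/* back to polling */
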
@@ -5797,6 +5828,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                if (vector_idx < base + intr_handle->nb_efd - 1)
                        vector_idx++;
        }
+
+       /* As the RX queue setting above shows, all queues use vector 0.
+        * Set only the ITR value of IXGBE_MISC_VEC_ID.
+        */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
+                       IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+                       | IXGBE_EITR_CNT_WDIS);
 }
 
 /**
@@ -5818,8 +5856,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
 
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd
+        * but if msix has been enabled already, auto clean, auto mask
+        * and throttling still need to be configured.
         */
-       if (!rte_intr_dp_is_en(intr_handle))
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       if (!rte_intr_dp_is_en(intr_handle) &&
+           !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
                return;
 
        if (rte_intr_allow_others(intr_handle))
@@ -5843,30 +5885,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
-       for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
-            queue_id++) {
-               /* by default, 1:1 mapping */
-               ixgbe_set_ivar_map(hw, 0, queue_id, vec);
-               intr_handle->intr_vec[queue_id] = vec;
-               if (vec < base + intr_handle->nb_efd - 1)
-                       vec++;
-       }
+       if (rte_intr_dp_is_en(intr_handle)) {
+               for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+                       queue_id++) {
+                       /* by default, 1:1 mapping */
+                       ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+                       intr_handle->intr_vec[queue_id] = vec;
+                       if (vec < base + intr_handle->nb_efd - 1)
+                               vec++;
+               }
 
-       switch (hw->mac.type) {
-       case ixgbe_mac_82598EB:
-               ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-                                  IXGBE_MISC_VEC_ID);
-               break;
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-               ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
-               break;
-       default:
-               break;
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       ixgbe_set_ivar_map(hw, -1,
+                                          IXGBE_IVAR_OTHER_CAUSES_INDEX,
+                                          IXGBE_MISC_VEC_ID);
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               case ixgbe_mac_X550:
+                       ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+                       break;
+               default:
+                       break;
+               }
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
-                       IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+                       IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+                       | IXGBE_EITR_CNT_WDIS);
 
        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
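
Note: IXGBE_EITR_INTERVAL_US() supersedes the raw
IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT value removed at the top of
this patch: the throttling interval is now expressed in microseconds and
converted to the EITR register's 2048 ns units, with IXGBE_EITR_CNT_WDIS
setting the counter write-disable bit. The header side looks approximately
like this (sketched; see ixgbe_ethdev.h in this series for the exact
definitions):

	#define IXGBE_EITR_INTERVAL_UNIT_NS	2048
	#define IXGBE_EITR_ITR_INT_SHIFT	3
	#define IXGBE_EITR_INTERVAL_US(us) \
		(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << \
		  IXGBE_EITR_ITR_INT_SHIFT) & IXGBE_EITR_ITR_INT_MASK)
	#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */
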
@@ -8262,7 +8308,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-       ixgbevf_intr_disable(hw);
+       ixgbevf_intr_disable(dev);
 
        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -8279,7 +8325,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
@@ -8288,7 +8333,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
        }
 
-       ixgbevf_intr_enable(hw);
+       ixgbevf_intr_enable(dev);
 
        return 0;
 }