i40e: do not report deprecated statistics
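
The fdirmatch, ibadcrc and ibadlen fields of struct rte_eth_stats are
deprecated. Stop filling them: CRC and length errors are folded directly
into ierrors, and fdirmatch is dropped. The dedicated xstats_reset handler
is replaced by stats_reset, and the per-priority flow control counters are
now generated from name/offset tables, renaming e.g.
"rx_q0_xon_priority_packets" to "rx_priority0_xon_packets".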
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index df9db04..619dc2d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -39,6 +39,7 @@
 #include <unistd.h>
 #include <stdarg.h>
 #include <inttypes.h>
+#include <assert.h>
 
 #include <rte_string_fns.h>
 #include <rte_pci.h>
 /* Maximum number of VSIs */
 #define I40E_MAX_NUM_VSIS          (384UL)
 
-/* Default queue interrupt throttling time in microseconds */
-#define I40E_ITR_INDEX_DEFAULT          0
-#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
-#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
-
 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
 
 /* Flow control default timer */
@@ -292,7 +288,6 @@ static void i40e_dev_stats_get(struct rte_eth_dev *dev,
 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstats *xstats, unsigned n);
 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
-static void i40e_dev_xstats_reset(struct rte_eth_dev *dev);
 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
@@ -343,7 +338,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
-static void i40e_pf_config_irq0(struct i40e_hw *hw);
+static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
 static void i40e_dev_interrupt_handler(
                __rte_unused struct rte_intr_handle *handle, void *param);
 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
@@ -404,7 +399,10 @@ static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
-
+static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+                                        uint16_t queue_id);
+static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+                                         uint16_t queue_id);
 
 static const struct rte_pci_id pci_id_i40e_map[] = {
 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
@@ -427,7 +425,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .stats_reset                  = i40e_dev_stats_reset,
-       .xstats_reset                 = i40e_dev_xstats_reset,
+       .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
@@ -440,6 +438,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
+       .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
@@ -489,6 +489,9 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
 };
 
+#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
+               sizeof(rte_i40e_stats_strings[0]))
+
 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
@@ -555,15 +558,30 @@ static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
                rx_lpi_count)},
 };
 
-/* Q Stats: 5 stats are exposed for each queue, implemented in xstats_get() */
-#define I40E_NB_HW_PORT_Q_STATS (8 * 5)
-
-#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
-               sizeof(rte_i40e_stats_strings[0]))
 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))
-#define I40E_NB_XSTATS (I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + \
-               I40E_NB_HW_PORT_Q_STATS)
+
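+/* Per-priority Rx flow control counters, expanded for priorities 0-7 in
+ * i40e_dev_xstats_get()
+ */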
+static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
+       {"xon_packets", offsetof(struct i40e_hw_port_stats,
+               priority_xon_rx)},
+       {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+               priority_xoff_rx)},
+};
+
+#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
+               sizeof(rte_i40e_rxq_prio_strings[0]))
+
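+/* Per-priority Tx flow control counters, including XON-to-XOFF transitions */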
+static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
+       {"xon_packets", offsetof(struct i40e_hw_port_stats,
+               priority_xon_tx)},
+       {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+               priority_xoff_tx)},
+       {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
+               priority_xon_2_xoff)},
+};
+
+#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
+               sizeof(rte_i40e_txq_prio_strings[0]))
 
 static struct eth_driver rte_i40e_pmd = {
        .pci_drv = {
@@ -875,7 +893,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                i40e_dev_interrupt_handler, (void *)dev);
 
        /* configure and enable device interrupt */
-       i40e_pf_config_irq0(hw);
+       i40e_pf_config_irq0(hw, TRUE);
        i40e_pf_enable_irq0(hw);
 
        /* enable uio intr after callback register */
@@ -1056,6 +1074,8 @@ err:
 void
 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
 {
+       struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;
@@ -1067,52 +1087,50 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
        }
 
        if (vsi->type != I40E_VSI_SRIOV) {
-               I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
-               I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
-                               msix_vect - 1), 0);
+               if (!rte_intr_allow_others(intr_handle)) {
+                       I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+                                      I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+                       I40E_WRITE_REG(hw,
+                                      I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+                                      0);
+               } else {
+                       I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+                                      I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+                       I40E_WRITE_REG(hw,
+                                      I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+                                                      msix_vect - 1), 0);
+               }
        } else {
                uint32_t reg;
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);
 
-               I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
+               I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+                              I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
        }
        I40E_WRITE_FLUSH(hw);
 }
 
-static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
-{
-       if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
-               interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
-
-       /* Convert to hardware count, as writing each 1 represents 2 us */
-       return (interval/2);
-}
-
-void
-i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+static void
+__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
+                      int base_queue, int nb_queue)
 {
+       int i;
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-       uint16_t msix_vect = vsi->msix_intr;
-       int i;
-
-       for (i = 0; i < vsi->nb_qps; i++)
-               I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
 
        /* Bind all RX queues to allocated MSIX interrupt */
-       for (i = 0; i < vsi->nb_qps; i++) {
+       for (i = 0; i < nb_queue; i++) {
                val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                        I40E_QINT_RQCTL_ITR_INDX_MASK |
-                       ((vsi->base_queue + i + 1) <<
-                       I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+                       ((base_queue + i + 1) <<
+                        I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                        (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                        I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 
-               if (i == vsi->nb_qps - 1)
+               if (i == nb_queue - 1)
                        val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
-               I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
+               I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
        }
 
        /* Write first RX queue to Link list register as the head element */
@@ -1120,56 +1138,172 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
                uint16_t interval =
                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
 
-               I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
-                                               (vsi->base_queue <<
-                               I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
-                       (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
-
-               I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
-                                               msix_vect - 1), interval);
-
-#ifndef I40E_GLINT_CTL
-#define I40E_GLINT_CTL                     0x0003F800
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
-#endif
-               /* Disable auto-mask on enabling of all none-zero  interrupt */
-               I40E_WRITE_REG(hw, I40E_GLINT_CTL,
-                       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
+               if (msix_vect == I40E_MISC_VEC_ID) {
+                       I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+                                      (base_queue <<
+                                       I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+                                      (0x0 <<
+                                       I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+                       I40E_WRITE_REG(hw,
+                                      I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+                                      interval);
+               } else {
+                       I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+                                      (base_queue <<
+                                       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+                                      (0x0 <<
+                                       I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+                       I40E_WRITE_REG(hw,
+                                      I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+                                                      msix_vect - 1),
+                                      interval);
+               }
        } else {
                uint32_t reg;
 
-               /* num_msix_vectors_vf needs to minus irq0 */
-               reg = (hw->func_caps.num_msix_vectors_vf - 1) *
-                       vsi->user_param + (msix_vect - 1);
+               if (msix_vect == I40E_MISC_VEC_ID) {
+                       I40E_WRITE_REG(hw,
+                                      I40E_VPINT_LNKLST0(vsi->user_param),
+                                      (base_queue <<
+                                       I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+                                      (0x0 <<
+                                       I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+               } else {
+                       /* num_msix_vectors_vf counts irq0, which must be excluded */
+                       reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+                               vsi->user_param + (msix_vect - 1);
 
-               I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
+                       I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+                                      (base_queue <<
                                        I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
-                               (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+                                      (0x0 <<
+                                       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+               }
        }
 
        I40E_WRITE_FLUSH(hw);
 }
 
+void
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+{
+       struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint16_t msix_vect = vsi->msix_intr;
+       uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+       uint16_t queue_idx = 0;
+       int record = 0;
+       uint32_t val;
+       int i;
+
+       for (i = 0; i < vsi->nb_qps; i++) {
+               I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+               I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+       }
+
+       /* disable auto-masking so the INTENA flag is not cleared on interrupt */
+       val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+       val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+               I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
+               I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+       I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+
+       /* VF bind interrupt */
+       if (vsi->type == I40E_VSI_SRIOV) {
+               __vsi_queues_bind_intr(vsi, msix_vect,
+                                      vsi->base_queue, vsi->nb_qps);
+               return;
+       }
+
+       /* PF & VMDq bind interrupt */
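+       /* queue_idx is the ethdev-level Rx queue index; when record is set,
+        * the queue/vector pairs are saved in intr_handle->intr_vec
+        */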
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (vsi->type == I40E_VSI_MAIN) {
+                       queue_idx = 0;
+                       record = 1;
+               } else if (vsi->type == I40E_VSI_VMDQ2) {
+                       struct i40e_vsi *main_vsi =
+                               I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
+                       queue_idx = vsi->base_queue - main_vsi->nb_qps;
+                       record = 1;
+               }
+       }
+
+       for (i = 0; i < vsi->nb_used_qps; i++) {
+               if (nb_msix <= 1) {
+                       if (!rte_intr_allow_others(intr_handle))
+                               /* allow sharing MISC_VEC_ID */
+                               msix_vect = I40E_MISC_VEC_ID;
+
+                       /* not enough MSI-X vectors, map all queues to one */
+                       __vsi_queues_bind_intr(vsi, msix_vect,
+                                              vsi->base_queue + i,
+                                              vsi->nb_used_qps - i);
+                       for (; !!record && i < vsi->nb_used_qps; i++)
+                               intr_handle->intr_vec[queue_idx + i] =
+                                       msix_vect;
+                       break;
+               }
+               /* 1:1 queue/msix_vect mapping */
+               __vsi_queues_bind_intr(vsi, msix_vect,
+                                      vsi->base_queue + i, 1);
+               if (!!record)
+                       intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+               msix_vect++;
+               nb_msix--;
+       }
+}
+
 static void
 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 {
+       struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t interval = i40e_calc_itr_interval(\
-                       RTE_LIBRTE_I40E_ITR_INTERVAL);
-
-       I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
-                                       I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+               RTE_LIBRTE_I40E_ITR_INTERVAL);
+       uint16_t msix_intr, i;
+
+       if (rte_intr_allow_others(intr_handle))
+               for (i = 0; i < vsi->nb_msix; i++) {
+                       msix_intr = vsi->msix_intr + i;
+                       I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+                               I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                       (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+                               (interval <<
+                                I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+               }
+       else
+               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                              I40E_PFINT_DYN_CTL0_INTENA_MASK |
+                              I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+                              (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+                              (interval <<
+                               I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+
+       I40E_WRITE_FLUSH(hw);
 }
 
 static void
 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
 {
+       struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       uint16_t msix_intr, i;
+
+       if (rte_intr_allow_others(intr_handle))
+               for (i = 0; i < vsi->nb_msix; i++) {
+                       msix_intr = vsi->msix_intr + i;
+                       I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+                                      0);
+               }
+       else
+               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
 
-       I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
+       I40E_WRITE_FLUSH(hw);
 }
 
 static inline uint8_t
@@ -1279,6 +1413,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t intr_vector = 0;
 
        hw->adapter_stopped = 0;
 
@@ -1290,6 +1426,28 @@ i40e_dev_start(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       rte_intr_disable(intr_handle);
+
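+       /* check and configure queue intr-vector mapping */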
+       if ((rte_intr_cap_multiple(intr_handle) ||
+            !RTE_ETH_DEV_SRIOV(dev).active) &&
+           dev->data->dev_conf.intr_conf.rxq != 0) {
+               intr_vector = dev->data->nb_rx_queues;
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -1;
+       }
+
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   dev->data->nb_rx_queues * sizeof(int),
+                                   0);
+               if (!intr_handle->intr_vec) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate intr_vec for"
+                                    " %d Rx queues\n", dev->data->nb_rx_queues);
+                       return -ENOMEM;
+               }
+       }
+
        /* Initialize VSI */
        ret = i40e_dev_rxtx_init(pf);
        if (ret != I40E_SUCCESS) {
@@ -1298,11 +1456,14 @@ i40e_dev_start(struct rte_eth_dev *dev)
        }
 
        /* Map queues with MSIX interrupt */
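+       /* the main VSI uses the Rx queues that are not assigned to VMDq pools */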
+       main_vsi->nb_used_qps = dev->data->nb_rx_queues -
+               pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
        i40e_vsi_queues_bind_intr(main_vsi);
        i40e_vsi_enable_queues_intr(main_vsi);
 
        /* Map VMDQ VSI queues with MSIX interrupt */
        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+               pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
                i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
                i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
        }
@@ -1339,6 +1500,22 @@ i40e_dev_start(struct rte_eth_dev *dev)
                goto err_up;
        }
 
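+       /* with a single interrupt vector, irq0 is shared between the Rx
+        * queues and misc causes, so the PMD's handler must step aside
+        */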
+       if (!rte_intr_allow_others(intr_handle)) {
+               rte_intr_callback_unregister(intr_handle,
+                                            i40e_dev_interrupt_handler,
+                                            (void *)dev);
+               /* configure and enable device interrupt */
+               i40e_pf_config_irq0(hw, FALSE);
+               i40e_pf_enable_irq0(hw);
+
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
+                       PMD_INIT_LOG(INFO, "lsc interrupt won't be enabled:"
+                                    " no interrupt multiplexing\n");
+       }
+
+       /* enable uio intr after callback register */
+       rte_intr_enable(intr_handle);
+
        return I40E_SUCCESS;
 
 err_up:
@@ -1354,6 +1531,7 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct i40e_mirror_rule *p_mirror;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i;
 
        /* Disable all queues */
@@ -1369,8 +1547,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        }
 
        if (pf->fdir.fdir_vsi) {
-               i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
-               i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+               i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+               i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
        }
        /* Clear all queues and release memory */
        i40e_dev_clear_queues(dev);
@@ -1385,6 +1563,18 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        }
        pf->nb_mirror_rule = 0;
 
+       if (!rte_intr_allow_others(intr_handle))
+               /* resume to the default handler */
+               rte_intr_callback_register(intr_handle,
+                                          i40e_dev_interrupt_handler,
+                                          (void *)dev);
+
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }
 
 static void
@@ -1870,15 +2060,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->oerrors  = ns->eth.tx_errors +
                        pf->main_vsi->eth_stats.tx_errors;
        stats->imcasts  = pf->main_vsi->eth_stats.rx_multicast;
-       stats->fdirmatch = ns->fd_sb_match;
 
        /* Rx Errors */
-       stats->ibadcrc  = ns->crc_errors;
-       stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
-                       ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
        stats->imissed  = ns->eth.rx_discards +
                        pf->main_vsi->eth_stats.rx_discards;
-       stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;
+       stats->ierrors  = ns->crc_errors +
+                       ns->rx_length_errors + ns->rx_undersize +
+                       ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
+                       stats->imissed;
 
        PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
        PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
@@ -1951,19 +2140,28 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
 }
 
+/* Reset the statistics */
 static void
-i40e_dev_xstats_reset(struct rte_eth_dev *dev)
+i40e_dev_stats_reset(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_hw_port_stats *hw_stats = &pf->stats;
 
-       /* The hw registers are cleared on read */
+       /* Mark PF and VSI stats offsets as stale, i.e. "reset" them */
        pf->offset_loaded = false;
+       if (pf->main_vsi)
+               pf->main_vsi->offset_loaded = false;
+
+       /* read the stats; current register values become the new offsets */
        i40e_read_stats_registers(pf, hw);
+}
 
-       /* reset software counters */
-       memset(hw_stats, 0, sizeof(*hw_stats));
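+/* Total number of xstats: basic eth stats, HW port stats, plus 8 traffic
+ * priorities for each Rx/Tx flow control counter
+ */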
+static uint32_t
+i40e_xstats_calc_num(void)
+{
+       return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
+               (I40E_NB_RXQ_PRIO_XSTATS * 8) +
+               (I40E_NB_TXQ_PRIO_XSTATS * 8);
 }
 
 static int
@@ -1972,18 +2170,20 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       unsigned i, count = 0;
+       unsigned i, count, prio;
        struct i40e_hw_port_stats *hw_stats = &pf->stats;
 
-       if (n < I40E_NB_XSTATS)
-               return I40E_NB_XSTATS;
+       count = i40e_xstats_calc_num();
+       if (n < count)
+               return count;
 
        i40e_read_stats_registers(pf, hw);
 
-       /* Reset */
        if (xstats == NULL)
                return 0;
 
+       count = 0;
+
        /* Get stats from i40e_eth_stats struct */
        for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
                snprintf(xstats[count].name, sizeof(xstats[count].name),
@@ -2002,55 +2202,35 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
                count++;
        }
 
-       /* Get per-queue stats from i40e_hw_port struct */
-       for (i = 0; i < 8; i++) {
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_xon_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct i40e_hw_port_stats,
-                                        priority_xon_rx[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_xoff_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct i40e_hw_port_stats,
-                                        priority_xoff_rx[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "tx_q%u_xon_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct i40e_hw_port_stats,
-                                        priority_xon_tx[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "tx_q%u_xoff_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct i40e_hw_port_stats,
-                                        priority_xoff_tx[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "xx_q%u_xon_to_xoff_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct i40e_hw_port_stats,
-                                        priority_xon_2_xoff[i]));
-               count++;
+       for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+               for (prio = 0; prio < 8; prio++) {
+                       snprintf(xstats[count].name,
+                                sizeof(xstats[count].name),
+                                "rx_priority%u_%s", prio,
+                                rte_i40e_rxq_prio_strings[i].name);
+                       xstats[count].value =
+                               *(uint64_t *)(((char *)hw_stats) +
+                               rte_i40e_rxq_prio_strings[i].offset +
+                               (sizeof(uint64_t) * prio));
+                       count++;
+               }
        }
 
-       return I40E_NB_XSTATS;
-}
-
-/* Reset the statistics */
-static void
-i40e_dev_stats_reset(struct rte_eth_dev *dev)
-{
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+               for (prio = 0; prio < 8; prio++) {
+                       snprintf(xstats[count].name,
+                                sizeof(xstats[count].name),
+                                "tx_priority%u_%s", prio,
+                                rte_i40e_txq_prio_strings[i].name);
+                       xstats[count].value =
+                               *(uint64_t *)(((char *)hw_stats) +
+                               rte_i40e_txq_prio_strings[i].offset +
+                               (sizeof(uint64_t) * prio));
+                       count++;
+               }
+       }
 
-       /* It results in reloading the start point of each counter */
-       pf->offset_loaded = false;
+       return count;
 }
 
 static int
@@ -2737,15 +2917,13 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
                        u64 size,
                        u32 alignment)
 {
-       static uint64_t id = 0;
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
 
        if (!mem)
                return I40E_ERR_PARAM;
 
-       id++;
-       snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
+       snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
 #ifdef RTE_LIBRTE_XEN_DOM0
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
                                         alignment, RTE_PGSIZE_2M);
@@ -2756,7 +2934,6 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
        if (!mz)
                return I40E_ERR_NO_MEMORY;
 
-       mem->id = id;
        mem->size = size;
        mem->va = mz->addr;
 #ifdef RTE_LIBRTE_XEN_DOM0
@@ -2764,6 +2941,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
 #else
        mem->pa = mz->phys_addr;
 #endif
+       mem->zone = (const void *)mz;
+       PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
+                   "%"PRIu64, mz->name, mem->pa);
 
        return I40E_SUCCESS;
 }
@@ -2777,9 +2957,14 @@ enum i40e_status_code
 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
                    struct i40e_dma_mem *mem)
 {
-       if (!mem || !mem->va)
+       if (!mem)
                return I40E_ERR_PARAM;
 
+       PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
+                   "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
+                   mem->pa);
+       rte_memzone_free((const struct rte_memzone *)mem->zone);
+       mem->zone = NULL;
        mem->va = NULL;
        mem->pa = (u64)0;
 
@@ -2947,17 +3132,36 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 
        /* VMDq queue/VSI allocation */
        pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
+       pf->vmdq_nb_qps = 0;
+       pf->max_nb_vmdq_vsi = 0;
        if (hw->func_caps.vmdq) {
-               pf->flags |= I40E_FLAG_VMDQ;
-               pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
-               pf->max_nb_vmdq_vsi = 1;
-               PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
-                           "in total %u queues", pf->max_nb_vmdq_vsi,
-                           pf->vmdq_nb_qps,
-                           pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
-       } else {
-               pf->vmdq_nb_qps = 0;
-               pf->max_nb_vmdq_vsi = 0;
+               if (qp_count < hw->func_caps.num_tx_qp &&
+                       vsi_count < hw->func_caps.num_vsis) {
+                       pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
+                               qp_count) / pf->vmdq_nb_qp_max;
+
+                       /* Limit the number of VMDq VSIs to the maximum the
+                        * ethdev layer can support
+                        */
+                       pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+                               hw->func_caps.num_vsis - vsi_count);
+                       pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+                               ETH_64_POOLS);
+                       if (pf->max_nb_vmdq_vsi) {
+                               pf->flags |= I40E_FLAG_VMDQ;
+                               pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
+                               PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
+                                           "per VMDQ VSI, in total %u queues",
+                                           pf->max_nb_vmdq_vsi,
+                                           pf->vmdq_nb_qps, pf->vmdq_nb_qps *
+                                           pf->max_nb_vmdq_vsi);
+                       } else {
+                               PMD_DRV_LOG(INFO, "Not enough queues left for "
+                                           "VMDq");
+                       }
+               } else {
+                       PMD_DRV_LOG(INFO, "No queues or VSIs left for VMDq");
+               }
        }
        qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
        vsi_count += pf->max_nb_vmdq_vsi;
@@ -3245,7 +3449,7 @@ bitmap_is_subset(uint8_t src1, uint8_t src2)
        return !((src1 ^ src2) & src2);
 }
 
-static int
+static enum i40e_status_code
 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
 {
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -3253,14 +3457,14 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
        /* If DCB is not supported, only default TC is supported */
        if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
                PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
-               return -EINVAL;
+               return I40E_NOT_SUPPORTED;
        }
 
        if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
                PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
                            "HW support 0x%x", hw->func_caps.enabled_tcmap,
                            enabled_tcmap);
-               return -EINVAL;
+               return I40E_NOT_SUPPORTED;
        }
        return I40E_SUCCESS;
 }
@@ -3345,12 +3549,13 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
        return I40E_SUCCESS;
 }
 
-static int
+static enum i40e_status_code
 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
                                 struct i40e_aqc_vsi_properties_data *info,
                                 uint8_t enabled_tcmap)
 {
-       int ret, i, total_tc = 0;
+       enum i40e_status_code ret;
+       int i, total_tc = 0;
        uint16_t qpnum_per_tc, bsf, qp_idx;
 
        ret = validate_tcmap_parameter(vsi, enabled_tcmap);
@@ -3706,15 +3911,30 @@ i40e_vsi_setup(struct i40e_pf *pf,
                vsi->base_queue = I40E_FDIR_QUEUE_ID;
 
        /* VF has MSIX interrupt in VF range, don't allocate here */
-       if (type != I40E_VSI_SRIOV) {
+       if (type == I40E_VSI_MAIN) {
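+               /* one vector per Rx queue, capped at RTE_MAX_RXTX_INTR_VEC_ID */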
+               ret = i40e_res_pool_alloc(&pf->msix_pool,
+                                         RTE_MIN(vsi->nb_qps,
+                                                 RTE_MAX_RXTX_INTR_VEC_ID));
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
+                                   vsi->seid, ret);
+                       goto fail_queue_alloc;
+               }
+               vsi->msix_intr = ret;
+               vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+       } else if (type != I40E_VSI_SRIOV) {
                ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
                        goto fail_queue_alloc;
                }
                vsi->msix_intr = ret;
-       } else
+               vsi->nb_msix = 1;
+       } else {
                vsi->msix_intr = 0;
+               vsi->nb_msix = 0;
+       }
+
        /* Add VSI */
        if (type == I40E_VSI_MAIN) {
                /* For main VSI, no need to add since it's default one */
@@ -4558,7 +4778,7 @@ i40e_pf_enable_irq0(struct i40e_hw *hw)
 }
 
 static void
-i40e_pf_config_irq0(struct i40e_hw *hw)
+i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
 {
        /* read pending request and disable first */
        i40e_pf_disable_irq0(hw);
@@ -4566,9 +4786,10 @@ i40e_pf_config_irq0(struct i40e_hw *hw)
        I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
                I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
 
-       /* Link no queues with irq0 */
-       I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
-               I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+       if (no_queue)
+               /* do not link any queue to irq0 */
+               I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+                              I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
 }
 
 static void
@@ -7739,13 +7960,14 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
  *
  * Returns 0 on success, negative value on failure
  */
-static int
+static enum i40e_status_code
 i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
 {
        struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-       int i, ret;
+       enum i40e_status_code ret;
+       int i;
        uint32_t tc_bw_max;
 
        /* Get the VSI level BW configuration */
@@ -7755,7 +7977,7 @@ i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
                         "couldn't get PF vsi bw config, err %s aq_err %s\n",
                         i40e_stat_str(hw, ret),
                         i40e_aq_str(hw, hw->aq.asq_last_status));
-               return -EINVAL;
+               return ret;
        }
 
        /* Get the VSI level BW configuration per TC */
@@ -7766,7 +7988,7 @@ i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
                         "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
                         i40e_stat_str(hw, ret),
                         i40e_aq_str(hw, hw->aq.asq_last_status));
-               return -EINVAL;
+               return ret;
        }
 
        if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -7794,15 +8016,16 @@ i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
                         __func__, vsi->seid, i, bw_config.qs_handles[i]);
        }
 
-       return 0;
+       return ret;
 }
 
-static int
+static enum i40e_status_code
 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
                              struct i40e_aqc_vsi_properties_data *info,
                              uint8_t enabled_tcmap)
 {
-       int ret, i, total_tc = 0;
+       enum i40e_status_code ret;
+       int i, total_tc = 0;
        uint16_t qpnum_per_tc, bsf, qp_idx;
        struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
 
@@ -7869,13 +8092,13 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
  *
  * Returns 0 on success, negative value on failure
  */
-static int
+static enum i40e_status_code
 i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
        struct i40e_vsi_context ctxt;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-       int ret = 0;
+       enum i40e_status_code ret = I40E_SUCCESS;
        int i;
 
        /* Check if enabled_tc is same as existing or new TCs */
@@ -7961,7 +8184,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
        struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct i40e_vsi_list *vsi_list;
-       int i, ret;
+       enum i40e_status_code ret;
+       int i;
        uint32_t val;
 
        /* Use the FW API if FW > v4.4*/
@@ -8152,6 +8376,7 @@ i40e_dcb_setup(struct rte_eth_dev *dev)
                PMD_INIT_LOG(ERR, "dcb sw configure fails");
                return -ENOSYS;
        }
+
        return 0;
 }
 
@@ -8191,5 +8416,59 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
                }
        }
+
+       return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t interval =
+               i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+       uint16_t msix_intr;
+
+       msix_intr = intr_handle->intr_vec[queue_id];
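+       /* queues on the shared misc vector are controlled via PFINT_DYN_CTL0 */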
+       if (msix_intr == I40E_MISC_VEC_ID)
+               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                              I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                              (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                              (interval <<
+                               I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+       else
+               I40E_WRITE_REG(hw,
+                              I40E_PFINT_DYN_CTLN(msix_intr -
+                                                  I40E_RX_VEC_START),
+                              I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                              (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                              (interval <<
+                               I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+
+       I40E_WRITE_FLUSH(hw);
+       rte_intr_enable(&dev->pci_dev->intr_handle);
+
+       return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t msix_intr;
+
+       msix_intr = intr_handle->intr_vec[queue_id];
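+       /* writing 0 clears INTENA, masking the vector for this queue */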
+       if (msix_intr == I40E_MISC_VEC_ID)
+               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+       else
+               I40E_WRITE_REG(hw,
+                              I40E_PFINT_DYN_CTLN(msix_intr -
+                                                  I40E_RX_VEC_START),
+                              0);
+       I40E_WRITE_FLUSH(hw);
+
        return 0;
 }