i40e: add or delete flow director
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev.c
index 34981e4..d898a53 100644 (file)
 #include <rte_memzone.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
+#include <rte_alarm.h>
 #include <rte_dev.h>
 #include <rte_eth_ctrl.h>
 
 #include "i40e_logs.h"
-#include "i40e/i40e_register_x710_int.h"
 #include "i40e/i40e_prototype.h"
 #include "i40e/i40e_adminq_cmd.h"
 #include "i40e/i40e_type.h"
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 
-#define I40E_DEFAULT_RX_FREE_THRESH  32
-#define I40E_DEFAULT_RX_PTHRESH      8
-#define I40E_DEFAULT_RX_HTHRESH      8
-#define I40E_DEFAULT_RX_WTHRESH      0
-
-#define I40E_DEFAULT_TX_FREE_THRESH  32
-#define I40E_DEFAULT_TX_PTHRESH      32
-#define I40E_DEFAULT_TX_HTHRESH      0
-#define I40E_DEFAULT_TX_WTHRESH      0
-#define I40E_DEFAULT_TX_RSBIT_THRESH 32
-
 /* Maximun number of MAC addresses */
 #define I40E_NUM_MACADDR_MAX       64
 #define I40E_CLEAR_PXE_WAIT_MS     200
 /* Maximun number of VSI */
 #define I40E_MAX_NUM_VSIS          (384UL)
 
-/* Bit shift and mask */
-#define I40E_16_BIT_SHIFT 16
-#define I40E_16_BIT_MASK  0xFFFF
-#define I40E_32_BIT_SHIFT 32
-#define I40E_32_BIT_MASK  0xFFFFFFFF
-#define I40E_48_BIT_SHIFT 48
-#define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
-
 /* Default queue interrupt throttling time in microseconds*/
 #define I40E_ITR_INDEX_DEFAULT          0
 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
 
 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
 
+/* Mask of PF interrupt causes */
+#define I40E_PFINT_ICR0_ENA_MASK ( \
+               I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
+               I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
+               I40E_PFINT_ICR0_ENA_GRST_MASK | \
+               I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
+               I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
+               I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
+               I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
+               I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
+               I40E_PFINT_ICR0_ENA_VFLR_MASK | \
+               I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
+
 static int eth_i40e_dev_init(\
                        __attribute__((unused)) struct eth_driver *eth_drv,
                        struct rte_eth_dev *eth_dev);
@@ -142,14 +136,17 @@ static void i40e_macaddr_add(struct rte_eth_dev *dev,
                          uint32_t pool);
 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
-                                   struct rte_eth_rss_reta *reta_conf);
+                                   struct rte_eth_rss_reta_entry64 *reta_conf,
+                                   uint16_t reta_size);
 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
-                                  struct rte_eth_rss_reta *reta_conf);
+                                  struct rte_eth_rss_reta_entry64 *reta_conf,
+                                  uint16_t reta_size);
 
 static int i40e_get_cap(struct i40e_hw *hw);
 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
 static int i40e_pf_setup(struct i40e_pf *pf);
-static int i40e_vsi_init(struct i40e_vsi *vsi);
+static int i40e_dev_rxtx_init(struct i40e_pf *pf);
+static int i40e_vmdq_setup(struct rte_eth_dev *dev);
 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
 static void i40e_stat_update_48(struct i40e_hw *hw,
@@ -255,28 +252,18 @@ static struct eth_driver rte_i40e_pmd = {
        {
                .name = "rte_i40e_pmd",
                .id_table = pci_id_i40e_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_i40e_dev_init,
        .dev_private_size = sizeof(struct i40e_adapter),
 };
 
static inline int
i40e_align_floor(int n)
{
	int msb_pos;

	/* Largest power of two that does not exceed n; 0 maps to 0. */
	if (n == 0)
		return 0;

	/* Index of the most significant set bit of n. */
	msb_pos = sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n);

	return 1 << msb_pos;
}
 
 static inline int
@@ -329,6 +316,35 @@ static struct rte_driver rte_i40e_driver = {
 
 PMD_REGISTER_DRIVER(rte_i40e_driver);
 
+/*
+ * Initialize registers for flexible payload, which should be set by NVM.
+ * This should be removed from code once it is fixed in NVM.
+ */
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
+#endif
+
+static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+{
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
+
+       /* GLQF_PIT Registers */
+       I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
+       I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+}
+
 static int
 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
                   struct rte_eth_dev *dev)
@@ -392,6 +408,13 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
                return ret;
        }
 
+       /*
+        * To work around the NVM issue, initialize registers
+        * for flexible payload by software.
+        * It should be removed once issues are fixed in NVM.
+        */
+       i40e_flex_payload_reg_init(hw);
+
        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
@@ -493,7 +516,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocated memory "
                                        "for storing mac address");
-               goto err_get_mac_addr;
+               goto err_mac_alloc;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
                                        &dev->data->mac_addrs[0]);
@@ -514,8 +537,10 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 
        return 0;
 
+err_mac_alloc:
+       i40e_vsi_release(pf->main_vsi);
 err_setup_pf_switch:
-       rte_free(pf->main_vsi);
+       i40e_fdir_teardown(pf);
 err_get_mac_addr:
 err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
@@ -534,6 +559,27 @@ err_get_capabilities:
 static int
 i40e_dev_configure(struct rte_eth_dev *dev)
 {
+       int ret;
+       enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+
+       /* VMDQ setup.
+        *  Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
+        *  RSS setting have different requirements.
+        *  General PMD driver call sequence are NIC init, configure,
+        *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
+        *  will try to lookup the VSI that specific queue belongs to if VMDQ
+        *  applicable. So, VMDQ setting has to be done before
+        *  rx/tx_queue_setup(). This function is good  to place vmdq_setup.
+        *  For RSS setting, it will try to calculate actual configured RX queue
+        *  number, which will be available after rx_queue_setup(). dev_start()
+        *  function is good to place RSS setup.
+        */
+       if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+               ret = i40e_vmdq_setup(dev);
+               if (ret)
+                       return ret;
+       }
+
        return i40e_dev_init_vlan(dev);
 }
 
@@ -580,7 +626,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
-       uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
        int i;
 
        for (i = 0; i < vsi->nb_qps; i++)
@@ -589,7 +634,7 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
        /* Bind all RX queues to allocated MSIX interrupt */
        for (i = 0; i < vsi->nb_qps; i++) {
                val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
-                       (interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+                       I40E_QINT_RQCTL_ITR_INDX_MASK |
                        ((vsi->base_queue + i + 1) <<
                        I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                        (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
@@ -602,26 +647,34 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
 
        /* Write first RX queue to Link list register as the head element */
        if (vsi->type != I40E_VSI_SRIOV) {
+               uint16_t interval =
+                       i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+
                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
-                       (vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+                                               (vsi->base_queue <<
+                               I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                        (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
 
                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
-                               msix_vect - 1), interval);
+                                               msix_vect - 1), interval);
 
+#ifndef I40E_GLINT_CTL
+#define I40E_GLINT_CTL                     0x0003F800
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
+#endif
                /* Disable auto-mask on enabling of all none-zero  interrupt */
                I40E_WRITE_REG(hw, I40E_GLINT_CTL,
-                               I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
-       }
-       else {
+                       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
+       } else {
                uint32_t reg;
+
                /* num_msix_vectors_vf needs to minus irq0 */
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);
 
-               I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
-                       (vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
-                       (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+               I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
+                                       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+                               (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
        }
 
        I40E_WRITE_FLUSH(hw);
@@ -750,8 +803,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_vsi *vsi = pf->main_vsi;
-       int ret;
+       struct i40e_vsi *main_vsi = pf->main_vsi;
+       int ret, i;
 
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
@@ -762,26 +815,49 @@ i40e_dev_start(struct rte_eth_dev *dev)
        }
 
        /* Initialize VSI */
-       ret = i40e_vsi_init(vsi);
+       ret = i40e_dev_rxtx_init(pf);
        if (ret != I40E_SUCCESS) {
-               PMD_DRV_LOG(ERR, "Failed to init VSI");
+               PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
                goto err_up;
        }
 
        /* Map queues with MSIX interrupt */
-       i40e_vsi_queues_bind_intr(vsi);
-       i40e_vsi_enable_queues_intr(vsi);
+       i40e_vsi_queues_bind_intr(main_vsi);
+       i40e_vsi_enable_queues_intr(main_vsi);
+
+       /* Map VMDQ VSI queues with MSIX interrupt */
+       for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+               i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
+               i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
+       }
+
+       ret = i40e_fdir_configure(dev);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "failed to configure fdir.");
+               goto err_up;
+       }
+
+       /* enable FDIR MSIX interrupt */
+       if (pf->flags & I40E_FLAG_FDIR) {
+               i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+               i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+       }
 
        /* Enable all queues which have been configured */
-       ret = i40e_vsi_switch_queues(vsi, TRUE);
+       ret = i40e_dev_switch_queues(pf, TRUE);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to enable VSI");
                goto err_up;
        }
 
        /* Enable receiving broadcast packets */
-       if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
-               ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+       ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
+       if (ret != I40E_SUCCESS)
+               PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
+       for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+               ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
+                                               true, NULL);
                if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
        }
@@ -796,7 +872,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
        return I40E_SUCCESS;
 
 err_up:
-       i40e_vsi_switch_queues(vsi, FALSE);
+       i40e_dev_switch_queues(pf, FALSE);
+       i40e_dev_clear_queues(dev);
 
        return ret;
 }
@@ -805,17 +882,31 @@ static void
 i40e_dev_stop(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_vsi *vsi = pf->main_vsi;
+       struct i40e_vsi *main_vsi = pf->main_vsi;
+       int i;
 
        /* Disable all queues */
-       i40e_vsi_switch_queues(vsi, FALSE);
+       i40e_dev_switch_queues(pf, FALSE);
+
+       /* un-map queues with interrupt registers */
+       i40e_vsi_disable_queues_intr(main_vsi);
+       i40e_vsi_queues_unbind_intr(main_vsi);
+
+       for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+               i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
+               i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
+       }
+
+       if (pf->flags & I40E_FLAG_FDIR) {
+               i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+               i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+       }
+       /* Clear all queues and release memory */
+       i40e_dev_clear_queues(dev);
 
        /* Set link down */
        i40e_dev_set_link_down(dev);
 
-       /* un-map queues with interrupt registers */
-       i40e_vsi_disable_queues_intr(vsi);
-       i40e_vsi_queues_unbind_intr(vsi);
 }
 
 static void
@@ -837,6 +928,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
        i40e_shutdown_lan_hmc(hw);
 
        /* release all the existing VSIs and VEBs */
+       i40e_fdir_teardown(pf);
        i40e_vsi_release(pf->main_vsi);
 
        /* shutdown the adminq */
@@ -1389,6 +1481,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM;
+       dev_info->reta_size = pf->hash_lut_size;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -1408,9 +1501,19 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
-               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
        };
 
+       if (pf->flags | I40E_FLAG_VMDQ) {
+               dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
+               dev_info->vmdq_queue_base = dev_info->max_rx_queues;
+               dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
+                                               pf->max_nb_vmdq_vsi;
+               dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
+               dev_info->max_rx_queues += dev_info->vmdq_queue_num;
+               dev_info->max_tx_queues += dev_info->vmdq_queue_num;
+       }
 }
 
 static int
@@ -1531,45 +1634,41 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
 static void
 i40e_macaddr_add(struct rte_eth_dev *dev,
                 struct ether_addr *mac_addr,
-                __attribute__((unused)) uint32_t index,
-                __attribute__((unused)) uint32_t pool)
+                __rte_unused uint32_t index,
+                uint32_t pool)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_vsi *vsi = pf->main_vsi;
-       struct ether_addr old_mac;
+       struct i40e_mac_filter_info mac_filter;
+       struct i40e_vsi *vsi;
        int ret;
 
-       if (!is_valid_assigned_ether_addr(mac_addr)) {
-               PMD_DRV_LOG(ERR, "Invalid ethernet address");
+       /* If VMDQ not enabled or configured, return */
+       if (pool != 0 && (!(pf->flags | I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
+               PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
+                       pf->flags | I40E_FLAG_VMDQ ? "configured" : "enabled",
+                       pool);
                return;
        }
 
-       if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
-               PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
+       if (pool > pf->nb_cfg_vmdq_vsi) {
+               PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
+                               pool, pf->nb_cfg_vmdq_vsi);
                return;
        }
 
-       /* Write mac address */
-       ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
-                                       mac_addr->addr_bytes, NULL);
-       if (ret != I40E_SUCCESS) {
-               PMD_DRV_LOG(ERR, "Failed to write mac address");
-               return;
-       }
+       (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
+       mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 
-       (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
-       (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
-                       ETHER_ADDR_LEN);
+       if (pool == 0)
+               vsi = pf->main_vsi;
+       else
+               vsi = pf->vmdq[pool - 1].vsi;
 
-       ret = i40e_vsi_add_mac(vsi, mac_addr);
+       ret = i40e_vsi_add_mac(vsi, &mac_filter);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
                return;
        }
-
-       ether_addr_copy(mac_addr, &pf->dev_addr);
-       i40e_vsi_delete_mac(vsi, &old_mac);
 }
 
 /* Remove a MAC address, and update filters */
@@ -1577,66 +1676,191 @@ static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;
	uint32_t i;
	uint64_t pool_sel;

	macaddr = &(data->mac_addrs[index]);

	/* Bitmap of pools this address was added to; bit 0 is the main
	 * VSI, bit N (N > 0) is VMDQ VSI N-1.
	 */
	pool_sel = dev->data->mac_pool_sel[index];

	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
		if (pool_sel & (1ULL << i)) {
			if (i == 0)
				vsi = pf->main_vsi;
			else {
				/* No VMDQ pool enabled or configured.
				 * Fix: the flag test used '|' (always true,
				 * making this guard dead code); '&' is the
				 * intended flag check.
				 */
				if (!(pf->flags & I40E_FLAG_VMDQ) ||
					(i > pf->nb_cfg_vmdq_vsi)) {
					PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
							"/configured");
					return;
				}
				vsi = pf->vmdq[i - 1].vsi;
			}
			ret = i40e_vsi_delete_mac(vsi, macaddr);

			if (ret) {
				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
				return;
			}
		}
	}
}
 
-       (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
+/* Set perfect match or hash match of MAC and VLAN for a VF */
+static int
+i40e_vf_mac_filter_set(struct i40e_pf *pf,
+                struct rte_eth_mac_filter *filter,
+                bool add)
+{
+       struct i40e_hw *hw;
+       struct i40e_mac_filter_info mac_filter;
+       struct ether_addr old_mac;
+       struct ether_addr *new_mac;
+       struct i40e_pf_vf *vf = NULL;
+       uint16_t vf_id;
+       int ret;
 
-       ret = i40e_vsi_delete_mac(vsi, macaddr);
-       if (ret != I40E_SUCCESS)
-               return;
+       if (pf == NULL) {
+               PMD_DRV_LOG(ERR, "Invalid PF argument.");
+               return -EINVAL;
+       }
+       hw = I40E_PF_TO_HW(pf);
 
-       /* Clear device address as it has been removed */
-       if (is_same_ether_addr(&(pf->dev_addr), macaddr))
-               memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+       if (filter == NULL) {
+               PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
+               return -EINVAL;
+       }
+
+       new_mac = &filter->mac_addr;
+
+       if (is_zero_ether_addr(new_mac)) {
+               PMD_DRV_LOG(ERR, "Invalid ethernet address.");
+               return -EINVAL;
+       }
+
+       vf_id = filter->dst_id;
+
+       if (vf_id > pf->vf_num - 1 || !pf->vfs) {
+               PMD_DRV_LOG(ERR, "Invalid argument.");
+               return -EINVAL;
+       }
+       vf = &pf->vfs[vf_id];
+
+       if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
+               PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
+               return -EINVAL;
+       }
+
+       if (add) {
+               (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+               (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
+                               ETHER_ADDR_LEN);
+               (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
+                                ETHER_ADDR_LEN);
+
+               mac_filter.filter_type = filter->filter_type;
+               ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+                       return -1;
+               }
+               ether_addr_copy(new_mac, &pf->dev_addr);
+       } else {
+               (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
+                               ETHER_ADDR_LEN);
+               ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
+                       return -1;
+               }
+
+               /* Clear device address as it has been removed */
+               if (is_same_ether_addr(&(pf->dev_addr), new_mac))
+                       memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+       }
+
+       return 0;
+}
+
+/* MAC filter handle */
+static int
+i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+               void *arg)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct rte_eth_mac_filter *filter;
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       int ret = I40E_NOT_SUPPORTED;
+
+       filter = (struct rte_eth_mac_filter *)(arg);
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_NOP:
+               ret = I40E_SUCCESS;
+               break;
+       case RTE_ETH_FILTER_ADD:
+               i40e_pf_disable_irq0(hw);
+               if (filter->is_vf)
+                       ret = i40e_vf_mac_filter_set(pf, filter, 1);
+               i40e_pf_enable_irq0(hw);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               i40e_pf_disable_irq0(hw);
+               if (filter->is_vf)
+                       ret = i40e_vf_mac_filter_set(pf, filter, 0);
+               i40e_pf_enable_irq0(hw);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+               ret = I40E_ERR_PARAM;
+               break;
+       }
+
+       return ret;
 }
 
 static int
 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
-                        struct rte_eth_rss_reta *reta_conf)
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
 {
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut, l;
-       uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
-
-       for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < max)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >>
-                                               (i - max)) & 0xF);
+       uint16_t i, j, lut_size = pf->hash_lut_size;
+       uint16_t idx, shift;
+       uint8_t mask;
+
+       if (reta_size != lut_size ||
+               reta_size > ETH_RSS_RETA_SIZE_512) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                                       "(%d)\n", reta_size, lut_size);
+               return -EINVAL;
+       }
 
+       for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               I40E_4_BIT_MASK);
                if (!mask)
                        continue;
-
-               if (mask == 0xF)
+               if (mask == I40E_4_BIT_MASK)
                        l = 0;
                else
                        l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
-
-               for (j = 0, lut = 0; j < 4; j++) {
+               for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
-                               lut |= reta_conf->reta[i + j] << (8 * j);
+                               lut |= reta_conf[idx].reta[shift + j] <<
+                                                       (CHAR_BIT * j);
                        else
-                               lut |= l & (0xFF << (8 * j));
+                               lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
                }
                I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }
@@ -1646,27 +1870,37 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 static int
 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
-                       struct rte_eth_rss_reta *reta_conf)
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size)
 {
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut;
-       uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
-
-       for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < max)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >>
-                                               (i - max)) & 0xF);
+       uint16_t i, j, lut_size = pf->hash_lut_size;
+       uint16_t idx, shift;
+       uint8_t mask;
+
+       if (reta_size != lut_size ||
+               reta_size > ETH_RSS_RETA_SIZE_512) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                                       "(%d)\n", reta_size, lut_size);
+               return -EINVAL;
+       }
 
+       for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               I40E_4_BIT_MASK);
                if (!mask)
                        continue;
 
                lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
-               for (j = 0; j < 4; j++) {
+               for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
-                               reta_conf->reta[i + j] =
-                                       (uint8_t)((lut >> (8 * j)) & 0xFF);
+                               reta_conf[idx].reta[shift] = ((lut >>
+                                       (CHAR_BIT * j)) & I40E_8_BIT_MASK);
                }
        }
 
@@ -1836,7 +2070,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-       uint16_t sum_queues = 0, sum_vsis;
+       uint16_t sum_queues = 0, sum_vsis, left_queues;
 
        /* First check if FW support SRIOV */
        if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
@@ -1852,7 +2086,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
                pf->flags |= I40E_FLAG_RSS;
                pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
                        (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
-               pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
+               pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
        } else
                pf->lan_nb_qps = 1;
        sum_queues = pf->lan_nb_qps;
@@ -1886,11 +2120,19 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 
        if (hw->func_caps.vmdq) {
                pf->flags |= I40E_FLAG_VMDQ;
-               pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
-               sum_queues += pf->vmdq_nb_qps;
-               sum_vsis += 1;
-               PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
+               pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+               pf->max_nb_vmdq_vsi = 1;
+               /*
+                * If VMDQ available, assume a single VSI can be created.  Will adjust
+                * later.
+                */
+               sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+               sum_vsis += pf->max_nb_vmdq_vsi;
+       } else {
+               pf->vmdq_nb_qps = 0;
+               pf->max_nb_vmdq_vsi = 0;
        }
+       pf->nb_cfg_vmdq_vsi = 0;
 
        if (hw->func_caps.fd) {
                pf->flags |= I40E_FLAG_FDIR;
@@ -1911,6 +2153,22 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       /* Adjust VMDQ setting to support as many VMs as possible */
+       if (pf->flags & I40E_FLAG_VMDQ) {
+               left_queues = hw->func_caps.num_rx_qp - sum_queues;
+
+               pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
+                                       pf->max_num_vsi - sum_vsis);
+
+               /* Limit the max VMDQ number that rte_ether can support */
+               pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+                                       ETH_64_POOLS - 1);
+
+               PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
+                               pf->max_nb_vmdq_vsi);
+               PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
+       }
+
        /* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
         * cause */
        if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
@@ -2303,7 +2561,7 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
        vsi->enabled_tc = enabled_tcmap;
 
        /* Number of queues per enabled TC */
-       qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
+       qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
        qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
        bsf = rte_bsf32(qpnum_per_tc);
 
@@ -2479,6 +2737,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
 {
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_aqc_remove_macvlan_element_data def_filter;
+       struct i40e_mac_filter_info filter;
        int ret;
 
        if (vsi->type != I40E_VSI_MAIN)
@@ -2492,6 +2751,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
        ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
        if (ret != I40E_SUCCESS) {
                struct i40e_mac_filter *f;
+               struct ether_addr *mac;
 
                PMD_DRV_LOG(WARNING, "Cannot remove the default "
                            "macvlan filter");
@@ -2501,15 +2761,19 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
                        PMD_DRV_LOG(ERR, "failed to allocate memory");
                        return I40E_ERR_NO_MEMORY;
                }
-               (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
+               mac = &f->mac_info.mac_addr;
+               (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
                                ETH_ADDR_LEN);
+               f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
                TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
                vsi->mac_num++;
 
                return ret;
        }
-
-       return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
+       (void)rte_memcpy(&filter.mac_addr,
+               (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
+       filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+       return i40e_vsi_add_mac(vsi, &filter);
 }
 
 static int
@@ -2563,6 +2827,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 {
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_vsi *vsi;
+       struct i40e_mac_filter_info filter;
        int ret;
        struct i40e_vsi_context ctxt;
        struct ether_addr broadcast =
@@ -2609,16 +2874,33 @@ i40e_vsi_setup(struct i40e_pf *pf,
        case I40E_VSI_SRIOV :
                vsi->nb_qps = pf->vf_nb_qps;
                break;
+       case I40E_VSI_VMDQ2:
+               vsi->nb_qps = pf->vmdq_nb_qps;
+               break;
+       case I40E_VSI_FDIR:
+               vsi->nb_qps = pf->fdir_nb_qps;
+               break;
        default:
                goto fail_mem;
        }
-       ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
-       if (ret < 0) {
-               PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
-                               vsi->seid, ret);
-               goto fail_mem;
-       }
-       vsi->base_queue = ret;
+       /*
+        * The filter status descriptor is reported in rx queue 0,
+        * while the tx queue for fdir filter programming has no
+        * such constraint and can be any non-zero queue.
+        * To simplify it, choose to use queue pair 0 for the FDIR vsi.
+        * To make sure it uses queue pair 0, the queue allocation
+        * needs to be done before this function is called.
+        */
+       if (type != I40E_VSI_FDIR) {
+               ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+                       if (ret < 0) {
+                               PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+                                               vsi->seid, ret);
+                               goto fail_mem;
+                       }
+                       vsi->base_queue = ret;
+       } else
+               vsi->base_queue = I40E_FDIR_QUEUE_ID;
 
        /* VF has MSIX interrupt in VF range, don't allocate here */
        if (type != I40E_VSI_SRIOV) {
@@ -2750,8 +3032,61 @@ i40e_vsi_setup(struct i40e_pf *pf,
                 * Since VSI is not created yet, only configure parameter,
                 * will add vsi below.
                 */
-       }
-       else {
+       } else if (type == I40E_VSI_VMDQ2) {
+               memset(&ctxt, 0, sizeof(ctxt));
+               /*
+                * For other VSI, the uplink_seid equals to uplink VSI's
+                * uplink_seid since they share same VEB
+                */
+               vsi->uplink_seid = uplink_vsi->uplink_seid;
+               ctxt.pf_num = hw->pf_id;
+               ctxt.vf_num = 0;
+               ctxt.uplink_seid = vsi->uplink_seid;
+               ctxt.connection_type = 0x1;
+               ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+               ctxt.info.valid_sections |=
+                               rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+               /* user_param carries flag to enable loop back */
+               if (user_param) {
+                       ctxt.info.switch_id =
+                       rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+                       ctxt.info.switch_id |=
+                       rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
+
+               /* Configure port/vlan */
+               ctxt.info.valid_sections |=
+                       rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+               ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+               ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+                                               I40E_DEFAULT_TCMAP);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to configure "
+                                       "TC queue mapping");
+                       goto fail_msix_alloc;
+               }
+               ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+               ctxt.info.valid_sections |=
+                       rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+       } else if (type == I40E_VSI_FDIR) {
+               vsi->uplink_seid = uplink_vsi->uplink_seid;
+               ctxt.pf_num = hw->pf_id;
+               ctxt.vf_num = 0;
+               ctxt.uplink_seid = vsi->uplink_seid;
+               ctxt.connection_type = 0x1;     /* regular data port */
+               ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+               ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+                                               I40E_DEFAULT_TCMAP);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to configure "
+                                       "TC queue mapping.");
+                       goto fail_msix_alloc;
+               }
+               ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+               ctxt.info.valid_sections |=
+                       rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+       } else {
                PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
                goto fail_msix_alloc;
        }
@@ -2773,7 +3108,10 @@ i40e_vsi_setup(struct i40e_pf *pf,
        }
 
        /* MAC/VLAN configuration */
-       ret = i40e_vsi_add_mac(vsi, &broadcast);
+       (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+       filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+
+       ret = i40e_vsi_add_mac(vsi, &filter);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
                goto fail_msix_alloc;
@@ -2923,7 +3261,6 @@ i40e_pf_setup(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_filter_control_settings settings;
-       struct rte_eth_dev_data *dev_data = pf->dev_data;
        struct i40e_vsi *vsi;
        int ret;
 
@@ -2937,20 +3274,47 @@ i40e_pf_setup(struct i40e_pf *pf)
                PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
                return ret;
        }
-
-       /* VSI setup */
+       if (pf->flags & I40E_FLAG_FDIR) {
+               /* make sure the queue is allocated first, so FDIR uses queue pair 0 */
+               ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+               if (ret != I40E_FDIR_QUEUE_ID) {
+                       PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
+                                   " ret =%d", ret);
+                       pf->flags &= ~I40E_FLAG_FDIR;
+               }
+       }
+       /*  main VSI setup */
        vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Setup of main vsi failed");
                return I40E_ERR_NOT_READY;
        }
        pf->main_vsi = vsi;
-       dev_data->nb_rx_queues = vsi->nb_qps;
-       dev_data->nb_tx_queues = vsi->nb_qps;
+
+       /* setup FDIR after main vsi created.*/
+       if (pf->flags & I40E_FLAG_FDIR) {
+               ret = i40e_fdir_setup(pf);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+                       pf->flags &= ~I40E_FLAG_FDIR;
+               }
+       }
 
        /* Configure filter control */
        memset(&settings, 0, sizeof(settings));
-       settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+       if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+               settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+       else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+               settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+       else {
+               PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
+                                               hw->func_caps.rss_table_size);
+               return I40E_ERR_PARAM;
+       }
+       PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
+                       "size: %u\n", hw->func_caps.rss_table_size);
+       pf->hash_lut_size = hw->func_caps.rss_table_size;
+
        /* Enable ethtype and macvlan filters */
        settings.enable_ethtype = TRUE;
        settings.enable_macvlan = TRUE;
@@ -3027,11 +3391,11 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 
 /* Swith on or off the tx queues */
 static int
-i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
 {
-       struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+       struct rte_eth_dev_data *dev_data = pf->dev_data;
        struct i40e_tx_queue *txq;
-       struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+       struct rte_eth_dev *dev = pf->adapter->eth_dev;
        uint16_t i;
        int ret;
 
@@ -3039,7 +3403,7 @@ i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
                txq = dev_data->tx_queues[i];
                /* Don't operate the queue if not configured or
                 * if starting only per queue */
-               if (!txq->q_set || (on && txq->tx_deferred_start))
+               if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
                        continue;
                if (on)
                        ret = i40e_dev_tx_queue_start(dev, i);
@@ -3105,11 +3469,11 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 }
 /* Switch on or off the rx queues */
 static int
-i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
 {
-       struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+       struct rte_eth_dev_data *dev_data = pf->dev_data;
        struct i40e_rx_queue *rxq;
-       struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+       struct rte_eth_dev *dev = pf->adapter->eth_dev;
        uint16_t i;
        int ret;
 
@@ -3117,7 +3481,7 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
                rxq = dev_data->rx_queues[i];
                /* Don't operate the queue if not configured or
                 * if starting only per queue */
-               if (!rxq->q_set || (on && rxq->rx_deferred_start))
+               if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
                        continue;
                if (on)
                        ret = i40e_dev_rx_queue_start(dev, i);
@@ -3132,26 +3496,26 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
 
 /* Switch on or off all the rx/tx queues */
 int
-i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
 {
        int ret;
 
        if (on) {
                /* enable rx queues before enabling tx queues */
-               ret = i40e_vsi_switch_rx_queues(vsi, on);
+               ret = i40e_dev_switch_rx_queues(pf, on);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to switch rx queues");
                        return ret;
                }
-               ret = i40e_vsi_switch_tx_queues(vsi, on);
+               ret = i40e_dev_switch_tx_queues(pf, on);
        } else {
                /* Stop tx queues before stopping rx queues */
-               ret = i40e_vsi_switch_tx_queues(vsi, on);
+               ret = i40e_dev_switch_tx_queues(pf, on);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to switch tx queues");
                        return ret;
                }
-               ret = i40e_vsi_switch_rx_queues(vsi, on);
+               ret = i40e_dev_switch_rx_queues(pf, on);
        }
 
        return ret;
@@ -3159,15 +3523,18 @@ i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
 
 /* Initialize VSI for TX */
 static int
-i40e_vsi_tx_init(struct i40e_vsi *vsi)
+i40e_dev_tx_init(struct i40e_pf *pf)
 {
-       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct rte_eth_dev_data *data = pf->dev_data;
        uint16_t i;
        uint32_t ret = I40E_SUCCESS;
+       struct i40e_tx_queue *txq;
 
        for (i = 0; i < data->nb_tx_queues; i++) {
-               ret = i40e_tx_queue_init(data->tx_queues[i]);
+               txq = data->tx_queues[i];
+               if (!txq || !txq->q_set)
+                       continue;
+               ret = i40e_tx_queue_init(txq);
                if (ret != I40E_SUCCESS)
                        break;
        }
@@ -3177,16 +3544,20 @@ i40e_vsi_tx_init(struct i40e_vsi *vsi)
 
 /* Initialize VSI for RX */
 static int
-i40e_vsi_rx_init(struct i40e_vsi *vsi)
+i40e_dev_rx_init(struct i40e_pf *pf)
 {
-       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        struct rte_eth_dev_data *data = pf->dev_data;
        int ret = I40E_SUCCESS;
        uint16_t i;
+       struct i40e_rx_queue *rxq;
 
        i40e_pf_config_mq_rx(pf);
        for (i = 0; i < data->nb_rx_queues; i++) {
-               ret = i40e_rx_queue_init(data->rx_queues[i]);
+               rxq = data->rx_queues[i];
+               if (!rxq || !rxq->q_set)
+                       continue;
+
+               ret = i40e_rx_queue_init(rxq);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to do RX queue "
                                    "initialization");
@@ -3197,26 +3568,121 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
        return ret;
 }
 
-/* Initialize VSI */
 static int
-i40e_vsi_init(struct i40e_vsi *vsi)
+i40e_dev_rxtx_init(struct i40e_pf *pf)
 {
        int err;
 
-       err = i40e_vsi_tx_init(vsi);
+       err = i40e_dev_tx_init(pf);
        if (err) {
-               PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
+               PMD_DRV_LOG(ERR, "Failed to do TX initialization");
                return err;
        }
-       err = i40e_vsi_rx_init(vsi);
+       err = i40e_dev_rx_init(pf);
        if (err) {
-               PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
+               PMD_DRV_LOG(ERR, "Failed to do RX initialization");
                return err;
        }
 
        return err;
 }
 
+static int
+i40e_vmdq_setup(struct rte_eth_dev *dev)
+{
+       struct rte_eth_conf *conf = &dev->data->dev_conf;
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       int i, err, conf_vsis, j, loop;
+       struct i40e_vsi *vsi;
+       struct i40e_vmdq_info *vmdq_info;
+       struct rte_eth_vmdq_rx_conf *vmdq_conf;
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+       /*
+        * Disable interrupt to avoid message from VF. Furthermore, it will
+        * avoid race condition in VSI creation/destroy.
+        */
+       i40e_pf_disable_irq0(hw);
+
+       if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
+               PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
+               return -ENOTSUP;
+       }
+
+       conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
+       if (conf_vsis > pf->max_nb_vmdq_vsi) {
+               PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
+                       conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
+                       pf->max_nb_vmdq_vsi);
+               return -ENOTSUP;
+       }
+
+       if (pf->vmdq != NULL) {
+               PMD_INIT_LOG(INFO, "VMDQ already configured");
+               return 0;
+       }
+
+       pf->vmdq = rte_zmalloc("vmdq_info_struct",
+                               sizeof(*vmdq_info) * conf_vsis, 0);
+
+       if (pf->vmdq == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory");
+               return -ENOMEM;
+       }
+
+       vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
+
+       /* Create VMDQ VSI */
+       for (i = 0; i < conf_vsis; i++) {
+               vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
+                               vmdq_conf->enable_loop_back);
+               if (vsi == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
+                       err = -1;
+                       goto err_vsi_setup;
+               }
+               vmdq_info = &pf->vmdq[i];
+               vmdq_info->pf = pf;
+               vmdq_info->vsi = vsi;
+       }
+       pf->nb_cfg_vmdq_vsi = conf_vsis;
+
+       /* Configure Vlan */
+       loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
+       for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+               for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
+                       if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
+                               PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
+                                       vmdq_conf->pool_map[i].vlan_id, j);
+
+                               err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
+                                               vmdq_conf->pool_map[i].vlan_id);
+                               if (err) {
+                                       PMD_INIT_LOG(ERR, "Failed to add vlan");
+                                       err = -1;
+                                       goto err_vsi_setup;
+                               }
+                       }
+               }
+       }
+
+       i40e_pf_enable_irq0(hw);
+
+       return 0;
+
+err_vsi_setup:
+       for (i = 0; i < conf_vsis; i++)
+               if (pf->vmdq[i].vsi == NULL)
+                       break;
+               else
+                       i40e_vsi_release(pf->vmdq[i].vsi);
+
+       rte_free(pf->vmdq);
+       pf->vmdq = NULL;
+       i40e_pf_enable_irq0(hw);
+       return err;
+}
+
 static void
 i40e_stat_update_32(struct i40e_hw *hw,
                   uint32_t reg,
@@ -3234,7 +3700,7 @@ i40e_stat_update_32(struct i40e_hw *hw,
                *stat = (uint64_t)(new_data - *offset);
        else
                *stat = (uint64_t)((new_data +
-                       ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
+                       ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
 }
 
 static void
@@ -3249,7 +3715,7 @@ i40e_stat_update_48(struct i40e_hw *hw,
 
        new_data = (uint64_t)I40E_READ_REG(hw, loreg);
        new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
-                       I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
+                       I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
 
        if (!offset_loaded)
                *offset = new_data;
@@ -3258,7 +3724,7 @@ i40e_stat_update_48(struct i40e_hw *hw,
                *stat = new_data - *offset;
        else
                *stat = (uint64_t)((new_data +
-                       ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
+                       ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
 
        *stat &= I40E_48_BIT_MASK;
 }
@@ -3286,24 +3752,9 @@ i40e_pf_enable_irq0(struct i40e_hw *hw)
 static void
 i40e_pf_config_irq0(struct i40e_hw *hw)
 {
-       uint32_t enable;
-
        /* read pending request and disable first */
        i40e_pf_disable_irq0(hw);
-       /**
-        * Enable all interrupt error options to detect possible errors,
-        * other informative int are ignored
-        */
-       enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
-                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
-                I40E_PFINT_ICR0_ENA_GRST_MASK |
-                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
-                I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
-                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
-                I40E_PFINT_ICR0_ENA_VFLR_MASK |
-                I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
-
-       I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
+       I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
        I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
                I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
 
@@ -3396,6 +3847,57 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
        rte_free(info.msg_buf);
 }
 
+/*
+ * Interrupt handler is registered as the alarm callback for handling LSC
+ * interrupt after a definite amount of time, in order to wait for the NIC
+ * to settle into a stable state. Currently i40e waits 1 sec for the link
+ * up interrupt; no wait is needed for the link down interrupt.
+ */
+static void
+i40e_dev_interrupt_delayed_handler(void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t icr0;
+
+       /* read interrupt causes again */
+       icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+       if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
+       if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
+       if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+               PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
+       if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+               PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
+       if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+               PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
+                                                               "state\n");
+       if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
+       if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+       if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+               PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
+               i40e_dev_handle_vfr_event(dev);
+       }
+       if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+               PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
+               i40e_dev_handle_aq_msg(dev);
+       }
+
+       /* handle the link up interrupt in an alarm callback */
+       i40e_dev_link_update(dev, 0);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+
+       i40e_pf_enable_irq0(hw);
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
 /**
  * Interrupt handler triggered by NIC  for handling
  * specific interrupt.
@@ -3414,56 +3916,72 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t cause, enable;
+       uint32_t icr0;
 
+       /* Disable interrupt */
        i40e_pf_disable_irq0(hw);
 
-       cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
-       enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
+       /* read out interrupt causes */
+       icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
 
-       /* Shared IRQ case, return */
-       if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
-               PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
-                           "no INT event to process", hw->pf_id);
+       /* No interrupt event indicated */
+       if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
+               PMD_DRV_LOG(INFO, "No interrupt event");
                goto done;
        }
-
-       if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
-               PMD_DRV_LOG(INFO, "INT:Link status changed");
-               i40e_dev_link_update(dev, 0);
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+       if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
+       if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+       if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+               PMD_DRV_LOG(INFO, "ICR0: global reset requested");
+       if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+               PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
+       if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+               PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
+       if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: HMC error");
+       if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+               PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+       if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+               PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
+               i40e_dev_handle_vfr_event(dev);
+       }
+       if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+               PMD_DRV_LOG(INFO, "ICR0: adminq event");
+               i40e_dev_handle_aq_msg(dev);
        }
 
-       if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
-               PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error");
-
-       if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
-               PMD_DRV_LOG(INFO, "INT:Malicious programming detected");
-
-       if (cause & I40E_PFINT_ICR0_GRST_MASK)
-               PMD_DRV_LOG(INFO, "INT:Global Resets Requested");
-
-       if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
-               PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured");
+       /* Link Status Change interrupt */
+       if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
+#define I40E_US_PER_SECOND 1000000
+               struct rte_eth_link link;
 
-       if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
-               PMD_DRV_LOG(INFO, "INT:HMC error occured");
+               PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
+               memset(&link, 0, sizeof(link));
+               rte_i40e_dev_atomic_read_link_status(dev, &link);
+               i40e_dev_link_update(dev, 0);
 
-       /* Add processing func to deal with VF reset vent */
-       if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
-               PMD_DRV_LOG(INFO, "INT:VF reset detected");
-               i40e_dev_handle_vfr_event(dev);
-       }
-       /* Find admin queue event */
-       if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
-               PMD_DRV_LOG(INFO, "INT:ADMINQ event");
-               i40e_dev_handle_aq_msg(dev);
+               /*
+                * For a link up interrupt, wait 1 second to let the
+                * hardware reach a stable state. Otherwise several
+                * consecutive interrupts can be observed.
+                * For a link down interrupt, there is no need to wait.
+                */
+               if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
+                       i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
+                       return;
+               else
+                       _rte_eth_dev_callback_process(dev,
+                               RTE_ETH_EVENT_INTR_LSC);
        }
 
 done:
-       I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
-       /* Re-enable interrupt from device side */
+       /* Enable interrupt */
        i40e_pf_enable_irq0(hw);
-       /* Re-enable interrupt from host side */
        rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
 
@@ -3474,6 +3992,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
 {
        int ele_num, ele_buff_size;
        int num, actual_num, i;
+       uint16_t flags;
        int ret = I40E_SUCCESS;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_aqc_add_macvlan_element_data *req_list;
@@ -3499,9 +4018,31 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
                                &filter[num + i].macaddr, ETH_ADDR_LEN);
                        req_list[i].vlan_tag =
                                rte_cpu_to_le_16(filter[num + i].vlan_id);
-                       req_list[i].flags = rte_cpu_to_le_16(\
-                               I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+
+                       switch (filter[num + i].filter_type) {
+                       case RTE_MAC_PERFECT_MATCH:
+                               flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+                                       I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+                               break;
+                       case RTE_MACVLAN_PERFECT_MATCH:
+                               flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+                               break;
+                       case RTE_MAC_HASH_MATCH:
+                               flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
+                                       I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+                               break;
+                       case RTE_MACVLAN_HASH_MATCH:
+                               flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
+                               break;
+                       default:
+                               PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+                               ret = I40E_ERR_PARAM;
+                               goto DONE;
+                       }
+
                        req_list[i].queue_number = 0;
+
+                       req_list[i].flags = rte_cpu_to_le_16(flags);
                }
 
                ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
@@ -3525,6 +4066,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 {
        int ele_num, ele_buff_size;
        int num, actual_num, i;
+       uint16_t flags;
        int ret = I40E_SUCCESS;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_aqc_remove_macvlan_element_data *req_list;
@@ -3551,7 +4093,28 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
                                &filter[num + i].macaddr, ETH_ADDR_LEN);
                        req_list[i].vlan_tag =
                                rte_cpu_to_le_16(filter[num + i].vlan_id);
-                       req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+
+                       switch (filter[num + i].filter_type) {
+                       case RTE_MAC_PERFECT_MATCH:
+                               flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+                                       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+                               break;
+                       case RTE_MACVLAN_PERFECT_MATCH:
+                               flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+                               break;
+                       case RTE_MAC_HASH_MATCH:
+                               flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
+                                       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+                               break;
+                       case RTE_MACVLAN_HASH_MATCH:
+                               flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
+                               break;
+                       default:
+                               PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+                               ret = I40E_ERR_PARAM;
+                               goto DONE;
+                       }
+                       req_list[i].flags = rte_cpu_to_le_16(flags);
                }
 
                ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
@@ -3576,7 +4139,7 @@ i40e_find_mac_filter(struct i40e_vsi *vsi,
        struct i40e_mac_filter *f;
 
        TAILQ_FOREACH(f, &vsi->mac_list, next) {
-               if (is_same_ether_addr(macaddr, &(f->macaddr)))
+               if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
                        return f;
        }
 
@@ -3677,8 +4240,10 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                        PMD_DRV_LOG(ERR, "buffer number not match");
                        return I40E_ERR_PARAM;
                }
-               (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
+               (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+                               ETH_ADDR_LEN);
                mv_f[i].vlan_id = vlan;
+               mv_f[i].filter_type = f->mac_info.filter_type;
                i++;
        }
 
@@ -3712,14 +4277,14 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
        if (vsi->vlan_num == 0) {
                TAILQ_FOREACH(f, &vsi->mac_list, next) {
                        (void)rte_memcpy(&mv_f[i].macaddr,
-                               &f->macaddr, ETH_ADDR_LEN);
+                               &f->mac_info.mac_addr, ETH_ADDR_LEN);
                        mv_f[i].vlan_id = 0;
                        i++;
                }
        } else {
                TAILQ_FOREACH(f, &vsi->mac_list, next) {
                        ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
-                                       vsi->vlan_num, &f->macaddr);
+                                       vsi->vlan_num, &f->mac_info.mac_addr);
                        if (ret != I40E_SUCCESS)
                                goto DONE;
                        i += vsi->vlan_num;
@@ -3843,28 +4408,32 @@ DONE:
 }
 
 int
-i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
+i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
 {
        struct i40e_mac_filter *f;
        struct i40e_macvlan_filter *mv_f;
-       int vlan_num;
+       int i, vlan_num = 0;
        int ret = I40E_SUCCESS;
 
        /* If it's add and we've config it, return */
-       f = i40e_find_mac_filter(vsi, addr);
+       f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
        if (f != NULL)
                return I40E_SUCCESS;
+       if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+               (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
 
-       /**
-        * If vlan_num is 0, that's the first time to add mac,
-        * set mask for vlan_id 0.
-        */
-       if (vsi->vlan_num == 0) {
-               i40e_set_vlan_filter(vsi, 0, 1);
-               vsi->vlan_num = 1;
-       }
-
-       vlan_num = vsi->vlan_num;
+               /**
+                * If vlan_num is 0, that's the first time to add mac,
+                * set mask for vlan_id 0.
+                */
+               if (vsi->vlan_num == 0) {
+                       i40e_set_vlan_filter(vsi, 0, 1);
+                       vsi->vlan_num = 1;
+               }
+               vlan_num = vsi->vlan_num;
+       } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
+                       (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
+               vlan_num = 1;
 
        mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
        if (mv_f == NULL) {
@@ -3872,9 +4441,19 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
                return I40E_ERR_NO_MEMORY;
        }
 
-       ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
-       if (ret != I40E_SUCCESS)
-               goto DONE;
+       for (i = 0; i < vlan_num; i++) {
+               mv_f[i].filter_type = mac_filter->filter_type;
+               (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
+                               ETH_ADDR_LEN);
+       }
+
+       if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+               mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
+               ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+                                       &mac_filter->mac_addr);
+               if (ret != I40E_SUCCESS)
+                       goto DONE;
+       }
 
        ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
        if (ret != I40E_SUCCESS)
@@ -3887,7 +4466,9 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
                ret = I40E_ERR_NO_MEMORY;
                goto DONE;
        }
-       (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
+       (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
+                       ETH_ADDR_LEN);
+       f->mac_info.filter_type = mac_filter->filter_type;
        TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
        vsi->mac_num++;
 
@@ -3903,7 +4484,8 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
 {
        struct i40e_mac_filter *f;
        struct i40e_macvlan_filter *mv_f;
-       int vlan_num;
+       int i, vlan_num;
+       enum rte_mac_filter_type filter_type;
        int ret = I40E_SUCCESS;
 
        /* Can't find it, return an error */
@@ -3912,19 +4494,34 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
                return I40E_ERR_PARAM;
 
        vlan_num = vsi->vlan_num;
-       if (vlan_num == 0) {
-               PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
-               return I40E_ERR_PARAM;
-       }
+       filter_type = f->mac_info.filter_type;
+       if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+               filter_type == RTE_MACVLAN_HASH_MATCH) {
+               if (vlan_num == 0) {
+                       PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+                       return I40E_ERR_PARAM;
+               }
+       } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+                       filter_type == RTE_MAC_HASH_MATCH)
+               vlan_num = 1;
+
        mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
        if (mv_f == NULL) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                return I40E_ERR_NO_MEMORY;
        }
 
-       ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
-       if (ret != I40E_SUCCESS)
-               goto DONE;
+       for (i = 0; i < vlan_num; i++) {
+               mv_f[i].filter_type = filter_type;
+               (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+                               ETH_ADDR_LEN);
+       }
+       if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+                       filter_type == RTE_MACVLAN_HASH_MATCH) {
+               ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
+               if (ret != I40E_SUCCESS)
+                       goto DONE;
+       }
 
        ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
        if (ret != I40E_SUCCESS)
@@ -4240,12 +4837,12 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
        ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
                                        &filter_idx, NULL);
        if (ret < 0) {
-               PMD_DRV_LOG(ERR, "Failed to add VxLAN UDP port %d", port);
+               PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
                return -1;
        }
 
-       PMD_DRV_LOG(INFO, "Added %s port %d with AQ command with index %d",
-                        port,  filter_index);
+       PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
+                        port,  filter_idx);
 
        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[idx] = port;
@@ -4264,7 +4861,7 @@ i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 
        if (!(pf->flags & I40E_FLAG_VXLAN)) {
-               PMD_DRV_LOG(ERR, "VxLAN UDP port was not configured.");
+               PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
                return -EINVAL;
        }
 
@@ -4276,7 +4873,7 @@ i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
        }
 
        if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
-               PMD_DRV_LOG(ERR, "Failed to delete VxLAN UDP port %d", port);
+               PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
                return -1;
        }
 
@@ -4352,6 +4949,26 @@ i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
        return ret;
 }
 
+/* Calculate the maximum number of contiguous PF queues that are configured */
+static int
+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
+{
+       struct rte_eth_dev_data *data = pf->dev_data;
+       int i, num;
+       struct i40e_rx_queue *rxq;
+
+       num = 0;
+       for (i = 0; i < pf->lan_nb_qps; i++) {
+               rxq = data->rx_queues[i];
+               if (rxq && rxq->q_set)
+                       num++;
+               else
+                       break;
+       }
+
+       return num;
+}
+
 /* Configure RSS */
 static int
 i40e_pf_config_rss(struct i40e_pf *pf)
@@ -4359,7 +4976,25 @@ i40e_pf_config_rss(struct i40e_pf *pf)
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_rss_conf rss_conf;
        uint32_t i, lut = 0;
-       uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
+       uint16_t j, num;
+
+       /*
+        * If both VMDQ and RSS enabled, not all of PF queues are configured.
+        * It's necessary to calculate the actual PF queues that are configured.
+        */
+       if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+               num = i40e_pf_calc_configured_queues_num(pf);
+               num = i40e_align_floor(num);
+       } else
+               num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+
+       PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
+                       num);
+
+       if (num == 0) {
+               PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+               return -ENOTSUP;
+       }
 
        for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
                if (j == num)
@@ -4457,18 +5092,21 @@ i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
 static int
 i40e_pf_config_mq_rx(struct i40e_pf *pf)
 {
-       if (!pf->dev_data->sriov.active) {
-               switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
-               case ETH_MQ_RX_RSS:
-                       i40e_pf_config_rss(pf);
-                       break;
-               default:
-                       i40e_pf_disable_rss(pf);
-                       break;
-               }
+       int ret = 0;
+       enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+
+       if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+               PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
+               return -ENOTSUP;
        }
 
-       return 0;
+       /* RSS setup */
+       if (mq_mode & ETH_MQ_RX_RSS_FLAG)
+               ret = i40e_pf_config_rss(pf);
+       else
+               i40e_pf_disable_rss(pf);
+
+       return ret;
 }
 
 static int
@@ -4483,9 +5121,15 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (filter_type) {
+       case RTE_ETH_FILTER_MACVLAN:
+               ret = i40e_mac_filter_handle(dev, filter_op, arg);
+               break;
        case RTE_ETH_FILTER_TUNNEL:
                ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
                break;
+       case RTE_ETH_FILTER_FDIR:
+               ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+               break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
@@ -4495,3 +5139,49 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 
        return ret;
 }
+
+enum i40e_filter_pctype
+i40e_flowtype_to_pctype(enum rte_eth_flow_type flow_type)
+{
+       static const enum i40e_filter_pctype pctype_table[] = {
+               [RTE_ETH_FLOW_TYPE_UDPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+               [RTE_ETH_FLOW_TYPE_TCPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+               [RTE_ETH_FLOW_TYPE_SCTPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+               [RTE_ETH_FLOW_TYPE_IPV4_OTHER] =
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+               [RTE_ETH_FLOW_TYPE_FRAG_IPV4] =
+                                       I40E_FILTER_PCTYPE_FRAG_IPV4,
+               [RTE_ETH_FLOW_TYPE_UDPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+               [RTE_ETH_FLOW_TYPE_TCPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+               [RTE_ETH_FLOW_TYPE_SCTPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+               [RTE_ETH_FLOW_TYPE_IPV6_OTHER] =
+                                       I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+               [RTE_ETH_FLOW_TYPE_FRAG_IPV6] =
+                                       I40E_FILTER_PCTYPE_FRAG_IPV6,
+       };
+
+       return pctype_table[flow_type];
+}
+
+enum rte_eth_flow_type
+i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
+{
+       static const enum rte_eth_flow_type flowtype_table[] = {
+               [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_TYPE_UDPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_TYPE_TCPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+                                       RTE_ETH_FLOW_TYPE_IPV4_OTHER,
+               [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+                                       RTE_ETH_FLOW_TYPE_FRAG_IPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_TYPE_UDPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_TYPE_TCPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+                                       RTE_ETH_FLOW_TYPE_IPV6_OTHER,
+               [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+                                       RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+       };
+
+       return flowtype_table[pctype];
+}