diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 57abc2c..7ffd7e7 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -386,6 +386,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
@@ -1440,7 +1441,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        }
        i40e_set_default_ptype_table(dev);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       intr_handle = &pci_dev->intr_handle;
+       intr_handle = pci_dev->intr_handle;
 
        rte_eth_copy_pci_info(dev, pci_dev);
 
@@ -1629,7 +1630,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
        /* Set the global registers with default ether type value */
        if (!pf->support_multi_driver) {
-               ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+               ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
                                         RTE_ETHER_TYPE_VLAN);
                if (ret != I40E_SUCCESS) {
                        PMD_INIT_LOG(ERR,
@@ -1709,11 +1710,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
         */
        i40e_add_tx_flow_control_drop_filter(pf);
 
-       /* Set the max frame size to 0x2600 by default,
-        * in case other drivers changed the default value.
-        */
-       i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
-
        /* initialize RSS rule list */
        TAILQ_INIT(&pf->rss_config_list);
 
@@ -1781,10 +1777,8 @@ i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
 
        ethertype_rule = &pf->ethertype;
        /* Remove all ethertype filter rules and hash */
-       if (ethertype_rule->hash_map)
-               rte_free(ethertype_rule->hash_map);
-       if (ethertype_rule->hash_table)
-               rte_hash_free(ethertype_rule->hash_table);
+       rte_free(ethertype_rule->hash_map);
+       rte_hash_free(ethertype_rule->hash_table);
 
        while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
                TAILQ_REMOVE(&ethertype_rule->ethertype_list,
@@ -1801,10 +1795,8 @@ i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
 
        tunnel_rule = &pf->tunnel;
        /* Remove all tunnel director rules and hash */
-       if (tunnel_rule->hash_map)
-               rte_free(tunnel_rule->hash_map);
-       if (tunnel_rule->hash_table)
-               rte_hash_free(tunnel_rule->hash_table);
+       rte_free(tunnel_rule->hash_map);
+       rte_hash_free(tunnel_rule->hash_table);
 
        while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
                TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
@@ -1833,16 +1825,11 @@ i40e_fdir_memory_cleanup(struct i40e_pf *pf)
        fdir_info = &pf->fdir;
 
        /* flow director memory cleanup */
-       if (fdir_info->hash_map)
-               rte_free(fdir_info->hash_map);
-       if (fdir_info->hash_table)
-               rte_hash_free(fdir_info->hash_table);
-       if (fdir_info->fdir_flow_pool.bitmap)
-               rte_free(fdir_info->fdir_flow_pool.bitmap);
-       if (fdir_info->fdir_flow_pool.pool)
-               rte_free(fdir_info->fdir_flow_pool.pool);
-       if (fdir_info->fdir_filter_array)
-               rte_free(fdir_info->fdir_filter_array);
+       rte_free(fdir_info->hash_map);
+       rte_hash_free(fdir_info->hash_table);
+       rte_free(fdir_info->fdir_flow_pool.bitmap);
+       rte_free(fdir_info->fdir_flow_pool.pool);
+       rte_free(fdir_info->fdir_filter_array);
 }
 
 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
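The NULL checks dropped in the three cleanup hunks above were redundant because both helpers are NULL-tolerant by contract: rte_free() does nothing when passed NULL, mirroring free(NULL), and rte_hash_free() returns immediately on a NULL handle, so the unconditional calls behave identically when a resource was never allocated:

    rte_free(NULL);      /* documented no-op */
    rte_hash_free(NULL); /* returns without action */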
@@ -1896,8 +1883,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
        ad->tx_simple_allowed = true;
        ad->tx_vec_allowed = true;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* Only legacy filter API needs the following fdir config. So when the
         * legacy filter API is deprecated, the following codes should also be
@@ -1922,7 +1909,7 @@ i40e_dev_configure(struct rte_eth_dev *dev)
                goto err;
 
        /* VMDQ setup.
-        *  General PMD driver call sequence are NIC init, configure,
+        *  General PMD call sequence is NIC init, configure,
         *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
         *  will try to lookup the VSI that specific queue belongs to if VMDQ
         *  applicable. So, VMDQ setting has to be done before
@@ -1931,13 +1918,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
         *  number, which will be available after rx_queue_setup(). dev_start()
         *  function is good to place RSS setup.
         */
-       if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+       if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
                ret = i40e_vmdq_setup(dev);
                if (ret)
                        goto err;
        }
 
-       if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+       if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
                ret = i40e_dcb_setup(dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, "failed to configure DCB.");
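The ETH_* to RTE_ETH_* churn throughout this file follows the DPDK 21.11 ethdev namespace cleanup: every public macro gained the RTE_ prefix, with the old spellings kept only as deprecated aliases. From an application's point of view the rename looks like this (illustrative):

    struct rte_eth_conf conf = {0};
    conf.rxmode.mq_mode  = RTE_ETH_MQ_RX_RSS;            /* was ETH_MQ_RX_RSS */
    conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH;  /* was DEV_RX_OFFLOAD_RSS_HASH */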
@@ -1972,7 +1959,7 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;
@@ -2088,10 +2075,11 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
-       uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+       uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
+                                  rte_intr_nb_efd_get(intr_handle));
        uint16_t queue_idx = 0;
        int record = 0;
        int i;
@@ -2141,8 +2129,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
                                               vsi->nb_used_qps - i,
                                               itr_idx);
                        for (; !!record && i < vsi->nb_used_qps; i++)
-                               intr_handle->intr_vec[queue_idx + i] =
-                                       msix_vect;
+                               rte_intr_vec_list_index_set(intr_handle,
+                                               queue_idx + i, msix_vect);
                        break;
                }
                /* 1:1 queue/msix_vect mapping */
@@ -2150,7 +2138,9 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
                                       vsi->base_queue + i, 1,
                                       itr_idx);
                if (!!record)
-                       intr_handle->intr_vec[queue_idx + i] = msix_vect;
+                       if (rte_intr_vec_list_index_set(intr_handle,
+                                               queue_idx + i, msix_vect))
+                               return -rte_errno;
 
                msix_vect++;
                nb_msix--;
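The interrupt vector table also sits behind accessors now: rte_intr_vec_list_index_set() replaces direct writes to intr_handle->intr_vec[] and can fail (for instance when the list was never allocated), which is why the 1:1 mapping path above now propagates -rte_errno. The read side, used later in this patch by the Rx interrupt handlers, is symmetric:

    /* was: msix_intr = intr_handle->intr_vec[queue_id]; */
    msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);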
@@ -2164,7 +2154,7 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        uint16_t msix_intr, i;
@@ -2191,7 +2181,7 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        uint16_t msix_intr, i;
@@ -2214,17 +2204,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
        uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-       if (link_speeds & ETH_LINK_SPEED_40G)
+       if (link_speeds & RTE_ETH_LINK_SPEED_40G)
                link_speed |= I40E_LINK_SPEED_40GB;
-       if (link_speeds & ETH_LINK_SPEED_25G)
+       if (link_speeds & RTE_ETH_LINK_SPEED_25G)
                link_speed |= I40E_LINK_SPEED_25GB;
-       if (link_speeds & ETH_LINK_SPEED_20G)
+       if (link_speeds & RTE_ETH_LINK_SPEED_20G)
                link_speed |= I40E_LINK_SPEED_20GB;
-       if (link_speeds & ETH_LINK_SPEED_10G)
+       if (link_speeds & RTE_ETH_LINK_SPEED_10G)
                link_speed |= I40E_LINK_SPEED_10GB;
-       if (link_speeds & ETH_LINK_SPEED_1G)
+       if (link_speeds & RTE_ETH_LINK_SPEED_1G)
                link_speed |= I40E_LINK_SPEED_1GB;
-       if (link_speeds & ETH_LINK_SPEED_100M)
+       if (link_speeds & RTE_ETH_LINK_SPEED_100M)
                link_speed |= I40E_LINK_SPEED_100MB;
 
        return link_speed;
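This helper folds the RTE_ETH_LINK_SPEED_* bitmap supplied by the application into the firmware's I40E_LINK_SPEED_* encoding. An illustrative input requesting only 10G and 25G:

    /* i40e_parse_link_speeds() returns
     * I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_25GB for this value */
    dev_conf.link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G;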
@@ -2332,13 +2322,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
        abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
                     I40E_AQ_PHY_LINK_ENABLED;
 
-       if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-               conf->link_speeds = ETH_LINK_SPEED_40G |
-                                   ETH_LINK_SPEED_25G |
-                                   ETH_LINK_SPEED_20G |
-                                   ETH_LINK_SPEED_10G |
-                                   ETH_LINK_SPEED_1G |
-                                   ETH_LINK_SPEED_100M;
+       if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+               conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+                                   RTE_ETH_LINK_SPEED_25G |
+                                   RTE_ETH_LINK_SPEED_20G |
+                                   RTE_ETH_LINK_SPEED_10G |
+                                   RTE_ETH_LINK_SPEED_1G |
+                                   RTE_ETH_LINK_SPEED_100M;
 
                abilities |= I40E_AQ_PHY_AN_ENABLED;
        } else {
@@ -2357,10 +2347,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        struct i40e_vsi *vsi;
        uint16_t nb_rxq, nb_txq;
+       uint16_t max_frame_size;
 
        hw->adapter_stopped = 0;
 
@@ -2375,12 +2366,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
                        return ret;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int),
-                                   0);
-               if (!intr_handle->intr_vec) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                                  dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                "Failed to allocate %d rx_queues intr_vec",
                                dev->data->nb_rx_queues);
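rte_intr_vec_list_alloc() wraps the zmalloc-and-assign pattern it replaces: it sizes the per-queue vector list internally and records the allocation in the handle, so teardown shrinks to a single rte_intr_vec_list_free() (see i40e_dev_stop() below) instead of rte_free() plus a NULL assignment. A hedged sketch of the paired calls:

    if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_rx_queues))
            return -ENOMEM;              /* illustrative error handling */
    ...
    rte_intr_vec_list_free(intr_handle); /* safe to call unconditionally */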
@@ -2483,7 +2471,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
                if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(WARNING, "Fail to set phy mask");
 
-               /* Call get_link_info aq commond to enable/disable LSE */
+               /* Call get_link_info aq command to enable/disable LSE */
                i40e_dev_link_update(dev, 0);
        }
 
@@ -2502,6 +2490,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
                            "please call hierarchy_commit() "
                            "before starting the port");
 
+       max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
+       i40e_set_mac_max_frame(dev, max_frame_size);
+
        return I40E_SUCCESS;
 
 tx_err:
@@ -2521,7 +2512,7 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int i;
 
        if (hw->adapter_stopped == 1)
@@ -2562,10 +2553,9 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+
+       /* Cleanup vector list */
+       rte_intr_vec_list_free(intr_handle);
 
        /* reset hierarchy commit */
        pf->tm_conf.committed = false;
@@ -2584,7 +2574,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_filter_control_settings settings;
        struct rte_flow *p_flow;
        uint32_t reg;
@@ -2849,6 +2839,9 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
        return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
+#define CHECK_INTERVAL             100  /* 100ms */
+#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
+
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
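CHECK_INTERVAL and MAX_REPEAT_TIME are hoisted from update_link_aq() to file scope because the new i40e_set_mac_max_frame() at the end of this patch reuses them for its own link poll; the polling budget itself is unchanged at 10 polls of 100 ms, one second in total.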
@@ -2876,34 +2869,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
        /* Parse the link status */
        switch (link_speed) {
        case I40E_REG_SPEED_0:
-               link->link_speed = ETH_SPEED_NUM_100M;
+               link->link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
        case I40E_REG_SPEED_1:
-               link->link_speed = ETH_SPEED_NUM_1G;
+               link->link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
        case I40E_REG_SPEED_2:
                if (hw->mac.type == I40E_MAC_X722)
-                       link->link_speed = ETH_SPEED_NUM_2_5G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
                else
-                       link->link_speed = ETH_SPEED_NUM_10G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        case I40E_REG_SPEED_3:
                if (hw->mac.type == I40E_MAC_X722) {
-                       link->link_speed = ETH_SPEED_NUM_5G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_5G;
                } else {
                        reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
                        if (reg_val & I40E_REG_MACC_25GB)
-                               link->link_speed = ETH_SPEED_NUM_25G;
+                               link->link_speed = RTE_ETH_SPEED_NUM_25G;
                        else
-                               link->link_speed = ETH_SPEED_NUM_40G;
+                               link->link_speed = RTE_ETH_SPEED_NUM_40G;
                }
                break;
        case I40E_REG_SPEED_4:
                if (hw->mac.type == I40E_MAC_X722)
-                       link->link_speed = ETH_SPEED_NUM_10G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_10G;
                else
-                       link->link_speed = ETH_SPEED_NUM_20G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_20G;
                break;
        default:
                PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2915,8 +2908,6 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
        bool enable_lse, int wait_to_complete)
 {
-#define CHECK_INTERVAL             100  /* 100ms */
-#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
        uint32_t rep_cnt = MAX_REPEAT_TIME;
        struct i40e_link_status link_status;
        int status;
@@ -2930,8 +2921,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
                status = i40e_aq_get_link_info(hw, enable_lse,
                                                &link_status, NULL);
                if (unlikely(status != I40E_SUCCESS)) {
-                       link->link_speed = ETH_SPEED_NUM_NONE;
-                       link->link_duplex = ETH_LINK_FULL_DUPLEX;
+                       link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+                       link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        return;
                }
@@ -2946,28 +2937,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
        /* Parse the link status */
        switch (link_status.link_speed) {
        case I40E_LINK_SPEED_100MB:
-               link->link_speed = ETH_SPEED_NUM_100M;
+               link->link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
        case I40E_LINK_SPEED_1GB:
-               link->link_speed = ETH_SPEED_NUM_1G;
+               link->link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
        case I40E_LINK_SPEED_10GB:
-               link->link_speed = ETH_SPEED_NUM_10G;
+               link->link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        case I40E_LINK_SPEED_20GB:
-               link->link_speed = ETH_SPEED_NUM_20G;
+               link->link_speed = RTE_ETH_SPEED_NUM_20G;
                break;
        case I40E_LINK_SPEED_25GB:
-               link->link_speed = ETH_SPEED_NUM_25G;
+               link->link_speed = RTE_ETH_SPEED_NUM_25G;
                break;
        case I40E_LINK_SPEED_40GB:
-               link->link_speed = ETH_SPEED_NUM_40G;
+               link->link_speed = RTE_ETH_SPEED_NUM_40G;
                break;
        default:
                if (link->link_status)
-                       link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+                       link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
                else
-                       link->link_speed = ETH_SPEED_NUM_NONE;
+                       link->link_speed = RTE_ETH_SPEED_NUM_NONE;
                break;
        }
 }
@@ -2984,9 +2975,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
        memset(&link, 0, sizeof(link));
 
        /* i40e uses full duplex only */
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-                       ETH_LINK_SPEED_FIXED);
+                       RTE_ETH_LINK_SPEED_FIXED);
 
        if (!wait_to_complete && !enable_lse)
                update_link_reg(hw, &link);
@@ -3556,7 +3547,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                count++;
        }
 
-       /* Get individiual stats from i40e_hw_port struct */
+       /* Get individual stats from i40e_hw_port struct */
        for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
                strlcpy(xstats_names[count].name,
                        rte_i40e_hw_port_strings[i].name,
@@ -3614,7 +3605,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                count++;
        }
 
-       /* Get individiual stats from i40e_hw_port struct */
+       /* Get individual stats from i40e_hw_port struct */
        for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_i40e_hw_port_strings[i].offset);
@@ -3720,38 +3711,39 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->rx_queue_offload_capa = 0;
        dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_QINQ_STRIP |
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_KEEP_CRC |
-               DEV_RX_OFFLOAD_SCATTER |
-               DEV_RX_OFFLOAD_VLAN_EXTEND |
-               DEV_RX_OFFLOAD_VLAN_FILTER |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_RSS_HASH;
-
-       dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+               RTE_ETH_RX_OFFLOAD_SCATTER |
+               RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+               RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+       dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
        dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_QINQ_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_TCP_TSO |
-               DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-               DEV_TX_OFFLOAD_GRE_TNL_TSO |
-               DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-               DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-               DEV_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_TSO |
+               RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
                dev_info->tx_queue_offload_capa;
        dev_info->dev_capa =
                RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                sizeof(uint32_t);
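Three capability changes ride along with the renames here: DEV_RX_OFFLOAD_JUMBO_FRAME disappears from the Rx list because jumbo support is now expressed through the MTU limits alone, RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM is newly advertised, and RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP is explicitly cleared since i40e does not preserve flow rules across a device restart. An application probing the new bits (illustrative):

    struct rte_eth_dev_info info;
    if (rte_eth_dev_info_get(port_id, &info) == 0 &&
        (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))
            ; /* outer UDP checksum offload is available */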
@@ -3806,7 +3798,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
                /* For XL710 */
-               dev_info->speed_capa = ETH_LINK_SPEED_40G;
+               dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
                dev_info->default_rxportconf.nb_queues = 2;
                dev_info->default_txportconf.nb_queues = 2;
                if (dev->data->nb_rx_queues == 1)
@@ -3820,17 +3812,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
                /* For XXV710 */
-               dev_info->speed_capa = ETH_LINK_SPEED_25G;
+               dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
                dev_info->default_rxportconf.nb_queues = 1;
                dev_info->default_txportconf.nb_queues = 1;
                dev_info->default_rxportconf.ring_size = 256;
                dev_info->default_txportconf.ring_size = 256;
        } else {
                /* For X710 */
-               dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+               dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
                dev_info->default_rxportconf.nb_queues = 1;
                dev_info->default_txportconf.nb_queues = 1;
-               if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+               if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
                        dev_info->default_rxportconf.ring_size = 512;
                        dev_info->default_txportconf.ring_size = 256;
                } else {
@@ -3869,7 +3861,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
        int ret;
 
        if (qinq) {
-               if (vlan_type == ETH_VLAN_TYPE_OUTER)
+               if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
                        reg_id = 2;
        }
 
@@ -3916,12 +3908,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.offloads &
-                  DEV_RX_OFFLOAD_VLAN_EXTEND;
+                  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
        int ret = 0;
 
-       if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-            vlan_type != ETH_VLAN_TYPE_OUTER) ||
-           (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+       if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+            vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+           (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
                PMD_DRV_LOG(ERR,
                            "Unsupported vlan type.");
                return -EINVAL;
@@ -3935,12 +3927,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
        /* 802.1ad frames ability is added in NVM API 1.7*/
        if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
                if (qinq) {
-                       if (vlan_type == ETH_VLAN_TYPE_OUTER)
+                       if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
                                hw->first_tag = rte_cpu_to_le_16(tpid);
-                       else if (vlan_type == ETH_VLAN_TYPE_INNER)
+                       else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
                                hw->second_tag = rte_cpu_to_le_16(tpid);
                } else {
-                       if (vlan_type == ETH_VLAN_TYPE_OUTER)
+                       if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
                                hw->second_tag = rte_cpu_to_le_16(tpid);
                }
                ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -3999,37 +3991,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
 
        rxmode = &dev->data->dev_conf.rxmode;
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        i40e_vsi_config_vlan_filter(vsi, TRUE);
                else
                        i40e_vsi_config_vlan_filter(vsi, FALSE);
        }
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        i40e_vsi_config_vlan_stripping(vsi, TRUE);
                else
                        i40e_vsi_config_vlan_stripping(vsi, FALSE);
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
                        i40e_vsi_config_double_vlan(vsi, TRUE);
                        /* Set global registers with default ethertype. */
-                       i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+                       i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
                                           RTE_ETHER_TYPE_VLAN);
-                       i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+                       i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
                                           RTE_ETHER_TYPE_VLAN);
                }
                else
                        i40e_vsi_config_double_vlan(vsi, FALSE);
        }
 
-       if (mask & ETH_QINQ_STRIP_MASK) {
+       if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                /* Enable or disable outer VLAN stripping */
-               if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
                else
                        i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
@@ -4112,17 +4104,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
         /* Return current mode according to actual setting*/
        switch (hw->fc.current_mode) {
        case I40E_FC_FULL:
-               fc_conf->mode = RTE_FC_FULL;
+               fc_conf->mode = RTE_ETH_FC_FULL;
                break;
        case I40E_FC_TX_PAUSE:
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
                break;
        case I40E_FC_RX_PAUSE:
-               fc_conf->mode = RTE_FC_RX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
                break;
        case I40E_FC_NONE:
        default:
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
        };
 
        return 0;
@@ -4138,10 +4130,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        struct i40e_hw *hw;
        struct i40e_pf *pf;
        enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-               [RTE_FC_NONE] = I40E_FC_NONE,
-               [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-               [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-               [RTE_FC_FULL] = I40E_FC_FULL
+               [RTE_ETH_FC_NONE] = I40E_FC_NONE,
+               [RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+               [RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+               [RTE_ETH_FC_FULL] = I40E_FC_FULL
        };
 
        /* high_water field in the rte_eth_fc_conf using the kilobytes unit */
@@ -4288,7 +4280,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
        }
 
        rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
        else
                mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4441,7 +4433,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
        int ret;
 
        if (reta_size != lut_size ||
-               reta_size > ETH_RSS_RETA_SIZE_512) {
+               reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
                PMD_DRV_LOG(ERR,
                        "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
                        reta_size, lut_size);
@@ -4457,8 +4449,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
        if (ret)
                goto out;
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        lut[i] = reta_conf[idx].reta[shift];
        }
@@ -4484,7 +4476,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
        int ret;
 
        if (reta_size != lut_size ||
-               reta_size > ETH_RSS_RETA_SIZE_512) {
+               reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
                PMD_DRV_LOG(ERR,
                        "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
                        reta_size, lut_size);
@@ -4501,8 +4493,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
        if (ret)
                goto out;
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = lut[i];
        }
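Both RETA loops split the flat table index into a group and a bit position, since rte_eth_rss_reta_entry64 carries 64 entries per group. Worked example with RTE_ETH_RETA_GROUP_SIZE = 64:

    /* i = 130: idx = 130 / 64 = 2, shift = 130 % 64 = 2,
     * so entry 130 is reta_conf[2].reta[2], gated by bit 2 of
     * reta_conf[2].mask */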
@@ -4819,7 +4811,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
                        pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
                                hw->func_caps.num_vsis - vsi_count);
                        pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-                               ETH_64_POOLS);
+                               RTE_ETH_64_POOLS);
                        if (pf->max_nb_vmdq_vsi) {
                                pf->flags |= I40E_FLAG_VMDQ;
                                pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -5544,7 +5536,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
                                        &ets_sla_config, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR,
-                       "VSI failed to get TC bandwdith configuration %u",
+                       "VSI failed to get TC bandwidth configuration %u",
                        hw->aq.asq_last_status);
                return ret;
        }
@@ -6105,10 +6097,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
        int mask = 0;
 
        /* Apply vlan offload setting */
-       mask = ETH_VLAN_STRIP_MASK |
-              ETH_QINQ_STRIP_MASK |
-              ETH_VLAN_FILTER_MASK |
-              ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK |
+              RTE_ETH_QINQ_STRIP_MASK |
+              RTE_ETH_VLAN_FILTER_MASK |
+              RTE_ETH_VLAN_EXTEND_MASK;
        ret = i40e_vlan_offload_set(dev, mask);
        if (ret) {
                PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6237,9 +6229,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
        /* Configure filter control */
        memset(&settings, 0, sizeof(settings));
-       if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+       if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
                settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-       else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+       else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
                settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
        else {
                PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -6719,6 +6711,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
                        if (!ret)
                                rte_eth_dev_callback_process(dev,
                                        RTE_ETH_EVENT_INTR_LSC, NULL);
+
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -6822,7 +6815,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev)
  * @param handle
  *  Pointer to interrupt handle.
  * @param param
- *  The address of parameter (struct rte_eth_dev *) regsitered before.
+ *  The address of parameter (struct rte_eth_dev *) registered before.
  *
  * @return
  *  void
@@ -7099,7 +7092,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
        uint32_t vid_idx, vid_bit;
 
-       if (vlan_id > ETH_VLAN_ID_MAX)
+       if (vlan_id > RTE_ETH_VLAN_ID_MAX)
                return 0;
 
        vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7134,7 +7127,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
        struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
        int ret;
 
-       if (vlan_id > ETH_VLAN_ID_MAX)
+       if (vlan_id > RTE_ETH_VLAN_ID_MAX)
                return;
 
        i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -7728,25 +7721,25 @@ static int
 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
 {
        switch (filter_type) {
-       case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+       case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
                break;
-       case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+       case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
                break;
-       case RTE_TUNNEL_FILTER_IMAC_TENID:
+       case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
                break;
-       case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+       case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
                break;
-       case ETH_TUNNEL_FILTER_IMAC:
+       case RTE_ETH_TUNNEL_FILTER_IMAC:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
                break;
-       case ETH_TUNNEL_FILTER_OIP:
+       case RTE_ETH_TUNNEL_FILTER_OIP:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
                break;
-       case ETH_TUNNEL_FILTER_IIP:
+       case RTE_ETH_TUNNEL_FILTER_IIP:
                *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
                break;
        default:
@@ -8712,16 +8705,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
                                          I40E_AQC_TUNNEL_TYPE_VXLAN);
                break;
-       case RTE_TUNNEL_TYPE_VXLAN_GPE:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
                ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
                                          I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
-       case RTE_TUNNEL_TYPE_TEREDO:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
                PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
                ret = -1;
                break;
@@ -8747,12 +8740,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
-       case RTE_TUNNEL_TYPE_VXLAN_GPE:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
                ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
-       case RTE_TUNNEL_TYPE_TEREDO:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
                PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
                ret = -1;
                break;
@@ -8844,7 +8837,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->adapter->hw;
-       uint8_t lut[ETH_RSS_RETA_SIZE_512];
+       uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
        uint32_t i;
        int num;
 
@@ -8852,7 +8845,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
         * configured. It's necessary to calculate the actual PF
         * queues that are configured.
         */
-       if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+       if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
                num = i40e_pf_calc_configured_queues_num(pf);
        else
                num = pf->dev_data->nb_rx_queues;
@@ -8931,7 +8924,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
        rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
        if (!(rss_hf & pf->adapter->flow_types_mask) ||
-           !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+           !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                return 0;
 
        hw = I40E_PF_TO_HW(pf);
@@ -9719,7 +9712,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
        return 0;
 }
 
-/* Check if there exists the ehtertype filter */
+/* Check if there exists the ethertype filter */
 struct i40e_ethertype_filter *
 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
                                const struct i40e_ethertype_filter_input *input)
@@ -10268,16 +10261,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
        rte_eth_linkstatus_get(dev, &link);
 
        switch (link.link_speed) {
-       case ETH_SPEED_NUM_40G:
-       case ETH_SPEED_NUM_25G:
+       case RTE_ETH_SPEED_NUM_40G:
+       case RTE_ETH_SPEED_NUM_25G:
                tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
                tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
                break;
-       case ETH_SPEED_NUM_10G:
+       case RTE_ETH_SPEED_NUM_10G:
                tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
                tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
                break;
-       case ETH_SPEED_NUM_1G:
+       case RTE_ETH_SPEED_NUM_1G:
                tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
                tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
                break;
@@ -10505,7 +10498,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
        else
                *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-       if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+       if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
                dcb_cfg->pfc.willing = 0;
                dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
                dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11013,7 +11006,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
        uint16_t bsf, tc_mapping;
        int i, j = 0;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
        else
                dcb_info->nb_tcs = 1;
@@ -11061,7 +11054,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
                }
                j++;
-       } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+       } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
        return 0;
 }
 
@@ -11069,11 +11062,11 @@ static int
 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                               I40E_PFINT_DYN_CTL0_INTENA_MASK |
@@ -11088,7 +11081,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
                               I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 
        I40E_WRITE_FLUSH(hw);
-       rte_intr_ack(&pci_dev->intr_handle);
+       rte_intr_ack(pci_dev->intr_handle);
 
        return 0;
 }
@@ -11097,11 +11090,11 @@ static int
 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                               I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
@@ -11419,30 +11412,16 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
 }
 
 static int
-i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct rte_eth_dev_data *dev_data = pf->dev_data;
-       uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
-       int ret = 0;
-
-       /* check if mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
-               return -EINVAL;
-
        /* mtu setting is forbidden if port is start */
-       if (dev_data->dev_started) {
+       if (dev->data->dev_started != 0) {
                PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
-                           dev_data->port_id);
+                           dev->data->port_id);
                return -EBUSY;
        }
 
-       if (mtu > RTE_ETHER_MTU)
-               dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       return ret;
+       return 0;
 }
 
 /* Restore ethertype filter */
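The range check dropped from i40e_dev_mtu_set() is not lost: the generic rte_eth_dev_set_mtu() already validates the requested MTU against the min_mtu/max_mtu this driver reports in dev_info, so the callback keeps only the i40e-specific restriction that the port be stopped (the new frame size is programmed on the next start). Illustrative call order from an application:

    rte_eth_dev_stop(port_id);
    rte_eth_dev_set_mtu(port_id, 9000); /* range-checked by ethdev */
    rte_eth_dev_start(port_id);         /* applies mtu + I40E_ETH_OVERHEAD */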
@@ -12117,6 +12096,35 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
        return ret;
 }
 
+static void
+i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
+{
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rep_cnt = MAX_REPEAT_TIME;
+       struct rte_eth_link link;
+       enum i40e_status_code status;
+       bool can_be_set = true;
+
+       /* I40E_MEDIA_TYPE_BASET link up can be ignored */
+       if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
+               do {
+                       update_link_reg(hw, &link);
+                       if (link.link_status)
+                               break;
+                       rte_delay_ms(CHECK_INTERVAL);
+               } while (--rep_cnt);
+               can_be_set = !!link.link_status;
+       }
+
+       if (can_be_set) {
+               status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
+               if (status != I40E_SUCCESS)
+                       PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
+       } else {
+               PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
+       }
+}
+
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
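i40e_set_mac_max_frame() closes the loop on the frame-size rework: for media other than I40E_MEDIA_TYPE_BASET it first polls update_link_reg() within the shared CHECK_INTERVAL/MAX_REPEAT_TIME budget, as the MAC-config admin command is not honored while such links are still down, and only then issues i40e_aq_set_mac_config() with the MTU-derived size computed in i40e_dev_start(); BaseT ports skip the wait entirely.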