diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 170a127..4c658ce 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -139,6 +139,10 @@ static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
 static int ice_get_eeprom(struct rte_eth_dev *dev,
                          struct rte_dev_eeprom_info *eeprom);
+static int ice_get_module_info(struct rte_eth_dev *dev,
+                              struct rte_eth_dev_module_info *modinfo);
+static int ice_get_module_eeprom(struct rte_eth_dev *dev,
+                                struct rte_dev_eeprom_info *info);
 static int ice_stats_get(struct rte_eth_dev *dev,
                         struct rte_eth_stats *stats);
 static int ice_stats_reset(struct rte_eth_dev *dev);
@@ -192,9 +196,27 @@ static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E824S) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_1GBE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825X) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
+static int
+ice_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+               void *arg)
+{
+       if (!arg)
+               return -EINVAL;
+
+       *(const void **)arg = &ice_tm_ops;
+
+       return 0;
+}
+
 static const struct eth_dev_ops ice_eth_dev_ops = {
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
@@ -238,6 +260,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .tx_burst_mode_get            = ice_tx_burst_mode_get,
        .get_eeprom_length            = ice_get_eeprom_length,
        .get_eeprom                   = ice_get_eeprom,
+       .get_module_info              = ice_get_module_info,
+       .get_module_eeprom            = ice_get_module_eeprom,
        .stats_get                    = ice_stats_get,
        .stats_reset                  = ice_stats_reset,
        .xstats_get                   = ice_xstats_get,
@@ -255,6 +279,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .timesync_read_time           = ice_timesync_read_time,
        .timesync_write_time          = ice_timesync_write_time,
        .timesync_disable             = ice_timesync_disable,
+       .tm_ops_get                   = ice_tm_ops_get,
 };
 
 /* store statistics names and its offset in stats structure */
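The two hunks above wire the ice PMD into the generic traffic management API: `ice_tm_ops_get()` hands the `ice_tm_ops` table back through the `.tm_ops_get` slot, which is how `rte_tm_*()` calls locate a driver's implementation. A minimal application-side sketch, assuming an initialized ice port and a hypothetical `query_tm_caps()` helper:

```c
#include <stdio.h>
#include <string.h>
#include <rte_tm.h>

/* Query TM capabilities; returns -ENOTSUP on PMDs that do not
 * implement .tm_ops_get. port_id is assumed to be a valid port. */
static int
query_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;
	int ret;

	memset(&cap, 0, sizeof(cap));
	ret = rte_tm_capabilities_get(port_id, &cap, &error);
	if (ret != 0)
		return ret;

	printf("TM hierarchy: %u levels, up to %u nodes\n",
	       cap.n_levels_max, cap.n_nodes_max);
	return 0;
}
```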
@@ -350,6 +375,13 @@ ice_init_controlq_parameter(struct ice_hw *hw)
        hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
        hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
+
+       /* fields for sideband queue */
+       hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+       hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+       hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+       hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+
 }
 
 static int
@@ -789,7 +821,7 @@ ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
 {
-       uint16_t bsf, qp_idx;
+       uint16_t fls, qp_idx;
 
        /* default tc 0 now. Multi-TC supporting need to be done later.
         * Configure TC and queue mapping parameters, for enabled TC,
@@ -800,16 +832,32 @@ ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                return -ENOTSUP;
        }
 
-       vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
-       bsf = rte_bsf32(vsi->nb_qps);
-       /* Adjust the queue number to actual queues that can be applied */
-       vsi->nb_qps = 0x1 << bsf;
+       /* vector 0 is reserved and 1 vector for ctrl vsi */
+       if (vsi->adapter->hw.func_caps.common_cap.num_msix_vectors < 2)
+               vsi->nb_qps = 0;
+       else
+               vsi->nb_qps = RTE_MIN
+                       ((uint16_t)vsi->adapter->hw.func_caps.common_cap.num_msix_vectors - 2,
+                       RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC));
+
+       /* nb_qps(hex)  -> fls */
+       /* 0000         -> 0 */
+       /* 0001         -> 0 */
+       /* 0002         -> 1 */
+       /* 0003 ~ 0004  -> 2 */
+       /* 0005 ~ 0008  -> 3 */
+       /* 0009 ~ 0010  -> 4 */
+       /* 0011 ~ 0020  -> 5 */
+       /* 0021 ~ 0040  -> 6 */
+       /* 0041 ~ 0080  -> 7 */
+       /* 0081 ~ 0100  -> 8 */
+       fls = (vsi->nb_qps == 0) ? 0 : rte_fls_u32(vsi->nb_qps - 1);
 
        qp_idx = 0;
        /* Set tc and queue mapping with VSI */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
-                                              (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
+                                              (fls << ICE_AQ_VSI_TC_Q_NUM_S));
 
        /* Associate queue number with VSI */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
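The queue-mapping rework replaces the old `rte_bsf32()` logic, which silently truncated `nb_qps` down to a power of two, with `rte_fls_u32(nb_qps - 1)`, i.e. ceil(log2(nb_qps)): the TC queue-number field takes a power-of-two exponent, and rounding up lets the full queue count fit in the mapped range. The new code also caps `nb_qps` by the available MSI-X vectors, minus the reserved vector 0 and the control-VSI vector. A standalone sketch of the exponent calculation, with `fls_u32()` standing in for `rte_fls_u32()`:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors rte_fls_u32(): 1-based index of the most significant
 * set bit, 0 for input 0. */
static unsigned int
fls_u32(uint32_t x)
{
	return x == 0 ? 0 : 32 - __builtin_clz(x);
}

int main(void)
{
	/* Reproduces the nb_qps -> exponent table in the comment above. */
	for (uint32_t nb_qps = 0; nb_qps <= 8; nb_qps++) {
		unsigned int exp = nb_qps == 0 ? 0 : fls_u32(nb_qps - 1);
		printf("nb_qps=%u -> exponent=%u (HW queue range=%u)\n",
		       nb_qps, exp, nb_qps ? 1u << exp : 0);
	}
	return 0;
}
```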
@@ -1257,7 +1305,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev)
  * @param handle
  *  Pointer to interrupt handle.
  * @param param
- *  The address of parameter (struct rte_eth_dev *) regsitered before.
+ *  The address of parameter (struct rte_eth_dev *) registered before.
  *
  * @return
  *  void
@@ -1480,9 +1528,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        TAILQ_INIT(&vsi->mac_list);
        TAILQ_INIT(&vsi->vlan_list);
 
-       /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+       /* Be sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
        pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-                       ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+                       RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
                        hw->func_caps.common_cap.rss_table_size;
        pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -1620,7 +1668,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        }
 
        /* At the beginning, only TC0. */
-       /* What we need here is the maximam number of the TX queues.
+       /* What we need here is the maximum number of the TX queues.
         * Currently vsi->nb_qps means it.
         * Correct it if any change.
         */
@@ -2171,7 +2219,7 @@ ice_dev_init(struct rte_eth_dev *dev)
 
        ice_set_default_ptype_table(dev);
        pci_dev = RTE_DEV_TO_PCI(dev->device);
-       intr_handle = &pci_dev->intr_handle;
+       intr_handle = pci_dev->intr_handle;
 
        pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->dev_data = dev->data;
@@ -2293,6 +2341,9 @@ ice_dev_init(struct rte_eth_dev *dev)
        /* Initialize RSS context for gtpu_eh */
        ice_rss_ctx_init(pf);
 
+       /* Initialize TM configuration */
+       ice_tm_conf_init(dev);
+
        if (!ad->is_safe_mode) {
                ret = ice_flow_init(ad);
                if (ret) {
@@ -2368,7 +2419,7 @@ ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint16_t msix_intr, i;
 
@@ -2398,7 +2449,7 @@ ice_dev_stop(struct rte_eth_dev *dev)
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *main_vsi = pf->main_vsi;
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint16_t i;
 
        /* avoid stopping again */
@@ -2423,10 +2474,7 @@ ice_dev_stop(struct rte_eth_dev *dev)
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+       rte_intr_vec_list_free(intr_handle);
 
        pf->adapter_stopped = true;
        dev->data->dev_started = 0;
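This and the following `intr_handle` hunks come from the DPDK 21.11 change that made `struct rte_intr_handle` opaque: `pci_dev->intr_handle` is now a pointer, and direct accesses to `intr_vec` and `nb_efd` are replaced by accessor calls (`rte_intr_vec_list_alloc()`, `rte_intr_vec_list_index_set()/get()`, `rte_intr_vec_list_free()`, `rte_intr_nb_efd_get()`). A minimal sketch of the new alloc/map/free pattern, with `map_queues_to_vectors()` as a hypothetical helper:

```c
#include <errno.h>
#include <rte_interrupts.h>

/* Map nb_rxq Rx queues onto interrupt vectors through the opaque
 * handle; replaces the old rte_zmalloc() of intr_handle->intr_vec. */
static int
map_queues_to_vectors(struct rte_intr_handle *ih, uint16_t nb_rxq)
{
	uint16_t q;

	if (rte_intr_vec_list_alloc(ih, "intr_vec", nb_rxq))
		return -ENOMEM;

	for (q = 0; q < nb_rxq; q++)
		rte_intr_vec_list_index_set(ih, q,
				RTE_INTR_VEC_RXTX_OFFSET + q);
	return 0;
}

/* Teardown: replaces rte_free(intr_handle->intr_vec). */
static void
unmap_queues(struct rte_intr_handle *ih)
{
	rte_intr_vec_list_free(ih);
}
```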
@@ -2440,7 +2488,7 @@ ice_dev_close(struct rte_eth_dev *dev)
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        int ret;
@@ -2476,6 +2524,9 @@ ice_dev_close(struct rte_eth_dev *dev)
        rte_free(pf->proto_xtr);
        pf->proto_xtr = NULL;
 
+       /* Uninit TM configuration */
+       ice_tm_conf_uninit(dev);
+
        if (ad->devargs.pps_out_ena) {
                ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
                ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
@@ -2986,14 +3037,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        int ret;
 
 #define ICE_RSS_HF_ALL ( \
-       ETH_RSS_IPV4 | \
-       ETH_RSS_IPV6 | \
-       ETH_RSS_NONFRAG_IPV4_UDP | \
-       ETH_RSS_NONFRAG_IPV6_UDP | \
-       ETH_RSS_NONFRAG_IPV4_TCP | \
-       ETH_RSS_NONFRAG_IPV6_TCP | \
-       ETH_RSS_NONFRAG_IPV4_SCTP | \
-       ETH_RSS_NONFRAG_IPV6_SCTP)
+       RTE_ETH_RSS_IPV4 | \
+       RTE_ETH_RSS_IPV6 | \
+       RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+       RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+       RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
        ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
        if (ret)
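`ICE_RSS_HF_ALL` and the per-protocol blocks that follow consume the `rss_hf` bits an application passes at configure time (renamed into the `RTE_ETH_RSS_*` namespace in 21.11). A sketch of the application side, assuming port and queue counts are chosen elsewhere:

```c
#include <rte_ethdev.h>

/* Request RSS over a subset of the hash types handled above;
 * the PMD masks out anything the hardware cannot honor. */
static int
enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* driver default key */
				.rss_hf = RTE_ETH_RSS_IPV4 |
					  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
```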
@@ -3003,7 +3054,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        cfg.symm = 0;
        cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
        /* Configure RSS for IPv4 with src/dst addr as input set */
-       if (rss_hf & ETH_RSS_IPV4) {
+       if (rss_hf & RTE_ETH_RSS_IPV4) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV4;
                ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3013,7 +3064,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for IPv6 with src/dst addr as input set */
-       if (rss_hf & ETH_RSS_IPV6) {
+       if (rss_hf & RTE_ETH_RSS_IPV6) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV6;
                ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3023,7 +3074,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for udp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3034,7 +3085,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for udp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3045,7 +3096,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for tcp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3056,7 +3107,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for tcp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3067,7 +3118,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for sctp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3078,7 +3129,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for sctp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3088,7 +3139,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_IPV4) {
+       if (rss_hf & RTE_ETH_RSS_IPV4) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3098,7 +3149,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_IPV6) {
+       if (rss_hf & RTE_ETH_RSS_IPV6) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3108,7 +3159,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3118,7 +3169,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
                                ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3128,7 +3179,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3138,7 +3189,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
                                ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3231,7 +3282,8 @@ static int ice_init_rss(struct ice_pf *pf)
                           RTE_MIN(rss_conf->rss_key_len,
                                   vsi->rss_key_size));
 
-       rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
+       rte_memcpy(key.standard_rss_key, vsi->rss_key,
+               RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size));
        ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
        if (ret)
                goto out;
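The `rte_memcpy()` change above is a buffer-overflow fix: the copy into the fixed-size `standard_rss_key` field was previously bounded only by the source length `vsi->rss_key_size`. The pattern, reduced to a sketch with a hypothetical 40-byte key field:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <rte_common.h> /* RTE_MIN */

/* Hypothetical stand-in for the AQ key structure. */
struct hw_key { uint8_t standard_rss_key[40]; };

/* Never let the source length alone drive a copy into a
 * fixed-size destination field. */
static void
set_key(struct hw_key *dst, const uint8_t *src, size_t src_len)
{
	memcpy(dst->standard_rss_key, src,
	       RTE_MIN(sizeof(dst->standard_rss_key), src_len));
}
```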
@@ -3281,8 +3333,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        if (dev->data->nb_rx_queues) {
                ret = ice_init_rss(pf);
@@ -3338,10 +3390,11 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
-       uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+       uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
+                                  rte_intr_nb_efd_get(intr_handle));
        uint16_t queue_idx = 0;
        int record = 0;
        int i;
@@ -3369,8 +3422,9 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
                                               vsi->nb_used_qps - i);
 
                        for (; !!record && i < vsi->nb_used_qps; i++)
-                               intr_handle->intr_vec[queue_idx + i] =
-                                       msix_vect;
+                               rte_intr_vec_list_index_set(intr_handle,
+                                               queue_idx + i, msix_vect);
+
                        break;
                }
 
@@ -3379,7 +3433,9 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
                                       vsi->base_queue + i, 1);
 
                if (!!record)
-                       intr_handle->intr_vec[queue_idx + i] = msix_vect;
+                       rte_intr_vec_list_index_set(intr_handle,
+                                                          queue_idx + i,
+                                                          msix_vect);
 
                msix_vect++;
                nb_msix--;
@@ -3391,7 +3447,7 @@ ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint16_t msix_intr, i;
 
@@ -3417,7 +3473,7 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
 {
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_vsi *vsi = pf->main_vsi;
        uint32_t intr_vector = 0;
 
@@ -3437,11 +3493,9 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
                        return -1;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-               rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
-                           0);
-               if (!intr_handle->intr_vec) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, NULL,
+                                                  dev->data->nb_rx_queues)) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to allocate %d rx_queues intr_vec",
                                    dev->data->nb_rx_queues);
@@ -3562,15 +3616,15 @@ ice_dev_start(struct rte_eth_dev *dev)
        ice_set_rx_function(dev);
        ice_set_tx_function(dev);
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-                       ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+                       RTE_ETH_VLAN_EXTEND_MASK;
        ret = ice_vlan_offload_set(dev, mask);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto rx_err;
        }
 
-       /* enable Rx interrput and mapping Rx queue to interrupt vector */
+       /* enable Rx interrupt and mapping Rx queue to interrupt vector */
        if (ice_rxq_intr_setup(dev))
                return -EIO;
 
@@ -3597,14 +3651,14 @@ ice_dev_start(struct rte_eth_dev *dev)
 
        ice_dev_set_link_up(dev);
 
-       /* Call get_link_info aq commond to enable/disable LSE */
-       ice_link_update(dev, 0);
+       /* Call get_link_info aq command to enable/disable LSE */
+       ice_link_update(dev, 1);
 
        pf->adapter_stopped = false;
 
        /* Set the max frame size to default value*/
-       max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
-               pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
+       max_frame_size = pf->dev_data->mtu ?
+               pf->dev_data->mtu + ICE_ETH_OVERHEAD :
                ICE_FRAME_SIZE_MAX;
 
        /* Set the max frame size to HW*/
@@ -3675,41 +3729,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
        dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_KEEP_CRC |
-               DEV_RX_OFFLOAD_SCATTER |
-               DEV_RX_OFFLOAD_VLAN_FILTER;
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+               RTE_ETH_RX_OFFLOAD_SCATTER |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
        dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_TCP_TSO |
-               DEV_TX_OFFLOAD_MULTI_SEGS |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_TCP_TSO |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
        dev_info->flow_type_rss_offloads = 0;
 
        if (!is_safe_mode) {
                dev_info->rx_offload_capa |=
-                       DEV_RX_OFFLOAD_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_UDP_CKSUM |
-                       DEV_RX_OFFLOAD_TCP_CKSUM |
-                       DEV_RX_OFFLOAD_QINQ_STRIP |
-                       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_VLAN_EXTEND |
-                       DEV_RX_OFFLOAD_RSS_HASH |
-                       DEV_RX_OFFLOAD_TIMESTAMP;
+                       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+                       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+                       RTE_ETH_RX_OFFLOAD_RSS_HASH |
+                       RTE_ETH_RX_OFFLOAD_TIMESTAMP;
                dev_info->tx_offload_capa |=
-                       DEV_TX_OFFLOAD_QINQ_INSERT |
-                       DEV_TX_OFFLOAD_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_UDP_CKSUM |
-                       DEV_TX_OFFLOAD_TCP_CKSUM |
-                       DEV_TX_OFFLOAD_SCTP_CKSUM |
-                       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+                       RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+                       RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
                dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
        }
 
        dev_info->rx_queue_offload_capa = 0;
-       dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+       dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
        dev_info->reta_size = pf->hash_lut_size;
        dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3748,24 +3801,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                .nb_align = ICE_ALIGN_RING_DESC,
        };
 
-       dev_info->speed_capa = ETH_LINK_SPEED_10M |
-                              ETH_LINK_SPEED_100M |
-                              ETH_LINK_SPEED_1G |
-                              ETH_LINK_SPEED_2_5G |
-                              ETH_LINK_SPEED_5G |
-                              ETH_LINK_SPEED_10G |
-                              ETH_LINK_SPEED_20G |
-                              ETH_LINK_SPEED_25G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+                              RTE_ETH_LINK_SPEED_100M |
+                              RTE_ETH_LINK_SPEED_1G |
+                              RTE_ETH_LINK_SPEED_2_5G |
+                              RTE_ETH_LINK_SPEED_5G |
+                              RTE_ETH_LINK_SPEED_10G |
+                              RTE_ETH_LINK_SPEED_20G |
+                              RTE_ETH_LINK_SPEED_25G;
 
        phy_type_low = hw->port_info->phy.phy_type_low;
        phy_type_high = hw->port_info->phy.phy_type_high;
 
        if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-               dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
        if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
                        ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-               dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3830,8 +3883,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                status = ice_aq_get_link_info(hw->port_info, enable_lse,
                                              &link_status, NULL);
                if (status != ICE_SUCCESS) {
-                       link.link_speed = ETH_SPEED_NUM_100M;
-                       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+                       link.link_speed = RTE_ETH_SPEED_NUM_100M;
+                       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }
@@ -3847,55 +3900,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                goto out;
 
        /* Full-duplex operation at all supported speeds */
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        /* Parse the link status */
        switch (link_status.link_speed) {
        case ICE_AQ_LINK_SPEED_10MB:
-               link.link_speed = ETH_SPEED_NUM_10M;
+               link.link_speed = RTE_ETH_SPEED_NUM_10M;
                break;
        case ICE_AQ_LINK_SPEED_100MB:
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
        case ICE_AQ_LINK_SPEED_1000MB:
-               link.link_speed = ETH_SPEED_NUM_1G;
+               link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
        case ICE_AQ_LINK_SPEED_2500MB:
-               link.link_speed = ETH_SPEED_NUM_2_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
                break;
        case ICE_AQ_LINK_SPEED_5GB:
-               link.link_speed = ETH_SPEED_NUM_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_5G;
                break;
        case ICE_AQ_LINK_SPEED_10GB:
-               link.link_speed = ETH_SPEED_NUM_10G;
+               link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        case ICE_AQ_LINK_SPEED_20GB:
-               link.link_speed = ETH_SPEED_NUM_20G;
+               link.link_speed = RTE_ETH_SPEED_NUM_20G;
                break;
        case ICE_AQ_LINK_SPEED_25GB:
-               link.link_speed = ETH_SPEED_NUM_25G;
+               link.link_speed = RTE_ETH_SPEED_NUM_25G;
                break;
        case ICE_AQ_LINK_SPEED_40GB:
-               link.link_speed = ETH_SPEED_NUM_40G;
+               link.link_speed = RTE_ETH_SPEED_NUM_40G;
                break;
        case ICE_AQ_LINK_SPEED_50GB:
-               link.link_speed = ETH_SPEED_NUM_50G;
+               link.link_speed = RTE_ETH_SPEED_NUM_50G;
                break;
        case ICE_AQ_LINK_SPEED_100GB:
-               link.link_speed = ETH_SPEED_NUM_100G;
+               link.link_speed = RTE_ETH_SPEED_NUM_100G;
                break;
        case ICE_AQ_LINK_SPEED_UNKNOWN:
                PMD_DRV_LOG(ERR, "Unknown link speed");
-               link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+               link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
                break;
        default:
                PMD_DRV_LOG(ERR, "None link speed");
-               link.link_speed = ETH_SPEED_NUM_NONE;
+               link.link_speed = RTE_ETH_SPEED_NUM_NONE;
                break;
        }
 
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-                             ETH_LINK_SPEED_FIXED);
+                             RTE_ETH_LINK_SPEED_FIXED);
 
 out:
        ice_atomic_write_link_status(dev, &link);
@@ -3974,33 +4027,16 @@ ice_dev_set_link_down(struct rte_eth_dev *dev)
 }
 
 static int
-ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
 {
-       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct rte_eth_dev_data *dev_data = pf->dev_data;
-       uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
-
-       /* check if mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
-               return -EINVAL;
-
        /* mtu setting is forbidden if port is start */
-       if (dev_data->dev_started) {
+       if (dev->data->dev_started != 0) {
                PMD_DRV_LOG(ERR,
                            "port %d must be stopped before configuration",
-                           dev_data->port_id);
+                           dev->data->port_id);
                return -EBUSY;
        }
 
-       if (frame_size > ICE_ETH_MAX_LEN)
-               dev_data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev_data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        return 0;
 }
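`ice_mtu_set()` shrinks because ethdev dropped `max_rx_pkt_len` and the `JUMBO_FRAME` offload in 21.11: the MTU is now the single source of truth, and `ice_dev_start()` (see the earlier hunk) derives the hardware max frame size as `mtu + ICE_ETH_OVERHEAD`. Application-side sketch, assuming a valid, stopped port:

```c
#include <rte_ethdev.h>

/* Set a jumbo MTU; the driver derives the max frame size itself,
 * so no offload flag or max_rx_pkt_len plumbing is needed. */
static int
set_jumbo_mtu(uint16_t port_id)
{
	uint16_t mtu = 9000;
	int ret;

	ret = rte_eth_dev_set_mtu(port_id, mtu); /* -EBUSY if started */
	if (ret != 0)
		return ret;

	/* On ice, the next dev_start() programs mtu + ICE_ETH_OVERHEAD
	 * as the hardware max frame size. */
	return 0;
}
```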
 
@@ -4388,15 +4424,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
 
        rxmode = &dev->data->dev_conf.rxmode;
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ice_vsi_config_vlan_filter(vsi, true);
                else
                        ice_vsi_config_vlan_filter(vsi, false);
        }
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ice_vsi_config_vlan_stripping(vsi, true);
                else
                        ice_vsi_config_vlan_stripping(vsi, false);
@@ -4511,8 +4547,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
                goto out;
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        lut[i] = reta_conf[idx].reta[shift];
        }
@@ -4561,8 +4597,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
                goto out;
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = lut[i];
        }
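The `RTE_RETA_GROUP_SIZE` to `RTE_ETH_RETA_GROUP_SIZE` rename does not change the layout: the redirection table is still passed as 64-entry groups with a per-group validity mask, exactly as the idx/shift loops above unpack it. A sketch of building such a table on the application side, spreading `nb_rxq` queues round-robin (hypothetical helper, assuming `reta_size` is a multiple of 64):

```c
#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64
		conf[reta_size / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(conf, 0, sizeof(conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		conf[idx].mask |= 1ULL << shift;   /* mark entry valid */
		conf[idx].reta[shift] = i % nb_rxq; /* round-robin queue */
	}
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}
```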
@@ -4766,19 +4802,19 @@ static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                    uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
 
        val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
              GLINT_DYN_CTL_ITR_INDX_M;
        val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
 
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
-       rte_intr_ack(&pci_dev->intr_handle);
+       rte_intr_ack(pci_dev->intr_handle);
 
        return 0;
 }
@@ -4787,11 +4823,11 @@ static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                     uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
 
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
 
@@ -4946,6 +4982,161 @@ ice_get_eeprom(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+ice_get_module_info(struct rte_eth_dev *dev,
+                   struct rte_eth_dev_module_info *modinfo)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       enum ice_status status;
+       u8 sff8472_comp = 0;
+       u8 sff8472_swap = 0;
+       u8 sff8636_rev = 0;
+       u8 value = 0;
+
+       status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
+                                  0, &value, 1, 0, NULL);
+       if (status)
+               return -EIO;
+
+       switch (value) {
+       case ICE_MODULE_TYPE_SFP:
+               status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+                                          ICE_MODULE_SFF_8472_COMP, 0x00, 0,
+                                          &sff8472_comp, 1, 0, NULL);
+               if (status)
+                       return -EIO;
+               status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+                                          ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
+                                          &sff8472_swap, 1, 0, NULL);
+               if (status)
+                       return -EIO;
+
+               if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
+                       modinfo->type = ICE_MODULE_SFF_8079;
+                       modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
+               } else if (sff8472_comp &&
+                          (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
+                       modinfo->type = ICE_MODULE_SFF_8472;
+                       modinfo->eeprom_len = ICE_MODULE_SFF_8472_LEN;
+               } else {
+                       modinfo->type = ICE_MODULE_SFF_8079;
+                       modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
+               }
+               break;
+       case ICE_MODULE_TYPE_QSFP_PLUS:
+       case ICE_MODULE_TYPE_QSFP28:
+               status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+                                          ICE_MODULE_REVISION_ADDR, 0x00, 0,
+                                          &sff8636_rev, 1, 0, NULL);
+               if (status)
+                       return -EIO;
+               /* Check revision compliance */
+               if (sff8636_rev > 0x02) {
+                       /* Module is SFF-8636 compliant */
+                       modinfo->type = ICE_MODULE_SFF_8636;
+                       modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+               } else {
+                       modinfo->type = ICE_MODULE_SFF_8436;
+                       modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+               }
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "SFF Module Type not recognized.\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int
+ice_get_module_eeprom(struct rte_eth_dev *dev,
+                     struct rte_dev_eeprom_info *info)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#define SFF_READ_BLOCK_SIZE 8
+#define I2C_BUSY_TRY_TIMES 4
+#define I2C_USLEEP_MIN_TIME 1500
+#define I2C_USLEEP_MAX_TIME 2500
+       uint8_t value[SFF_READ_BLOCK_SIZE] = {0};
+       uint8_t addr = ICE_I2C_EEPROM_DEV_ADDR;
+       uint8_t *data = NULL;
+       enum ice_status status;
+       bool is_sfp = false;
+       uint32_t i, j;
+       uint32_t offset = 0;
+       uint8_t page = 0;
+
+       if (!info || !info->length || !info->data)
+               return -EINVAL;
+
+       status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
+                                  NULL);
+       if (status)
+               return -EIO;
+
+       if (value[0] == ICE_MODULE_TYPE_SFP)
+               is_sfp = true;
+
+       data = info->data;
+       memset(data, 0, info->length);
+       for (i = 0; i < info->length; i += SFF_READ_BLOCK_SIZE) {
+               offset = i + info->offset;
+               page = 0;
+
+               /* Check if we need to access the other memory page */
+               if (is_sfp) {
+                       if (offset >= ICE_MODULE_SFF_8079_LEN) {
+                               offset -= ICE_MODULE_SFF_8079_LEN;
+                               addr = ICE_I2C_EEPROM_DEV_ADDR2;
+                       }
+               } else {
+                       while (offset >= ICE_MODULE_SFF_8436_LEN) {
+                               /* Compute memory page number and offset. */
+                               offset -= ICE_MODULE_SFF_8436_LEN / 2;
+                               page++;
+                       }
+               }
+
+               /* Bit 2 of eeprom address 0x02 declares upper
+                * pages are disabled on QSFP modules.
+                * SFP modules only ever use page 0.
+                */
+               if (page == 0 || !(data[0x2] & 0x4)) {
+                       /* If i2c bus is busy due to slow page change or
+                        * link management access, call can fail.
+                        * This is normal. So we retry this a few times.
+                        */
+                       for (j = 0; j < I2C_BUSY_TRY_TIMES; j++) {
+                               status = ice_aq_sff_eeprom(hw, 0, addr, offset,
+                                                          page, !is_sfp, value,
+                                                          SFF_READ_BLOCK_SIZE,
+                                                          0, NULL);
+                               PMD_DRV_LOG(DEBUG, "SFF %02X %02X %02X %X = "
+                                       "%02X%02X%02X%02X."
+                                       "%02X%02X%02X%02X (%X)\n",
+                                       addr, offset, page, is_sfp,
+                                       value[0], value[1],
+                                       value[2], value[3],
+                                       value[4], value[5],
+                                       value[6], value[7],
+                                       status);
+                               if (status) {
+                                       usleep_range(I2C_USLEEP_MIN_TIME,
+                                                    I2C_USLEEP_MAX_TIME);
+                                       memset(value, 0, SFF_READ_BLOCK_SIZE);
+                                       continue;
+                               }
+                               break;
+                       }
+
+                       /* Make sure we have enough room for the new block */
+                       if ((i + SFF_READ_BLOCK_SIZE) < info->length)
+                               memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
+               }
+       }
+
+       return 0;
+}
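The two new callbacks are reached through the generic ethdev entry points `rte_eth_dev_get_module_info()` and `rte_eth_dev_get_module_eeprom()`. A minimal sketch of reading a whole module EEPROM, assuming `port_id` is an ice port with a plugged module:

```c
#include <errno.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static int
dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo = {0};
	struct rte_dev_eeprom_info eeprom = {0};
	int ret;

	/* Lands in ice_get_module_info() above: identifies the
	 * module type and total EEPROM length. */
	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	eeprom.offset = 0;
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = calloc(1, eeprom.length);
	if (eeprom.data == NULL)
		return -ENOMEM;

	/* Lands in ice_get_module_eeprom() above. */
	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
	free(eeprom.data);
	return ret;
}
```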
+
 static void
 ice_stat_update_32(struct ice_hw *hw,
                   uint32_t reg,
@@ -5407,7 +5598,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                count++;
        }
 
-       /* Get individiual stats from ice_hw_port struct */
+       /* Get individual stats from ice_hw_port struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value =
                        *(uint64_t *)((char *)hw_stats +
@@ -5438,7 +5629,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                count++;
        }
 
-       /* Get individiual stats from ice_hw_port struct */
+       /* Get individual stats from ice_hw_port struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
                        sizeof(xstats_names[count].name));
@@ -5466,13 +5657,18 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 {
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
        if (udp_tunnel == NULL)
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+               if (!ret && ad->psr != NULL)
+                       ice_parser_vxlan_tunnel_set(ad->psr,
+                                       udp_tunnel->udp_port, true);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
@@ -5490,13 +5686,18 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 {
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
        if (udp_tunnel == NULL)
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+               if (!ret && ad->psr != NULL)
+                       ice_parser_vxlan_tunnel_set(ad->psr,
+                                       udp_tunnel->udp_port, false);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
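Both tunnel hunks keep the new flow parser (`ad->psr`) in sync with the hardware tunnel table whenever a VXLAN UDP port is added or removed. The application-side call that reaches this code, as a sketch:

```c
#include <rte_ethdev.h>

/* Register a VXLAN UDP destination port; on ice this programs the
 * hardware tunnel table and notifies the flow parser. */
static int
add_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
```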
@@ -5516,7 +5717,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
        int ret;
 
        if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_TIMESTAMP)) {
+           RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
                PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
                return -1;
        }
@@ -5572,7 +5773,7 @@ ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
        rxq = dev->data->rx_queues[flags];
 
        ts_high = rxq->time_high;
-       ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+       ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, ts_high);
        ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
        *timestamp = rte_ns_to_timespec(ns);
 
@@ -5599,7 +5800,7 @@ ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                return -1;
        }
 
-       ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+       ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, (tstamp >> 8) & mask);
        ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
        *timestamp = rte_ns_to_timespec(ns);