net/ice: fix pattern check for flow director parser
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 3a1bcc4..c9fd3de 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -350,6 +350,13 @@ ice_init_controlq_parameter(struct ice_hw *hw)
        hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
        hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
+
+       /* fields for sideband queue */
+       hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+       hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+       hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+       hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+
 }
 
 static int
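Note: the first hunk sizes the new sideband control queue next to the admin and mailbox queues. All of the hw control queues share the same parameter layout, so the pattern can be factored as in the sketch below — a hypothetical helper, not part of this patch; `struct ice_ctl_q_info` and the queue fields are from the shared base code.

```c
/* Hypothetical helper (not in this patch): adminq, mailboxq, and now sbq
 * are all sized the same way before the control queues are initialized.
 */
static void
ice_set_ctrlq_sizes(struct ice_ctl_q_info *cq, uint16_t num_entries,
		    uint16_t buf_size)
{
	cq->num_rq_entries = num_entries;  /* receive queue depth */
	cq->num_sq_entries = num_entries;  /* send queue depth */
	cq->rq_buf_size = buf_size;        /* per-descriptor buffer sizes */
	cq->sq_buf_size = buf_size;
}
```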
@@ -1257,7 +1264,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev)
  * @param handle
  *  Pointer to interrupt handle.
  * @param param
- *  The address of parameter (struct rte_eth_dev *) regsitered before.
+ *  The address of parameter (struct rte_eth_dev *) registered before.
  *
  * @return
  *  void
@@ -1480,9 +1487,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        TAILQ_INIT(&vsi->mac_list);
        TAILQ_INIT(&vsi->vlan_list);
 
-       /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+       /* Be sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
        pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-                       ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+                       RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
                        hw->func_caps.common_cap.rss_table_size;
        pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -1620,7 +1627,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        }
 
        /* At the beginning, only TC0. */
-       /* What we need here is the maximam number of the TX queues.
+       /* What we need here is the maximum number of the TX queues.
         * Currently vsi->nb_qps means it.
         * Correct it if any change.
         */
@@ -2171,7 +2178,7 @@ ice_dev_init(struct rte_eth_dev *dev)
 
        ice_set_default_ptype_table(dev);
        pci_dev = RTE_DEV_TO_PCI(dev->device);
-       intr_handle = &pci_dev->intr_handle;
+       intr_handle = pci_dev->intr_handle;
 
        pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->dev_data = dev->data;
@@ -2368,7 +2375,7 @@ ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint16_t msix_intr, i;
 
@@ -2398,7 +2405,7 @@ ice_dev_stop(struct rte_eth_dev *dev)
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *main_vsi = pf->main_vsi;
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint16_t i;
 
        /* avoid stopping again */
@@ -2423,10 +2430,7 @@ ice_dev_stop(struct rte_eth_dev *dev)
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+       rte_intr_vec_list_free(intr_handle);
 
        pf->adapter_stopped = true;
        dev->data->dev_started = 0;
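Note: this and the following interrupt hunks track the DPDK 21.11 change that made `struct rte_intr_handle` opaque: `pci_dev->intr_handle` is now a pointer rather than an embedded struct, and the vector list is managed through `rte_intr_vec_list_*()` accessors instead of direct `intr_vec` access. A minimal sketch of the new teardown idiom, assuming DPDK >= 21.11 (the helper name is illustrative):

```c
#include <rte_interrupts.h>

/* rte_intr_vec_list_free() frees the vector list and clears the internal
 * pointer itself, so the old "if (intr_vec) { rte_free(); ... = NULL; }"
 * sequence at the call site goes away.
 */
static void
teardown_queue_interrupts(struct rte_intr_handle *intr_handle)
{
	rte_intr_efd_disable(intr_handle);   /* disable event fds first */
	rte_intr_vec_list_free(intr_handle); /* free + reset the vec list */
}
```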
@@ -2440,7 +2444,7 @@ ice_dev_close(struct rte_eth_dev *dev)
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        int ret;
@@ -2986,14 +2990,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        int ret;
 
 #define ICE_RSS_HF_ALL ( \
-       ETH_RSS_IPV4 | \
-       ETH_RSS_IPV6 | \
-       ETH_RSS_NONFRAG_IPV4_UDP | \
-       ETH_RSS_NONFRAG_IPV6_UDP | \
-       ETH_RSS_NONFRAG_IPV4_TCP | \
-       ETH_RSS_NONFRAG_IPV6_TCP | \
-       ETH_RSS_NONFRAG_IPV4_SCTP | \
-       ETH_RSS_NONFRAG_IPV6_SCTP)
+       RTE_ETH_RSS_IPV4 | \
+       RTE_ETH_RSS_IPV6 | \
+       RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+       RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+       RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
        ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
        if (ret)
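Note: the `ETH_RSS_*` renames here (and the `ETH_*`/`DEV_*` renames throughout the rest of the patch) follow the DPDK 21.11 namespace cleanup, which prefixed the public ethdev macros with `RTE_` without changing their values. On the application side the same flags are requested through `rte_eth_conf`; a minimal sketch with the post-21.11 names:

```c
#include <rte_ethdev.h>

/* Request the same hash types ICE_RSS_HF_ALL covers when configuring a
 * port. Only the RTE_ prefix is new relative to the old ETH_RSS_* names.
 */
static const struct rte_eth_conf port_conf = {
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
	.rx_adv_conf = {
		.rss_conf = {
			.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV6_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
		},
	},
};
```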
@@ -3003,7 +3007,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        cfg.symm = 0;
        cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
        /* Configure RSS for IPv4 with src/dst addr as input set */
-       if (rss_hf & ETH_RSS_IPV4) {
+       if (rss_hf & RTE_ETH_RSS_IPV4) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV4;
                ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3013,7 +3017,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for IPv6 with src/dst addr as input set */
-       if (rss_hf & ETH_RSS_IPV6) {
+       if (rss_hf & RTE_ETH_RSS_IPV6) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV6;
                ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3023,7 +3027,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for udp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3034,7 +3038,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for udp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3045,7 +3049,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for tcp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3056,7 +3060,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for tcp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3067,7 +3071,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for sctp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3078,7 +3082,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for sctp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3088,7 +3092,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_IPV4) {
+       if (rss_hf & RTE_ETH_RSS_IPV4) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3098,7 +3102,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_IPV6) {
+       if (rss_hf & RTE_ETH_RSS_IPV6) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3108,7 +3112,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3118,7 +3122,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
                                ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3128,7 +3132,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3138,7 +3142,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
                                ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3281,8 +3285,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        if (dev->data->nb_rx_queues) {
                ret = ice_init_rss(pf);
@@ -3338,10 +3342,11 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
-       uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+       uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
+                                  rte_intr_nb_efd_get(intr_handle));
        uint16_t queue_idx = 0;
        int record = 0;
        int i;
@@ -3369,8 +3374,9 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
                                               vsi->nb_used_qps - i);
 
                        for (; !!record && i < vsi->nb_used_qps; i++)
-                               intr_handle->intr_vec[queue_idx + i] =
-                                       msix_vect;
+                               rte_intr_vec_list_index_set(intr_handle,
+                                               queue_idx + i, msix_vect);
+
                        break;
                }
 
@@ -3379,7 +3385,9 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
                                       vsi->base_queue + i, 1);
 
                if (!!record)
-                       intr_handle->intr_vec[queue_idx + i] = msix_vect;
+                       rte_intr_vec_list_index_set(intr_handle,
+                                                          queue_idx + i,
+                                                          msix_vect);
 
                msix_vect++;
                nb_msix--;
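Note: queue-to-vector bindings are likewise written through the accessor API, and the available event-fd count is read with `rte_intr_nb_efd_get()` instead of `intr_handle->nb_efd`. An illustrative mapping loop, assuming DPDK >= 21.11 (the helper name is ours):

```c
#include <stdint.h>
#include <rte_common.h>
#include <rte_interrupts.h>

/* Bind a contiguous range of Rx queues to one MSI-X vector, clamped to the
 * number of event fds the handle actually has.
 */
static void
bind_queues_to_vector(struct rte_intr_handle *intr_handle,
		      uint16_t first_queue, uint16_t nb_queues, int msix_vect)
{
	uint16_t nb = RTE_MIN(nb_queues,
			      (uint16_t)rte_intr_nb_efd_get(intr_handle));

	for (uint16_t i = 0; i < nb; i++)
		rte_intr_vec_list_index_set(intr_handle, first_queue + i,
					    msix_vect);
}
```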
@@ -3391,7 +3399,7 @@ ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint16_t msix_intr, i;
 
@@ -3417,7 +3425,7 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
 {
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_vsi *vsi = pf->main_vsi;
        uint32_t intr_vector = 0;
 
@@ -3437,11 +3445,9 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
                        return -1;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-               rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
-                           0);
-               if (!intr_handle->intr_vec) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, NULL,
+                                                  dev->data->nb_rx_queues)) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to allocate %d rx_queues intr_vec",
                                    dev->data->nb_rx_queues);
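Note: allocation moves the same way. `rte_intr_vec_list_alloc()` replaces the manual `rte_zmalloc()` of `intr_handle->intr_vec` and zeroes the list internally. A sketch of the setup side, assuming DPDK >= 21.11 (the wrapper name is ours):

```c
#include <stdint.h>
#include <rte_interrupts.h>

/* Allocate one vector slot per Rx queue when datapath interrupts are
 * enabled. rte_intr_vec_list_alloc() returns 0 on success.
 */
static int
setup_rx_intr_vectors(struct rte_intr_handle *intr_handle, uint16_t nb_rxq)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 0;  /* nothing to map */

	if (rte_intr_vec_list_alloc(intr_handle, NULL, nb_rxq))
		return -1; /* allocation failed */

	return 0;
}
```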
@@ -3562,15 +3568,15 @@ ice_dev_start(struct rte_eth_dev *dev)
        ice_set_rx_function(dev);
        ice_set_tx_function(dev);
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-                       ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+                       RTE_ETH_VLAN_EXTEND_MASK;
        ret = ice_vlan_offload_set(dev, mask);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto rx_err;
        }
 
-       /* enable Rx interrput and mapping Rx queue to interrupt vector */
+       /* enable Rx interrupt and mapping Rx queue to interrupt vector */
        if (ice_rxq_intr_setup(dev))
                return -EIO;
 
@@ -3597,7 +3603,7 @@ ice_dev_start(struct rte_eth_dev *dev)
 
        ice_dev_set_link_up(dev);
 
-       /* Call get_link_info aq commond to enable/disable LSE */
+       /* Call get_link_info aq command to enable/disable LSE */
        ice_link_update(dev, 0);
 
        pf->adapter_stopped = false;
@@ -3675,41 +3681,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
        dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_KEEP_CRC |
-               DEV_RX_OFFLOAD_SCATTER |
-               DEV_RX_OFFLOAD_VLAN_FILTER;
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+               RTE_ETH_RX_OFFLOAD_SCATTER |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
        dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_TCP_TSO |
-               DEV_TX_OFFLOAD_MULTI_SEGS |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_TCP_TSO |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
        dev_info->flow_type_rss_offloads = 0;
 
        if (!is_safe_mode) {
                dev_info->rx_offload_capa |=
-                       DEV_RX_OFFLOAD_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_UDP_CKSUM |
-                       DEV_RX_OFFLOAD_TCP_CKSUM |
-                       DEV_RX_OFFLOAD_QINQ_STRIP |
-                       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_VLAN_EXTEND |
-                       DEV_RX_OFFLOAD_RSS_HASH |
-                       DEV_RX_OFFLOAD_TIMESTAMP;
+                       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+                       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+                       RTE_ETH_RX_OFFLOAD_RSS_HASH |
+                       RTE_ETH_RX_OFFLOAD_TIMESTAMP;
                dev_info->tx_offload_capa |=
-                       DEV_TX_OFFLOAD_QINQ_INSERT |
-                       DEV_TX_OFFLOAD_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_UDP_CKSUM |
-                       DEV_TX_OFFLOAD_TCP_CKSUM |
-                       DEV_TX_OFFLOAD_SCTP_CKSUM |
-                       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+                       RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+                       RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
                dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
        }
 
        dev_info->rx_queue_offload_capa = 0;
-       dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+       dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
        dev_info->reta_size = pf->hash_lut_size;
        dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3748,24 +3753,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                .nb_align = ICE_ALIGN_RING_DESC,
        };
 
-       dev_info->speed_capa = ETH_LINK_SPEED_10M |
-                              ETH_LINK_SPEED_100M |
-                              ETH_LINK_SPEED_1G |
-                              ETH_LINK_SPEED_2_5G |
-                              ETH_LINK_SPEED_5G |
-                              ETH_LINK_SPEED_10G |
-                              ETH_LINK_SPEED_20G |
-                              ETH_LINK_SPEED_25G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+                              RTE_ETH_LINK_SPEED_100M |
+                              RTE_ETH_LINK_SPEED_1G |
+                              RTE_ETH_LINK_SPEED_2_5G |
+                              RTE_ETH_LINK_SPEED_5G |
+                              RTE_ETH_LINK_SPEED_10G |
+                              RTE_ETH_LINK_SPEED_20G |
+                              RTE_ETH_LINK_SPEED_25G;
 
        phy_type_low = hw->port_info->phy.phy_type_low;
        phy_type_high = hw->port_info->phy.phy_type_high;
 
        if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-               dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
        if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
                        ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-               dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3830,8 +3835,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                status = ice_aq_get_link_info(hw->port_info, enable_lse,
                                              &link_status, NULL);
                if (status != ICE_SUCCESS) {
-                       link.link_speed = ETH_SPEED_NUM_100M;
-                       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+                       link.link_speed = RTE_ETH_SPEED_NUM_100M;
+                       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }
@@ -3847,55 +3852,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                goto out;
 
        /* Full-duplex operation at all supported speeds */
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        /* Parse the link status */
        switch (link_status.link_speed) {
        case ICE_AQ_LINK_SPEED_10MB:
-               link.link_speed = ETH_SPEED_NUM_10M;
+               link.link_speed = RTE_ETH_SPEED_NUM_10M;
                break;
        case ICE_AQ_LINK_SPEED_100MB:
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
        case ICE_AQ_LINK_SPEED_1000MB:
-               link.link_speed = ETH_SPEED_NUM_1G;
+               link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
        case ICE_AQ_LINK_SPEED_2500MB:
-               link.link_speed = ETH_SPEED_NUM_2_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
                break;
        case ICE_AQ_LINK_SPEED_5GB:
-               link.link_speed = ETH_SPEED_NUM_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_5G;
                break;
        case ICE_AQ_LINK_SPEED_10GB:
-               link.link_speed = ETH_SPEED_NUM_10G;
+               link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        case ICE_AQ_LINK_SPEED_20GB:
-               link.link_speed = ETH_SPEED_NUM_20G;
+               link.link_speed = RTE_ETH_SPEED_NUM_20G;
                break;
        case ICE_AQ_LINK_SPEED_25GB:
-               link.link_speed = ETH_SPEED_NUM_25G;
+               link.link_speed = RTE_ETH_SPEED_NUM_25G;
                break;
        case ICE_AQ_LINK_SPEED_40GB:
-               link.link_speed = ETH_SPEED_NUM_40G;
+               link.link_speed = RTE_ETH_SPEED_NUM_40G;
                break;
        case ICE_AQ_LINK_SPEED_50GB:
-               link.link_speed = ETH_SPEED_NUM_50G;
+               link.link_speed = RTE_ETH_SPEED_NUM_50G;
                break;
        case ICE_AQ_LINK_SPEED_100GB:
-               link.link_speed = ETH_SPEED_NUM_100G;
+               link.link_speed = RTE_ETH_SPEED_NUM_100G;
                break;
        case ICE_AQ_LINK_SPEED_UNKNOWN:
                PMD_DRV_LOG(ERR, "Unknown link speed");
-               link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+               link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
                break;
        default:
                PMD_DRV_LOG(ERR, "None link speed");
-               link.link_speed = ETH_SPEED_NUM_NONE;
+               link.link_speed = RTE_ETH_SPEED_NUM_NONE;
                break;
        }
 
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-                             ETH_LINK_SPEED_FIXED);
+                             RTE_ETH_LINK_SPEED_FIXED);
 
 out:
        ice_atomic_write_link_status(dev, &link);
@@ -4371,15 +4376,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
 
        rxmode = &dev->data->dev_conf.rxmode;
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ice_vsi_config_vlan_filter(vsi, true);
                else
                        ice_vsi_config_vlan_filter(vsi, false);
        }
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ice_vsi_config_vlan_stripping(vsi, true);
                else
                        ice_vsi_config_vlan_stripping(vsi, false);
@@ -4494,8 +4499,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
                goto out;
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        lut[i] = reta_conf[idx].reta[shift];
        }
@@ -4544,8 +4549,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
                goto out;
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = lut[i];
        }
@@ -4749,19 +4754,19 @@ static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                    uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
 
        val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
              GLINT_DYN_CTL_ITR_INDX_M;
        val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
 
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
-       rte_intr_ack(&pci_dev->intr_handle);
+       rte_intr_ack(pci_dev->intr_handle);
 
        return 0;
 }
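Note: lookups go through `rte_intr_vec_list_index_get()`, and `rte_intr_ack()` now takes the handle pointer directly. A condensed sketch of the enable path (DPDK >= 21.11; the device register write is elided):

```c
#include <stdint.h>
#include <rte_interrupts.h>

/* Fetch the MSI-X vector bound to an Rx queue, then ack the interrupt.
 * A negative return from the lookup means no vector is mapped.
 */
static int
enable_rxq_interrupt(struct rte_intr_handle *intr_handle, uint16_t queue_id)
{
	int msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);

	if (msix_intr < 0)
		return -1;

	/* ... program GLINT_DYN_CTL(msix_intr) here, as in the hunk above ... */
	rte_intr_ack(intr_handle);
	return 0;
}
```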
@@ -4770,11 +4775,11 @@ static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                     uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
 
        ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
 
@@ -5390,7 +5395,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                count++;
        }
 
-       /* Get individiual stats from ice_hw_port struct */
+       /* Get individual stats from ice_hw_port struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value =
                        *(uint64_t *)((char *)hw_stats +
@@ -5421,7 +5426,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                count++;
        }
 
-       /* Get individiual stats from ice_hw_port struct */
+       /* Get individual stats from ice_hw_port struct */
        for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
                strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
                        sizeof(xstats_names[count].name));
@@ -5454,7 +5459,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
                break;
        default:
@@ -5478,7 +5483,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
                break;
        default:
@@ -5499,7 +5504,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
        int ret;
 
        if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_TIMESTAMP)) {
+           RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
                PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
                return -1;
        }
@@ -5555,7 +5560,7 @@ ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
        rxq = dev->data->rx_queues[flags];
 
        ts_high = rxq->time_high;
-       ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+       ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, ts_high);
        ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
        *timestamp = rte_ns_to_timespec(ns);
 
@@ -5582,7 +5587,7 @@ ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                return -1;
        }
 
-       ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+       ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, (tstamp >> 8) & mask);
        ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
        *timestamp = rte_ns_to_timespec(ns);
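Note: the last two hunks pass the adapter and a flag into `ice_tstamp_convert_32b_64b()`, which extends the 32-bit hardware timestamp to 64 bits. The sketch below illustrates the general idea of such an extension against a cached 64-bit PHC reading; it is our simplification, not the driver's exact algorithm:

```c
#include <stdint.h>

/* Extend a 32-bit timestamp sample using the low word of a cached 64-bit
 * clock reading, correcting for a rollover between the two. Illustrative
 * only -- not the actual ice_tstamp_convert_32b_64b() implementation.
 */
static uint64_t
extend_ts_32b_64b(uint64_t cached_phc_time, uint32_t ts32)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint64_t ts64 = (cached_phc_time & ~0xFFFFFFFFULL) | ts32;

	if (ts32 < phc_lo && (phc_lo - ts32) > UINT32_MAX / 2)
		ts64 += 0x100000000ULL; /* sample taken after a rollover */
	else if (ts32 > phc_lo && (ts32 - phc_lo) > UINT32_MAX / 2)
		ts64 -= 0x100000000ULL; /* sample taken before a rollover */

	return ts64;
}
```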