ethdev: add namespace
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 66e33a1..6a6637a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
+#define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
+
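+/* the PHC time is handled as a full 64-bit nanosecond value */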
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
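+/* mbuf dynamic field/flag carrying Rx HW timestamps, registered when the
+ * RTE_ETH_RX_OFFLOAD_TIMESTAMP offload is enabled
+ */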
+uint64_t ice_timestamp_dynflag;
+int ice_timestamp_dynfield_offset = -1;
 
 static const char * const ice_valid_args[] = {
        ICE_SAFE_MODE_SUPPORT_ARG,
@@ -37,10 +44,10 @@ static const char * const ice_valid_args[] = {
        ICE_PROTO_XTR_ARG,
        ICE_HW_DEBUG_MASK_ARG,
        ICE_ONE_PPS_OUT_ARG,
+       ICE_RX_LOW_LATENCY_ARG,
        NULL
 };
 
-#define NSEC_PER_SEC      1000000000
 #define PPS_OUT_DELAY_NS  1
 
 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
@@ -146,6 +153,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                                         struct timespec *timestamp,
+                                         uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                                         struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+                                 struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+                                  const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -189,9 +208,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
-       .rx_queue_release             = ice_rx_queue_release,
+       .rx_queue_release             = ice_dev_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
-       .tx_queue_release             = ice_tx_queue_release,
+       .tx_queue_release             = ice_dev_tx_queue_release,
        .dev_infos_get                = ice_dev_info_get,
        .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
        .link_update                  = ice_link_update,
@@ -229,6 +248,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
        .tx_done_cleanup              = ice_tx_done_cleanup,
        .get_monitor_addr             = ice_get_monitor_addr,
+       .timesync_enable              = ice_timesync_enable,
+       .timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+       .timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+       .timesync_adjust_time         = ice_timesync_adjust_time,
+       .timesync_read_time           = ice_timesync_read_time,
+       .timesync_write_time          = ice_timesync_write_time,
+       .timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -324,6 +350,13 @@ ice_init_controlq_parameter(struct ice_hw *hw)
        hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
        hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
+
+       /* fields for sideband queue */
+       hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+       hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+       hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+       hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
 }
 
 static int
@@ -1454,9 +1487,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        TAILQ_INIT(&vsi->mac_list);
        TAILQ_INIT(&vsi->vlan_list);
 
-       /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+       /* keep in sync with the RTE_ETH_RSS_RETA_SIZE_x maximum value */
        pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-                       ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+                       RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
                        hw->func_caps.common_cap.rss_table_size;
        pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -1828,18 +1861,18 @@ parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs)
                idx = strtoul(str, &end, 10);
                if (end == NULL || idx >= ICE_MAX_PIN_NUM)
                        return -1;
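+               /* skip trailing blanks and require the closing ']' */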
+               while (isblank(*end))
+                       end++;
+               if (*end != ']')
+                       return -1;
 
                devargs->pin_idx = idx;
                devargs->pps_out_ena = 1;
-       }
-
-       while (isblank(*end))
-               end++;
 
-       if (*end != ']')
-               return -1;
+               return 0;
+       }
 
-       return 0;
+       return -1;
 }
 
 static int
@@ -1956,6 +1989,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
        if (ret)
                goto bail;
 
+       ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
+                                &parse_bool, &ad->devargs.rx_low_latency);
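+       /* rx_low_latency is a boolean devarg, e.g. "rx_low_latency=1" */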
+
 bail:
        rte_kvargs_free(kvlist);
        return ret;
@@ -2957,14 +2993,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        int ret;
 
 #define ICE_RSS_HF_ALL ( \
-       ETH_RSS_IPV4 | \
-       ETH_RSS_IPV6 | \
-       ETH_RSS_NONFRAG_IPV4_UDP | \
-       ETH_RSS_NONFRAG_IPV6_UDP | \
-       ETH_RSS_NONFRAG_IPV4_TCP | \
-       ETH_RSS_NONFRAG_IPV6_TCP | \
-       ETH_RSS_NONFRAG_IPV4_SCTP | \
-       ETH_RSS_NONFRAG_IPV6_SCTP)
+       RTE_ETH_RSS_IPV4 | \
+       RTE_ETH_RSS_IPV6 | \
+       RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+       RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+       RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+       RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
        ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
        if (ret)
@@ -2974,7 +3010,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        cfg.symm = 0;
        cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
        /* Configure RSS for IPv4 with src/dst addr as input set */
-       if (rss_hf & ETH_RSS_IPV4) {
+       if (rss_hf & RTE_ETH_RSS_IPV4) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV4;
                ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2984,7 +3020,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for IPv6 with src/dst addr as input set */
-       if (rss_hf & ETH_RSS_IPV6) {
+       if (rss_hf & RTE_ETH_RSS_IPV6) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV6;
                ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2994,7 +3030,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for udp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3005,7 +3041,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for udp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3016,7 +3052,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for tcp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3027,7 +3063,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for tcp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3038,7 +3074,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for sctp4 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3049,7 +3085,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
        }
 
        /* Configure RSS for sctp6 with src/dst addr and port as input set */
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3059,7 +3095,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_IPV4) {
+       if (rss_hf & RTE_ETH_RSS_IPV4) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3069,7 +3105,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_IPV6) {
+       if (rss_hf & RTE_ETH_RSS_IPV6) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
                                ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3079,7 +3115,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3089,7 +3125,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
                                ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3099,7 +3135,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3109,7 +3145,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
                                    __func__, ret);
        }
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
                cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
                                ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
                cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3252,8 +3288,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        if (dev->data->nb_rx_queues) {
                ret = ice_init_rss(pf);
@@ -3272,8 +3308,9 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
 {
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint32_t val, val_tx;
-       int i;
+       int rx_low_latency, i;
 
+       rx_low_latency = vsi->adapter->devargs.rx_low_latency;
        for (i = 0; i < nb_queue; i++) {
                /*do actual bind*/
                val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
@@ -3283,8 +3320,21 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
 
                PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
                            base_queue + i, msix_vect);
+
                /* set ITR0 value */
-               ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
+               if (rx_low_latency) {
+               /*
+                * Empirical configuration for optimal real-time
+                * latency: reduce interrupt throttling to 2us
+                */
+                       ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
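+                       /* NO_EXPR: exempt the Rx queue from ITR expiry
+                        * (per the field name) to cut latency further
+                        */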
+                       ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
+                                     QRX_ITR_NO_EXPR_M);
+               } else {
+                       ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
+                       ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
+               }
+
                ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
                ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
        }
@@ -3519,8 +3569,8 @@ ice_dev_start(struct rte_eth_dev *dev)
        ice_set_rx_function(dev);
        ice_set_tx_function(dev);
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-                       ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+                       RTE_ETH_VLAN_EXTEND_MASK;
        ret = ice_vlan_offload_set(dev, mask);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3560,8 +3610,8 @@ ice_dev_start(struct rte_eth_dev *dev)
        pf->adapter_stopped = false;
 
        /* Set the max frame size to default value*/
-       max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
-               pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
+       max_frame_size = pf->dev_data->mtu ?
+               pf->dev_data->mtu + ICE_ETH_OVERHEAD :
                ICE_FRAME_SIZE_MAX;
 
        /* Set the max frame size to HW*/
@@ -3632,40 +3682,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
        dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_KEEP_CRC |
-               DEV_RX_OFFLOAD_SCATTER |
-               DEV_RX_OFFLOAD_VLAN_FILTER;
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+               RTE_ETH_RX_OFFLOAD_SCATTER |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
        dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_TCP_TSO |
-               DEV_TX_OFFLOAD_MULTI_SEGS |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_TCP_TSO |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
        dev_info->flow_type_rss_offloads = 0;
 
        if (!is_safe_mode) {
                dev_info->rx_offload_capa |=
-                       DEV_RX_OFFLOAD_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_UDP_CKSUM |
-                       DEV_RX_OFFLOAD_TCP_CKSUM |
-                       DEV_RX_OFFLOAD_QINQ_STRIP |
-                       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_VLAN_EXTEND |
-                       DEV_RX_OFFLOAD_RSS_HASH;
+                       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+                       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+                       RTE_ETH_RX_OFFLOAD_RSS_HASH |
+                       RTE_ETH_RX_OFFLOAD_TIMESTAMP;
                dev_info->tx_offload_capa |=
-                       DEV_TX_OFFLOAD_QINQ_INSERT |
-                       DEV_TX_OFFLOAD_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_UDP_CKSUM |
-                       DEV_TX_OFFLOAD_TCP_CKSUM |
-                       DEV_TX_OFFLOAD_SCTP_CKSUM |
-                       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+                       RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+                       RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
                dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
        }
 
        dev_info->rx_queue_offload_capa = 0;
-       dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+       dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
        dev_info->reta_size = pf->hash_lut_size;
        dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3704,24 +3754,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                .nb_align = ICE_ALIGN_RING_DESC,
        };
 
-       dev_info->speed_capa = ETH_LINK_SPEED_10M |
-                              ETH_LINK_SPEED_100M |
-                              ETH_LINK_SPEED_1G |
-                              ETH_LINK_SPEED_2_5G |
-                              ETH_LINK_SPEED_5G |
-                              ETH_LINK_SPEED_10G |
-                              ETH_LINK_SPEED_20G |
-                              ETH_LINK_SPEED_25G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+                              RTE_ETH_LINK_SPEED_100M |
+                              RTE_ETH_LINK_SPEED_1G |
+                              RTE_ETH_LINK_SPEED_2_5G |
+                              RTE_ETH_LINK_SPEED_5G |
+                              RTE_ETH_LINK_SPEED_10G |
+                              RTE_ETH_LINK_SPEED_20G |
+                              RTE_ETH_LINK_SPEED_25G;
 
        phy_type_low = hw->port_info->phy.phy_type_low;
        phy_type_high = hw->port_info->phy.phy_type_high;
 
        if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-               dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
        if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
                        ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-               dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3786,8 +3836,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                status = ice_aq_get_link_info(hw->port_info, enable_lse,
                                              &link_status, NULL);
                if (status != ICE_SUCCESS) {
-                       link.link_speed = ETH_SPEED_NUM_100M;
-                       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+                       link.link_speed = RTE_ETH_SPEED_NUM_100M;
+                       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }
@@ -3803,55 +3853,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                goto out;
 
        /* Full-duplex operation at all supported speeds */
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        /* Parse the link status */
        switch (link_status.link_speed) {
        case ICE_AQ_LINK_SPEED_10MB:
-               link.link_speed = ETH_SPEED_NUM_10M;
+               link.link_speed = RTE_ETH_SPEED_NUM_10M;
                break;
        case ICE_AQ_LINK_SPEED_100MB:
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
        case ICE_AQ_LINK_SPEED_1000MB:
-               link.link_speed = ETH_SPEED_NUM_1G;
+               link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
        case ICE_AQ_LINK_SPEED_2500MB:
-               link.link_speed = ETH_SPEED_NUM_2_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
                break;
        case ICE_AQ_LINK_SPEED_5GB:
-               link.link_speed = ETH_SPEED_NUM_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_5G;
                break;
        case ICE_AQ_LINK_SPEED_10GB:
-               link.link_speed = ETH_SPEED_NUM_10G;
+               link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        case ICE_AQ_LINK_SPEED_20GB:
-               link.link_speed = ETH_SPEED_NUM_20G;
+               link.link_speed = RTE_ETH_SPEED_NUM_20G;
                break;
        case ICE_AQ_LINK_SPEED_25GB:
-               link.link_speed = ETH_SPEED_NUM_25G;
+               link.link_speed = RTE_ETH_SPEED_NUM_25G;
                break;
        case ICE_AQ_LINK_SPEED_40GB:
-               link.link_speed = ETH_SPEED_NUM_40G;
+               link.link_speed = RTE_ETH_SPEED_NUM_40G;
                break;
        case ICE_AQ_LINK_SPEED_50GB:
-               link.link_speed = ETH_SPEED_NUM_50G;
+               link.link_speed = RTE_ETH_SPEED_NUM_50G;
                break;
        case ICE_AQ_LINK_SPEED_100GB:
-               link.link_speed = ETH_SPEED_NUM_100G;
+               link.link_speed = RTE_ETH_SPEED_NUM_100G;
                break;
        case ICE_AQ_LINK_SPEED_UNKNOWN:
                PMD_DRV_LOG(ERR, "Unknown link speed");
-               link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+               link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
                break;
        default:
                PMD_DRV_LOG(ERR, "None link speed");
-               link.link_speed = ETH_SPEED_NUM_NONE;
+               link.link_speed = RTE_ETH_SPEED_NUM_NONE;
                break;
        }
 
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-                             ETH_LINK_SPEED_FIXED);
+                             RTE_ETH_LINK_SPEED_FIXED);
 
 out:
        ice_atomic_write_link_status(dev, &link);
@@ -3930,33 +3980,16 @@ ice_dev_set_link_down(struct rte_eth_dev *dev)
 }
 
 static int
-ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
 {
-       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct rte_eth_dev_data *dev_data = pf->dev_data;
-       uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
-
-       /* check if mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
-               return -EINVAL;
-
        /* mtu setting is forbidden if port is start */
-       if (dev_data->dev_started) {
+       if (dev->data->dev_started != 0) {
                PMD_DRV_LOG(ERR,
                            "port %d must be stopped before configuration",
-                           dev_data->port_id);
+                           dev->data->port_id);
                return -EBUSY;
        }
 
-       if (frame_size > ICE_ETH_MAX_LEN)
-               dev_data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev_data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
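+       /* the frame size is now derived from dev->data->mtu in dev_start */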
        return 0;
 }
 
@@ -4344,15 +4377,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
 
        rxmode = &dev->data->dev_conf.rxmode;
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ice_vsi_config_vlan_filter(vsi, true);
                else
                        ice_vsi_config_vlan_filter(vsi, false);
        }
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ice_vsi_config_vlan_stripping(vsi, true);
                else
                        ice_vsi_config_vlan_stripping(vsi, false);
@@ -4467,8 +4500,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
                goto out;
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        lut[i] = reta_conf[idx].reta[shift];
        }
@@ -4517,8 +4550,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
                goto out;
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = lut[i];
        }
@@ -5427,7 +5460,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
                break;
        default:
@@ -5451,7 +5484,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
                break;
        default:
@@ -5463,6 +5496,184 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
        return ret;
 }
 
+static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       int ret;
+
+       if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
+           RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
+               PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+               return -1;
+       }
+
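+       /* only the function that owns the source timer programs the PHC */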
+       if (hw->func_caps.ts_func_info.src_tmr_owned) {
+               ret = ice_ptp_init_phc(hw);
+               if (ret) {
+                       PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+                       return -1;
+               }
+
+               ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+               if (ret) {
+                       PMD_DRV_LOG(ERR,
+                               "Failed to write PHC increment time value");
+                       return -1;
+               }
+       }
+
+       /* Initialize cycle counters for system time/RX/TX timestamp */
+       memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+       memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+       memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
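+       /* shift/nsec_mask stay zero: the PHC already counts nanoseconds */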
+       ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+       ad->systime_tc.cc_shift = 0;
+       ad->systime_tc.nsec_mask = 0;
+
+       ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+       ad->rx_tstamp_tc.cc_shift = 0;
+       ad->rx_tstamp_tc.nsec_mask = 0;
+
+       ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+       ad->tx_tstamp_tc.cc_shift = 0;
+       ad->tx_tstamp_tc.nsec_mask = 0;
+
+       ad->ptp_ena = 1;
+
+       return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                              struct timespec *timestamp, uint32_t flags)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct ice_rx_queue *rxq;
+       uint32_t ts_high;
+       uint64_t ts_ns, ns;
+
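+       /* 'flags' is device specific; this PMD takes it as the Rx queue id */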
+       rxq = dev->data->rx_queues[flags];
+
+       ts_high = rxq->time_high;
+       ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+       ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                              struct timespec *timestamp)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint8_t lport;
+       uint64_t ts_ns, ns, tstamp;
+       const uint64_t mask = 0xFFFFFFFF;
+       int ret;
+
+       lport = hw->port_info->lport;
+
+       ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+               return -1;
+       }
+
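+       /* keep the 32-bit nanosecond part of the 40-bit PHY timestamp,
+        * then extend it to 64 bits
+        */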
+       ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+       ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
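+       /* only the software timecounters move; the HW clock is untouched */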
+       ad->systime_tc.nsec += delta;
+       ad->rx_tstamp_tc.nsec += delta;
+       ad->tx_tstamp_tc.nsec += delta;
+
+       return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint64_t ns;
+
+       ns = rte_timespec_to_ns(ts);
+
+       ad->systime_tc.nsec = ns;
+       ad->rx_tstamp_tc.nsec = ns;
+       ad->tx_tstamp_tc.nsec = ns;
+
+       return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint32_t hi, lo, lo2;
+       uint64_t time, ns;
+
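+       /* read low, high, low again: if the low word wrapped in between,
+        * the high word may be stale, so sample the pair once more
+        */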
+       lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+       hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+       lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+       if (lo2 < lo) {
+               lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+               hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+       }
+
+       time = ((uint64_t)hi << 32) | lo;
+       ns = rte_timecounter_update(&ad->systime_tc, time);
+       *ts = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint64_t val;
+       uint8_t lport;
+
+       lport = hw->port_info->lport;
+
+       ice_clear_phy_tstamp(hw, lport, 0);
+
+       val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+       val &= ~GLTSYN_ENA_TSYN_ENA_M;
+       ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
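+       /* zero the increment value so the PHC stops advancing */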
+       ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+       ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+       ad->ptp_ena = 0;
+
+       return 0;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
              struct rte_pci_device *pci_dev)
@@ -5497,7 +5708,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_HW_DEBUG_MASK_ARG "=0xXXX"
                              ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
                              ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
-                             ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
+                             ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
+                             ICE_RX_LOW_LATENCY_ARG "=<0|1>");
 
 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);