mbuf: add namespace to offload flags
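
Rename the mbuf offload flags into the RTE_MBUF_F_ namespace (PKT_RX_* ->
RTE_MBUF_F_RX_*, PKT_TX_* -> RTE_MBUF_F_TX_*). This blobdiff of ice_rxtx.c
also spans the matching ethdev rename (DEV_RX/TX_OFFLOAD_* ->
RTE_ETH_RX/TX_OFFLOAD_*) plus MTU-based Rx sizing, Rx/Tx PTP handling, and
per-queue memzone tracking. Application code follows the same mechanical
substitution; a minimal sketch, assuming only <rte_mbuf.h> (the helper name
is illustrative, not part of the patch):

#include <rte_mbuf.h>

/* Same check as before the rename; only the flag name changes. */
static inline int
pkt_has_rss_hash(const struct rte_mbuf *m)
{
	return (m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0; /* was PKT_RX_RSS_HASH */
}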
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index bb75183..e721766 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
 #include "ice_rxtx.h"
 #include "ice_rxtx_vec_common.h"
 
-#define ICE_TX_CKSUM_OFFLOAD_MASK (             \
-               PKT_TX_IP_CKSUM |                \
-               PKT_TX_L4_MASK |                 \
-               PKT_TX_TCP_SEG |                 \
-               PKT_TX_OUTER_IP_CKSUM)
+#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |             \
+               RTE_MBUF_F_TX_L4_MASK |          \
+               RTE_MBUF_F_TX_TCP_SEG |          \
+               RTE_MBUF_F_TX_OUTER_IP_CKSUM)
 
 /* Offset of mbuf dynamic field for protocol extraction data */
 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
@@ -88,13 +87,13 @@ ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
        uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
 
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
-               mb->ol_flags |= PKT_RX_RSS_HASH;
+               mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }
 
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (desc->flow_id != 0xFFFFFFFF) {
-               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }
 #endif
@@ -112,14 +111,14 @@ ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
 #endif
 
        if (desc->flow_id != 0xFFFFFFFF) {
-               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }
 
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        stat_err = rte_le_to_cpu_16(desc->status_error0);
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
-               mb->ol_flags |= PKT_RX_RSS_HASH;
+               mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }
 #endif
@@ -136,13 +135,13 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
 
        stat_err = rte_le_to_cpu_16(desc->status_error0);
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
-               mb->ol_flags |= PKT_RX_RSS_HASH;
+               mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }
 
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (desc->flow_id != 0xFFFFFFFF) {
-               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }
 
@@ -178,13 +177,13 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
 
        stat_err = rte_le_to_cpu_16(desc->status_error0);
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
-               mb->ol_flags |= PKT_RX_RSS_HASH;
+               mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }
 
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (desc->flow_id != 0xFFFFFFFF) {
-               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }
 
@@ -267,42 +266,30 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        struct ice_rlan_ctx rx_ctx;
        enum ice_status err;
        uint16_t buf_size;
-       struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
        uint32_t rxdid = ICE_RXDID_COMMS_OVS;
        uint32_t regval;
+       struct ice_adapter *ad = rxq->vsi->adapter;
+       uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
 
        /* Set buffer size as the head split is disabled. */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                              RTE_PKTMBUF_HEADROOM);
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
-       rxq->max_pkt_len = RTE_MIN((uint32_t)
-                                  ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
-                                  dev_data->dev_conf.rxmode.max_rx_pkt_len);
-
-       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
-                   rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
-                       PMD_DRV_LOG(ERR, "maximum packet length must "
-                                   "be larger than %u and smaller than %u,"
-                                   "as jumbo frame is enabled",
-                                   (uint32_t)ICE_ETH_MAX_LEN,
-                                   (uint32_t)ICE_FRAME_SIZE_MAX);
-                       return -EINVAL;
-               }
-       } else {
-               if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
-                   rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
-                       PMD_DRV_LOG(ERR, "maximum packet length must be "
-                                   "larger than %u and smaller than %u, "
-                                   "as jumbo frame is disabled",
-                                   (uint32_t)RTE_ETHER_MIN_LEN,
-                                   (uint32_t)ICE_ETH_MAX_LEN);
-                       return -EINVAL;
-               }
+       rxq->max_pkt_len =
+               RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+                       frame_size);
+
+       if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
+           rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
+               PMD_DRV_LOG(ERR, "maximum packet length must "
+                           "be larger than %u and smaller than %u",
+                           (uint32_t)RTE_ETHER_MIN_LEN,
+                           (uint32_t)ICE_FRAME_SIZE_MAX);
+               return -EINVAL;
        }
 
-       if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
                /* Register mbuf field and flag for Rx timestamp */
                err = rte_mbuf_dyn_rx_timestamp_register(
                                &ice_timestamp_dynfield_offset,
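
The jumbo-frame branch above is gone: the Rx frame limit now derives from
dev_data->mtu plus ICE_ETH_OVERHEAD and is validated against a single
(RTE_ETHER_MIN_LEN, ICE_FRAME_SIZE_MAX) range. On the application side,
large frames are requested through the MTU rather than the removed
DEV_RX_OFFLOAD_JUMBO_FRAME/max_rx_pkt_len pair; a hedged sketch using the
standard ethdev call (port id and value are illustrative):

#include <rte_ethdev.h>

/* Sketch: request ~9000-byte frames via MTU; the driver converts the
 * MTU to a frame size with its own L2 overhead, as above. */
static int
enable_jumbo(uint16_t port_id)
{
	return rte_eth_dev_set_mtu(port_id, 9000);
}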
@@ -366,7 +353,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-       if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+       if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
                regval |= QRXFLXP_CNTXT_TS_M;
 
        ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -384,11 +371,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
                return -EINVAL;
        }
 
-       buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
-                             RTE_PKTMBUF_HEADROOM);
-
        /* Check if scattered RX needs to be used. */
-       if (rxq->max_pkt_len > buf_size)
+       if (frame_size > buf_size)
                dev_data->scattered_rx = 1;
 
        rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
@@ -704,6 +688,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        tx_ctx.tso_ena = 1; /* tso enable */
        tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
        tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+       tx_ctx.tsyn_ena = 1;
 
        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
                    ice_tlan_ctx_info);
@@ -1118,7 +1103,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
        rxq->reg_idx = vsi->base_queue + queue_idx;
        rxq->port_id = dev->data->port_id;
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;
@@ -1150,6 +1135,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
@@ -1205,6 +1191,7 @@ ice_rx_queue_release(void *rxq)
 
        q->rx_rel_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
 
@@ -1351,6 +1338,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = nb_desc;
        txq->tx_rs_thresh = tx_rs_thresh;
        txq->tx_free_thresh = tx_free_thresh;
@@ -1389,6 +1377,18 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
 
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 ice_tx_queue_release(void *txq)
 {
@@ -1401,6 +1401,7 @@ ice_tx_queue_release(void *txq)
 
        q->tx_rel_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
 
@@ -1442,14 +1443,14 @@ ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 uint32_t
-ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+ice_rx_queue_count(void *rx_queue)
 {
 #define ICE_RXQ_SCAN_INTERVAL 4
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        while ((desc < rxq->nb_rx_desc) &&
               rte_le_to_cpu_16(rxdp->wb.status_error0) &
@@ -1488,27 +1489,27 @@ ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
                return 0;
 
        if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
-               flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+               flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
                return flags;
        }
 
        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
-               flags |= PKT_RX_IP_CKSUM_BAD;
+               flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
        else
-               flags |= PKT_RX_IP_CKSUM_GOOD;
+               flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 
        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
-               flags |= PKT_RX_L4_CKSUM_BAD;
+               flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
        else
-               flags |= PKT_RX_L4_CKSUM_GOOD;
+               flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 
        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
-               flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+               flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
 
        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
-               flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+               flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
        else
-               flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+               flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
 
        return flags;
 }
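
The renamed checksum status flags keep their old semantics (GOOD/BAD pairs,
plus outer-L4 reporting). A consumer-side sketch; the helper below is
hypothetical application code, not part of this patch:

#include <stdbool.h>
#include <rte_mbuf.h>

/* Drop packets whose inner L3/L4 checksum the NIC flagged bad. */
static inline bool
rx_cksum_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD |
			       RTE_MBUF_F_RX_L4_CKSUM_BAD)) == 0;
}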
@@ -1518,7 +1519,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
 {
        if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
            (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
-               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                mb->vlan_tci =
                        rte_le_to_cpu_16(rxdp->wb.l2tag1);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@@ -1530,8 +1531,8 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
            (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
-               mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
-                               PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+               mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
+                               RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
                mb->vlan_tci_outer = mb->vlan_tci;
                mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@@ -1561,10 +1562,12 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
        int32_t i, j, nb_rx = 0;
        uint64_t pkt_flags = 0;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint64_t ts_ns;
-
+       struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        rxep = &rxq->sw_ring[rxq->rx_tail];
 
@@ -1606,8 +1609,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
                                rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
                        ice_rxd_to_vlan_tci(mb, &rxdp[j]);
                        rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
-
-                       if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+                       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
                                ts_ns = ice_tstamp_convert_32b_64b(hw,
                                        rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
                                if (ice_timestamp_dynflag > 0) {
@@ -1618,6 +1621,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
                                }
                        }
 
+                       if (ad->ptp_ena && ((mb->packet_type &
+                           RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+                               rxq->time_high =
+                                  rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+                               mb->timesync = rxq->queue_id;
+                               pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+                       }
+#endif
                        mb->ol_flags |= pkt_flags;
                }
 
@@ -1801,10 +1812,12 @@ ice_recv_scattered_pkts(void *rx_queue,
        uint64_t dma_addr;
        uint64_t pkt_flags;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint64_t ts_ns;
-
+       struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
                rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -1914,8 +1927,8 @@ ice_recv_scattered_pkts(void *rx_queue,
                ice_rxd_to_vlan_tci(first_seg, &rxd);
                rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
                pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
-
-               if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
                        ts_ns = ice_tstamp_convert_32b_64b(hw,
                                rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
                        if (ice_timestamp_dynflag > 0) {
@@ -1926,6 +1939,14 @@ ice_recv_scattered_pkts(void *rx_queue,
                        }
                }
 
+               if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+                   == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+                       rxq->time_high =
+                          rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+                       first_seg->timesync = rxq->queue_id;
+                       pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+               }
+#endif
                first_seg->ol_flags |= pkt_flags;
                /* Prefetch data of first segment, if configured to do so. */
                rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2124,7 +2145,6 @@ ice_free_queues(struct rte_eth_dev *dev)
                        continue;
                ice_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
@@ -2133,7 +2153,6 @@ ice_free_queues(struct rte_eth_dev *dev)
                        continue;
                ice_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
        dev->data->nb_tx_queues = 0;
 }
@@ -2180,6 +2199,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
        txq->queue_id = ICE_FDIR_QUEUE_ID;
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2238,6 +2258,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
        rxq->queue_id = ICE_FDIR_QUEUE_ID;
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2281,10 +2302,12 @@ ice_recv_pkts(void *rx_queue,
        uint64_t dma_addr;
        uint64_t pkt_flags;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint64_t ts_ns;
-
+       struct ice_adapter *ad = rxq->vsi->adapter;
+#endif
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
                rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -2335,8 +2358,8 @@ ice_recv_pkts(void *rx_queue,
                ice_rxd_to_vlan_tci(rxm, &rxd);
                rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
                pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
-
-               if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
                        ts_ns = ice_tstamp_convert_32b_64b(hw,
                                rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
                        if (ice_timestamp_dynflag > 0) {
@@ -2347,6 +2370,14 @@ ice_recv_pkts(void *rx_queue,
                        }
                }
 
+               if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+                   RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+                       rxq->time_high =
+                          rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+                       rxm->timesync = rxq->queue_id;
+                       pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+               }
+#endif
                rxm->ol_flags |= pkt_flags;
                /* copy old mbuf to rx_pkts */
                rx_pkts[nb_rx++] = rxm;
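
All three Rx paths now tag PTP packets when ad->ptp_ena is set: the 32-bit
timestamp is latched in rxq->time_high, the queue id is stashed in
mbuf->timesync, and RTE_MBUF_F_RX_IEEE1588_PTP is raised. A sketch of the
matching application side, assuming the generic ethdev timesync API (names
and error handling are illustrative):

#include <stdio.h>
#include <time.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
handle_ptp_pkt(uint16_t port_id, struct rte_mbuf *m)
{
	struct timespec ts;

	/* m->timesync carries the queue id set by the PMD above. */
	if ((m->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP) &&
	    rte_eth_timesync_read_rx_timestamp(port_id, &ts,
					       m->timesync) == 0)
		printf("Rx PTP ts: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
}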
@@ -2378,11 +2409,11 @@ ice_parse_tunneling_params(uint64_t ol_flags,
                            uint32_t *cd_tunneling)
 {
        /* EIPT: External (outer) IP header type */
-       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+       if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
                *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
-       else if (ol_flags & PKT_TX_OUTER_IPV4)
+       else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
                *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
-       else if (ol_flags & PKT_TX_OUTER_IPV6)
+       else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
                *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
 
        /* EIPLEN: External (outer) IP header length, in DWords */
@@ -2390,16 +2421,16 @@ ice_parse_tunneling_params(uint64_t ol_flags,
                ICE_TXD_CTX_QW0_EIPLEN_S;
 
        /* L4TUNT: L4 Tunneling Type */
-       switch (ol_flags & PKT_TX_TUNNEL_MASK) {
-       case PKT_TX_TUNNEL_IPIP:
+       switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+       case RTE_MBUF_F_TX_TUNNEL_IPIP:
                /* for non UDP / GRE tunneling, set to 00b */
                break;
-       case PKT_TX_TUNNEL_VXLAN:
-       case PKT_TX_TUNNEL_GTP:
-       case PKT_TX_TUNNEL_GENEVE:
+       case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+       case RTE_MBUF_F_TX_TUNNEL_GTP:
+       case RTE_MBUF_F_TX_TUNNEL_GENEVE:
                *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
                break;
-       case PKT_TX_TUNNEL_GRE:
+       case RTE_MBUF_F_TX_TUNNEL_GRE:
                *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
                break;
        default:
@@ -2436,7 +2467,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
                        union ice_tx_offload tx_offload)
 {
        /* Set MACLEN */
-       if (ol_flags & PKT_TX_TUNNEL_MASK)
+       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
                *td_offset |= (tx_offload.outer_l2_len >> 1)
                        << ICE_TX_DESC_LEN_MACLEN_S;
        else
@@ -2444,21 +2475,21 @@ ice_txd_enable_checksum(uint64_t ol_flags,
                        << ICE_TX_DESC_LEN_MACLEN_S;
 
        /* Enable L3 checksum offloads */
-       if (ol_flags & PKT_TX_IP_CKSUM) {
+       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
                *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
                *td_offset |= (tx_offload.l3_len >> 2) <<
                              ICE_TX_DESC_LEN_IPLEN_S;
-       } else if (ol_flags & PKT_TX_IPV4) {
+       } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
                *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
                *td_offset |= (tx_offload.l3_len >> 2) <<
                              ICE_TX_DESC_LEN_IPLEN_S;
-       } else if (ol_flags & PKT_TX_IPV6) {
+       } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
                *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
                *td_offset |= (tx_offload.l3_len >> 2) <<
                              ICE_TX_DESC_LEN_IPLEN_S;
        }
 
-       if (ol_flags & PKT_TX_TCP_SEG) {
+       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
                *td_offset |= (tx_offload.l4_len >> 2) <<
                              ICE_TX_DESC_LEN_L4_LEN_S;
@@ -2466,18 +2497,18 @@ ice_txd_enable_checksum(uint64_t ol_flags,
        }
 
        /* Enable L4 checksum offloads */
-       switch (ol_flags & PKT_TX_L4_MASK) {
-       case PKT_TX_TCP_CKSUM:
+       switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+       case RTE_MBUF_F_TX_TCP_CKSUM:
                *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
                *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
                              ICE_TX_DESC_LEN_L4_LEN_S;
                break;
-       case PKT_TX_SCTP_CKSUM:
+       case RTE_MBUF_F_TX_SCTP_CKSUM:
                *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
                *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
                              ICE_TX_DESC_LEN_L4_LEN_S;
                break;
-       case PKT_TX_UDP_CKSUM:
+       case RTE_MBUF_F_TX_UDP_CKSUM:
                *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
                *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
                              ICE_TX_DESC_LEN_L4_LEN_S;
@@ -2555,10 +2586,11 @@ ice_build_ctob(uint32_t td_cmd,
 static inline uint16_t
 ice_calc_context_desc(uint64_t flags)
 {
-       static uint64_t mask = PKT_TX_TCP_SEG |
-               PKT_TX_QINQ |
-               PKT_TX_OUTER_IP_CKSUM |
-               PKT_TX_TUNNEL_MASK;
+       static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
+               RTE_MBUF_F_TX_QINQ |
+               RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+               RTE_MBUF_F_TX_TUNNEL_MASK |
+               RTE_MBUF_F_TX_IEEE1588_TMST;
 
        return (flags & mask) ? 1 : 0;
 }
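
ice_calc_context_desc() gains RTE_MBUF_F_TX_IEEE1588_TMST in its mask, so
timesync packets also get a context descriptor (filled with
ICE_TX_CTX_DESC_TSYN later in ice_xmit_pkts()). For reference, a hedged
sketch of how a sender requests TSO with the renamed flags (IPv4/TCP headers
assumed; the helper is illustrative):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM;
	m->tso_segsz = mss; /* must stay within ICE_MIN/MAX_TSO_MSS */
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
}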
@@ -2576,7 +2608,7 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
        }
 
        hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
-       hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+       hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
                   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
 
        cd_cmd = ICE_TX_CTX_DESC_TSO;
@@ -2663,7 +2695,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * the mbuf data size exceeds max data size that hw allows
                 * per tx desc.
                 */
-               if (ol_flags & PKT_TX_TCP_SEG)
+               if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
                        nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
                                             nb_ctx);
                else
@@ -2692,14 +2724,14 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                }
 
                /* Descriptor based VLAN insertion */
-               if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+               if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
                        td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
                        td_tag = tx_pkt->vlan_tci;
                }
 
                /* Fill in tunneling parameters if necessary */
                cd_tunneling_params = 0;
-               if (ol_flags & PKT_TX_TUNNEL_MASK)
+               if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
                        ice_parse_tunneling_params(ol_flags, tx_offload,
                                                   &cd_tunneling_params);
 
@@ -2723,15 +2755,19 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                txe->mbuf = NULL;
                        }
 
-                       if (ol_flags & PKT_TX_TCP_SEG)
+                       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
                                cd_type_cmd_tso_mss |=
                                        ice_set_tso_ctx(tx_pkt, tx_offload);
+                       else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+                               cd_type_cmd_tso_mss |=
+                                       ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+                                       ICE_TXD_CTX_QW1_CMD_S);
 
                        ctx_txd->tunneling_params =
                                rte_cpu_to_le_32(cd_tunneling_params);
 
                        /* TX context descriptor based double VLAN insert */
-                       if (ol_flags & PKT_TX_QINQ) {
+                       if (ol_flags & RTE_MBUF_F_TX_QINQ) {
                                cd_l2tag2 = tx_pkt->vlan_tci_outer;
                                cd_type_cmd_tso_mss |=
                                        ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
@@ -2759,7 +2795,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        slen = m_seg->data_len;
                        buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
-                       while ((ol_flags & PKT_TX_TCP_SEG) &&
+                       while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
                                unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
                                txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
                                txd->cmd_type_offset_bsz =
@@ -2839,7 +2875,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
        for (i = 0; i < txq->tx_rs_thresh; i++)
                rte_prefetch0((txep + i)->mbuf);
 
-       if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+       if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
                for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
                        rte_mempool_put(txep->mbuf->pool, txep->mbuf);
                        txep->mbuf = NULL;
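
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE (ex DEV_TX_OFFLOAD_MBUF_FAST_FREE) lets
this free path return mbufs straight to one pool without per-mbuf checks. A
configuration sketch, assuming the offload is requested at queue setup only
when advertised (standard ethdev names, error handling trimmed):

#include <rte_ethdev.h>

static int
setup_txq_fast_free(uint16_t port_id, uint16_t qid, uint16_t nb_desc)
{
	struct rte_eth_dev_info info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	txconf = info.default_txconf;
	/* Safe only if all Tx mbufs share one pool and refcnt == 1. */
	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txconf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	return rte_eth_tx_queue_setup(port_id, qid, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}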
@@ -3127,6 +3163,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                ad->rx_use_avx512 = false;
                ad->rx_use_avx2 = false;
                rx_check_ret = ice_rx_vec_dev_check(dev);
+               if (ad->ptp_ena)
+                       rx_check_ret = -1;
                if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
                    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
                        ad->rx_vec_allowed = true;
@@ -3313,7 +3351,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
        /* Use a simple Tx queue if possible (only fast free is allowed) */
        ad->tx_simple_allowed =
                (txq->offloads ==
-               (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+               (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
                txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
        if (ad->tx_simple_allowed)
@@ -3346,7 +3384,7 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                m = tx_pkts[i];
                ol_flags = m->ol_flags;
 
-               if (ol_flags & PKT_TX_TCP_SEG &&
+               if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
                    (m->tso_segsz < ICE_MIN_TSO_MSS ||
                     m->tso_segsz > ICE_MAX_TSO_MSS ||
                     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {