drivers/net/ice/ice_rxtx_vec_avx512.c
index 0a3e8da..5bfd515 100644
@@ -3,6 +3,7 @@
  */
 
 #include "ice_rxtx_vec_common.h"
+#include "ice_rxtx_common_avx.h"
 
 #include <rte_vect.h>
 
@@ -12,7 +13,7 @@
 
 #define ICE_DESCS_PER_LOOP_AVX 8
 
-static inline void
+static __rte_always_inline void
 ice_rxq_rearm(struct ice_rx_queue *rxq)
 {
        int i;
@@ -24,6 +25,9 @@ ice_rxq_rearm(struct ice_rx_queue *rxq)
 
        rxdp = rxq->rx_ring + rxq->rxrearm_start;
 
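+       /* no per-lcore mempool cache available: fall back to the common rearm path */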
+       if (unlikely(!cache))
+               return ice_rxq_rearm_common(rxq, true);
+
        /* We need to pull 'n' more MBUFs into the software ring */
        if (cache->len < ICE_RXQ_REARM_THRESH) {
                uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size -
@@ -132,10 +136,10 @@ static inline __m256i
 ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
 {
 #define FDID_MIS_MAGIC 0xFFFFFFFF
-       RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
-       RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
-       const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
-                       PKT_RX_FDIR_ID);
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+       const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
+                       RTE_MBUF_F_RX_FDIR_ID);
        /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
        const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
        __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
@@ -147,10 +151,12 @@ ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
        return fdir_flags;
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                              struct rte_mbuf **rx_pkts,
-                             uint16_t nb_pkts, uint8_t *split_packet)
+                             uint16_t nb_pkts,
+                             uint8_t *split_packet,
+                             bool do_offload)
 {
        const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
        const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
@@ -221,6 +227,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+       /* the following code block handles Rx checksum offload */
        /* Status/Error flag masks */
        /**
         * mask everything except Checksum Reports, RSS indication
@@ -236,82 +243,82 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 * bits.  This gives us the l3_l4 flags.
         */
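+       /*
+        * the low nibble of each descriptor's shifted status bits selects one
+        * of the 16 byte entries per 128-bit lane; entries are stored >> 1 so
+        * they fit in a byte, and are restored by a 1-bit left shift later
+        */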
        const __m256i l3_l4_flags_shuf =
-               _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
-                PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
-                 PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
+               _mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
+                RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD  |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD  |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
                /**
                 * second 128-bits
                 * shift right 20 bits to use the low two bits to indicate
                 * outer checksum status
                * shift right 1 bit to make sure it does not exceed 255
                 */
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD  |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
-                PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
-                PKT_RX_IP_CKSUM_GOOD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_BAD) >> 1,
-               (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
-                PKT_RX_IP_CKSUM_GOOD) >> 1);
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD  |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD  |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+               (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
        const __m256i cksum_mask =
-                _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
-                                  PKT_RX_L4_CKSUM_MASK |
-                                  PKT_RX_OUTER_IP_CKSUM_BAD |
-                                  PKT_RX_OUTER_L4_CKSUM_MASK);
+                _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
+                                  RTE_MBUF_F_RX_L4_CKSUM_MASK |
+                                  RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                                  RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
        /**
         * data to be shuffled by result of flag mask, shifted down 12.
         * If RSS(bit12)/VLAN(bit13) are set,
@@ -320,16 +327,16 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
        const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0,
-                       PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
-                       PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
-                       PKT_RX_RSS_HASH, 0,
+                       RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+                       RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+                       RTE_MBUF_F_RX_RSS_HASH, 0,
                        /* 2nd 128-bits */
                        0, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0,
-                       PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
-                       PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
-                       PKT_RX_RSS_HASH, 0);
+                       RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+                       RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+                       RTE_MBUF_F_RX_RSS_HASH, 0);
 
        uint16_t i, received;
 
@@ -484,37 +491,42 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                __m256i status0_7 = _mm512_extracti64x4_epi64
                        (raw_status0_7, 0);
 
-               /* now do flag manipulation */
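+               /* flags default to zero; the offload path ORs status bits in below */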
+               __m256i mbuf_flags = _mm256_set1_epi32(0);
 
-               /* get only flag/error bits we want */
-               const __m256i flag_bits =
-                       _mm256_and_si256(status0_7, flags_mask);
-               /**
-                * l3_l4_error flags, shuffle, then shift to correct adjustment
-                * of flags in flags_shuf, and finally mask out extra bits
-                */
-               __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
-                               _mm256_srli_epi32(flag_bits, 4));
-               l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
-               __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
-               __m256i l4_outer_flags =
-                               _mm256_and_si256(l3_l4_flags, l4_outer_mask);
-               l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
-               __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
-               l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
-               l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
-               l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
-               /* set rss and vlan flags */
-               const __m256i rss_vlan_flag_bits =
-                       _mm256_srli_epi32(flag_bits, 12);
-               const __m256i rss_vlan_flags =
-                       _mm256_shuffle_epi8(rss_vlan_flags_shuf,
-                                           rss_vlan_flag_bits);
-
-               /* merge flags */
-               __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+               if (do_offload) {
+                       /* now do flag manipulation */
+
+                       /* get only flag/error bits we want */
+                       const __m256i flag_bits =
+                               _mm256_and_si256(status0_7, flags_mask);
+                       /**
+                        * l3_l4_error flags, shuffle, then shift to correct adjustment
+                        * of flags in flags_shuf, and finally mask out extra bits
+                        */
+                       __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+                                       _mm256_srli_epi32(flag_bits, 4));
+                       l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+                       __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+                       __m256i l4_outer_flags =
+                                       _mm256_and_si256(l3_l4_flags, l4_outer_mask);
+                       l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
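+                       /* outer L4 status moves from bits 1-2 back up to mbuf
+                        * bits 21-22 (RTE_MBUF_F_RX_OUTER_L4_CKSUM_*)
+                        */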
+
+                       __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+                       l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+                       l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+                       l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+                       /* set rss and vlan flags */
+                       const __m256i rss_vlan_flag_bits =
+                               _mm256_srli_epi32(flag_bits, 12);
+                       const __m256i rss_vlan_flags =
+                               _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+                                                   rss_vlan_flag_bits);
+
+                       /* merge flags */
+                       mbuf_flags = _mm256_or_si256(l3_l4_flags,
                                                     rss_vlan_flags);
+               }
 
                if (rxq->fdir_enabled) {
                        const __m256i fdir_id4_7 =
@@ -526,12 +538,19 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                        const __m256i fdir_id0_7 =
                                _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
 
-                       const __m256i fdir_flags =
-                               ice_flex_rxd_to_fdir_flags_vec_avx512
-                                       (fdir_id0_7);
-
-                       /* merge with fdir_flags */
-                       mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+                       if (do_offload) {
+                               const __m256i fdir_flags =
+                                       ice_flex_rxd_to_fdir_flags_vec_avx512
+                                               (fdir_id0_7);
+
+                               /* merge with fdir_flags */
+                               mbuf_flags = _mm256_or_si256
+                                               (mbuf_flags, fdir_flags);
+                       } else {
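+                               /* no other flag bits were computed, so the
+                                * FDIR flags can be assigned directly
+                                */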
+                               mbuf_flags =
+                                       ice_flex_rxd_to_fdir_flags_vec_avx512
+                                               (fdir_id0_7);
+                       }
 
                        /* write to mbuf: have to use scalar store here */
                        rx_pkts[i + 0]->hash.fdir.hi =
@@ -559,95 +578,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                                _mm256_extract_epi32(fdir_id0_7, 4);
                } /* if() on fdir_enabled */
 
+               if (do_offload) {
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-               /**
-                * needs to load 2nd 16B of each desc for RSS hash parsing,
-                * will cause performance drop to get into this context.
-                */
-               if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-                               DEV_RX_OFFLOAD_RSS_HASH) {
-                       /* load bottom half of every 32B desc */
-                       const __m128i raw_desc_bh7 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[7].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh6 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[6].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh5 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[5].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh4 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[4].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh3 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[3].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh2 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[2].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh1 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[1].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh0 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[0].wb.status_error1));
-
-                       __m256i raw_desc_bh6_7 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh6),
-                                       raw_desc_bh7, 1);
-                       __m256i raw_desc_bh4_5 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh4),
-                                       raw_desc_bh5, 1);
-                       __m256i raw_desc_bh2_3 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh2),
-                                       raw_desc_bh3, 1);
-                       __m256i raw_desc_bh0_1 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh0),
-                                       raw_desc_bh1, 1);
-
                        /**
-                        * to shift the 32b RSS hash value to the
-                        * highest 32b of each 128b before mask
+                        * need to load the 2nd 16B of each desc for RSS hash parsing;
+                        * getting into this context causes a performance drop.
                         */
-                       __m256i rss_hash6_7 =
-                               _mm256_slli_epi64(raw_desc_bh6_7, 32);
-                       __m256i rss_hash4_5 =
-                               _mm256_slli_epi64(raw_desc_bh4_5, 32);
-                       __m256i rss_hash2_3 =
-                               _mm256_slli_epi64(raw_desc_bh2_3, 32);
-                       __m256i rss_hash0_1 =
-                               _mm256_slli_epi64(raw_desc_bh0_1, 32);
-
-                       __m256i rss_hash_msk =
-                               _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
-                                                0xFFFFFFFF, 0, 0, 0);
-
-                       rss_hash6_7 = _mm256_and_si256
-                                       (rss_hash6_7, rss_hash_msk);
-                       rss_hash4_5 = _mm256_and_si256
-                                       (rss_hash4_5, rss_hash_msk);
-                       rss_hash2_3 = _mm256_and_si256
-                                       (rss_hash2_3, rss_hash_msk);
-                       rss_hash0_1 = _mm256_and_si256
-                                       (rss_hash0_1, rss_hash_msk);
-
-                       mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
-                       mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
-                       mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
-                       mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
-               } /* if() on RSS hash parsing */
+                       if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
+                                       RTE_ETH_RX_OFFLOAD_RSS_HASH) {
+                               /* load bottom half of every 32B desc */
+                               const __m128i raw_desc_bh7 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[7].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh6 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[6].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh5 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[5].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh4 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[4].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh3 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[3].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh2 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[2].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh1 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[1].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh0 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[0].wb.status_error1));
+
+                               __m256i raw_desc_bh6_7 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh6),
+                                               raw_desc_bh7, 1);
+                               __m256i raw_desc_bh4_5 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh4),
+                                               raw_desc_bh5, 1);
+                               __m256i raw_desc_bh2_3 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh2),
+                                               raw_desc_bh3, 1);
+                               __m256i raw_desc_bh0_1 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh0),
+                                               raw_desc_bh1, 1);
+
+                               /**
+                                * shift the 32b RSS hash value into the
+                                * highest 32b of each 128b before masking
+                                */
+                               __m256i rss_hash6_7 =
+                                       _mm256_slli_epi64(raw_desc_bh6_7, 32);
+                               __m256i rss_hash4_5 =
+                                       _mm256_slli_epi64(raw_desc_bh4_5, 32);
+                               __m256i rss_hash2_3 =
+                                       _mm256_slli_epi64(raw_desc_bh2_3, 32);
+                               __m256i rss_hash0_1 =
+                                       _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+                               __m256i rss_hash_msk =
+                                       _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+                                                        0xFFFFFFFF, 0, 0, 0);
+
+                               rss_hash6_7 = _mm256_and_si256
+                                               (rss_hash6_7, rss_hash_msk);
+                               rss_hash4_5 = _mm256_and_si256
+                                               (rss_hash4_5, rss_hash_msk);
+                               rss_hash2_3 = _mm256_and_si256
+                                               (rss_hash2_3, rss_hash_msk);
+                               rss_hash0_1 = _mm256_and_si256
+                                               (rss_hash0_1, rss_hash_msk);
+
+                               mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+                               mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+                               mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+                               mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+                       } /* if() on RSS hash parsing */
 #endif
+               }
 
                /**
                 * At this point, we have the 8 sets of flags in the low 16-bits
@@ -803,7 +824,19 @@ uint16_t
 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
-       return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+       return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
+}
+
+/**
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                uint16_t nb_pkts)
+{
+       return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
+                                            nb_pkts, NULL, true);
 }
 
 /**
@@ -820,7 +853,49 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 
        /* get some new buffers */
        uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
-                                                      split_flags);
+                                                      split_flags, false);
+       if (nb_bufs == 0)
+               return 0;
+
+       /* happy day case, full burst + no packets to be joined */
+       const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+       if (!rxq->pkt_first_seg &&
+           split_fl64[0] == 0 && split_fl64[1] == 0 &&
+           split_fl64[2] == 0 && split_fl64[3] == 0)
+               return nb_bufs;
+
+       /* reassemble any packets that need reassembly */
+       unsigned int i = 0;
+
+       if (!rxq->pkt_first_seg) {
+               /* find the first split flag, and only reassemble then */
+               while (i < nb_bufs && !split_flags[i])
+                       i++;
+               if (i == nb_bufs)
+                       return nb_bufs;
+               rxq->pkt_first_seg = rx_pkts[i];
+       }
+       return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+                                            &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+                                           struct rte_mbuf **rx_pkts,
+                                           uint16_t nb_pkts)
+{
+       struct ice_rx_queue *rxq = rx_queue;
+       uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+       /* get some new buffers */
+       uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
+                               rx_pkts, nb_pkts, split_flags, true);
        if (nb_bufs == 0)
                return 0;
 
@@ -871,6 +946,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes.
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+                                          struct rte_mbuf **rx_pkts,
+                                          uint16_t nb_pkts)
+{
+       uint16_t retval = 0;
+
+       while (nb_pkts > ICE_VPMD_RX_BURST) {
+               uint16_t burst =
+                       ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+                               rx_pkts + retval, ICE_VPMD_RX_BURST);
+               retval += burst;
+               nb_pkts -= burst;
+               if (burst < ICE_VPMD_RX_BURST)
+                       return retval;
+       }
+       return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+                               rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
@@ -894,7 +995,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
        txep = (void *)txq->sw_ring;
        txep += txq->tx_next_dd - (n - 1);
 
-       if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
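+       /* mbuf fast free: bulk-return buffers to the mempool; this path is
+        * only taken when n is a multiple of 32
+        */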
+       if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
                struct rte_mempool *mp = txep[0].mbuf->pool;
                void **cache_objs;
                struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
@@ -979,23 +1080,26 @@ done:
        return txq->tx_rs_thresh;
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
-        struct rte_mbuf *pkt, uint64_t flags)
+        struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
 {
        uint64_t high_qw =
                (ICE_TX_DESC_DTYPE_DATA |
                 ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |
                 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
 
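+       /* optionally fold the packet's Tx offload flags into the high qword */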
+       if (do_offload)
+               ice_txd_enable_offload(pkt, &high_qw);
+
        __m128i descriptor = _mm_set_epi64x(high_qw,
                                pkt->buf_iova + pkt->data_off);
        _mm_store_si128((__m128i *)txdp, descriptor);
 }
 
-static inline void
-ice_vtx(volatile struct ice_tx_desc *txdp,
-       struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+static __rte_always_inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+       uint16_t nb_pkts,  uint64_t flags, bool do_offload)
 {
        const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
                        ((uint64_t)flags  << ICE_TXD_QW1_CMD_S));
@@ -1005,18 +1109,26 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
                        hi_qw_tmpl |
                        ((uint64_t)pkt[3]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
+               if (do_offload)
+                       ice_txd_enable_offload(pkt[3], &hi_qw3);
                uint64_t hi_qw2 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[2]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
+               if (do_offload)
+                       ice_txd_enable_offload(pkt[2], &hi_qw2);
                uint64_t hi_qw1 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[1]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
+               if (do_offload)
+                       ice_txd_enable_offload(pkt[1], &hi_qw1);
                uint64_t hi_qw0 =
                        hi_qw_tmpl |
                        ((uint64_t)pkt[0]->data_len <<
                         ICE_TXD_QW1_TX_BUF_SZ_S);
+               if (do_offload)
+                       ice_txd_enable_offload(pkt[0], &hi_qw0);
 
                __m512i desc0_3 =
                        _mm512_set_epi64
@@ -1033,7 +1145,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 
        /* do any last ones */
        while (nb_pkts) {
-               ice_vtx1(txdp, *pkt, flags);
+               ice_vtx1(txdp, *pkt, flags, do_offload);
                txdp++, pkt++, nb_pkts--;
        }
 }
@@ -1048,9 +1160,9 @@ ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
                txep[i].mbuf = tx_pkts[i];
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
-                               uint16_t nb_pkts)
+                               uint16_t nb_pkts, bool do_offload)
 {
        struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
        volatile struct ice_tx_desc *txdp;
@@ -1080,11 +1192,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
        if (nb_commit >= n) {
                ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
 
-               ice_vtx(txdp, tx_pkts, n - 1, flags);
+               ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
                tx_pkts += (n - 1);
                txdp += (n - 1);
 
-               ice_vtx1(txdp, *tx_pkts++, rs);
+               ice_vtx1(txdp, *tx_pkts++, rs, do_offload);
 
                nb_commit = (uint16_t)(nb_commit - n);
 
@@ -1098,7 +1210,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
 
-       ice_vtx(txdp, tx_pkts, nb_commit, flags);
+       ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
 
        tx_id = (uint16_t)(tx_id + nb_commit);
        if (tx_id > txq->tx_next_rs) {
@@ -1128,7 +1240,30 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
                ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
-                                                     &tx_pkts[nb_tx], num);
+                               &tx_pkts[nb_tx], num, false);
+               nb_tx += ret;
+               nb_pkts -= ret;
+               if (ret < num)
+                       break;
+       }
+
+       return nb_tx;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                uint16_t nb_pkts)
+{
+       uint16_t nb_tx = 0;
+       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
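+       /* transmit in bursts of at most tx_rs_thresh packets */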
+       while (nb_pkts) {
+               uint16_t ret, num;
+
+               num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+               ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
+                               &tx_pkts[nb_tx], num, true);
+
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)