net/ice: add Rx AVX512 offload path
author      Leyi Rong <leyi.rong@intel.com>
            Thu, 15 Apr 2021 08:58:11 +0000 (16:58 +0800)
committer   Qi Zhang <qi.z.zhang@intel.com>
            Fri, 16 Apr 2021 10:44:27 +0000 (12:44 +0200)
Split the AVX512 Rx data path into two: a basic path, and an offload
path that supports additional Rx offload features, including Rx
checksum offload, Rx VLAN offload and RSS offload.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Tested-by: Qin Sun <qinx.sun@intel.com>
drivers/net/ice/ice_rxtx.c
drivers/net/ice/ice_rxtx.h
drivers/net/ice/ice_rxtx_vec_avx512.c
drivers/net/ice/ice_rxtx_vec_common.h
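
For context, whether a device lands on the new offload path depends on the Rx
offloads the application requests at configure time. Below is a minimal,
illustrative sketch, not part of this patch: it assumes an AVX512-capable CPU
and build, a queue that passes the vector checks, and placeholder names
port_id, NB_RXD and mbuf_pool.

    #include <rte_ethdev.h>

    /* Requesting any offload covered by ICE_RX_VECTOR_OFFLOAD (checksum,
     * VLAN, RSS hash) makes the per-queue check return
     * ICE_VECTOR_OFFLOAD_PATH, so ice_set_rx_function() selects the
     * AVX512 offload Rx burst function.
     */
    struct rte_eth_conf conf = {
            .rxmode = {
                    .offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                DEV_RX_OFFLOAD_VLAN |
                                DEV_RX_OFFLOAD_RSS_HASH,
            },
    };

    rte_eth_dev_configure(port_id, 1, 1, &conf);
    rte_eth_rx_queue_setup(port_id, 0, NB_RXD, rte_socket_id(),
                           NULL, mbuf_pool);
    rte_eth_dev_start(port_id);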

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 75326c7..92fbbc1 100644
@@ -1059,6 +1059,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
        uint32_t ring_size;
        uint16_t len;
        int use_def_burst_func = 1;
+       uint64_t offloads;
 
        if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
            nb_desc > ICE_MAX_RING_DESC ||
@@ -1068,6 +1069,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
                ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1088,6 +1091,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
+       rxq->offloads = offloads;
 
        rxq->reg_idx = vsi->base_queue + queue_idx;
        rxq->port_id = dev->data->port_id;
@@ -1990,7 +1994,9 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
            dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
 #ifdef CC_AVX512_SUPPORT
            dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+           dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
            dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
 #endif
            dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
            dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
@@ -3052,12 +3058,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
        struct ice_rx_queue *rxq;
        int i;
+       int rx_check_ret;
        bool use_avx512 = false;
        bool use_avx2 = false;
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-               if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
-                               rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+               rx_check_ret = ice_rx_vec_dev_check(dev);
+               if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+                   rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
                        ad->rx_vec_allowed = true;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
@@ -3091,11 +3099,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                if (dev->data->scattered_rx) {
                        if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-                               PMD_DRV_LOG(NOTICE,
-                                       "Using AVX512 Vector Scattered Rx (port %d).",
-                                       dev->data->port_id);
-                               dev->rx_pkt_burst =
-                                       ice_recv_scattered_pkts_vec_avx512;
+                               if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_scattered_pkts_vec_avx512_offload;
+                               } else {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 Vector Scattered Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_scattered_pkts_vec_avx512;
+                               }
 #endif
                        } else {
                                PMD_DRV_LOG(DEBUG,
@@ -3109,11 +3125,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                } else {
                        if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-                               PMD_DRV_LOG(NOTICE,
-                                       "Using AVX512 Vector Rx (port %d).",
-                                       dev->data->port_id);
-                               dev->rx_pkt_burst =
-                                       ice_recv_pkts_vec_avx512;
+                               if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 OFFLOAD Vector Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_pkts_vec_avx512_offload;
+                               } else {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 Vector Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_pkts_vec_avx512;
+                               }
 #endif
                        } else {
                                PMD_DRV_LOG(DEBUG,
@@ -3162,7 +3186,9 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
        { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+       { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
        { ice_recv_pkts_vec_avx512,           "Vector AVX512" },
+       { ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
 #endif
        { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
        { ice_recv_pkts_vec_avx2,           "Vector AVX2" },
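
The strings added to this burst-mode table are what an application sees when
it queries the active Rx path. A small example, assuming the standard ethdev
burst-mode query API and queue 0:

    #include <stdio.h>
    #include <rte_ethdev.h>

    struct rte_eth_burst_mode mode;

    if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
            printf("Rx burst mode: %s\n", mode.info);
    /* expected to report e.g. "Offload Vector AVX512" when the new
     * offload path has been selected */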
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0..b29387c 100644
@@ -88,6 +88,7 @@ struct ice_rx_queue {
        uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
        ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
        ice_rx_release_mbufs_t rx_rel_mbufs;
+       uint64_t offloads;
 };
 
 struct ice_tx_entry {
@@ -256,9 +257,15 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+                                         struct rte_mbuf **rx_pkts,
+                                         uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
                                            struct rte_mbuf **rx_pkts,
                                            uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+                                                   struct rte_mbuf **rx_pkts,
+                                                   uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 1c4a59a..ad6c69d 100644
@@ -150,10 +150,12 @@ ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
        return fdir_flags;
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                              struct rte_mbuf **rx_pkts,
-                             uint16_t nb_pkts, uint8_t *split_packet)
+                             uint16_t nb_pkts,
+                             uint8_t *split_packet,
+                             bool do_offload)
 {
        const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
        const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
@@ -224,6 +226,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+       /* following code block is for Rx Checksum Offload */
        /* Status/Error flag masks */
        /**
         * mask everything except Checksum Reports, RSS indication
@@ -487,37 +490,42 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                __m256i status0_7 = _mm512_extracti64x4_epi64
                        (raw_status0_7, 0);
 
-               /* now do flag manipulation */
+               __m256i mbuf_flags = _mm256_set1_epi32(0);
 
-               /* get only flag/error bits we want */
-               const __m256i flag_bits =
-                       _mm256_and_si256(status0_7, flags_mask);
-               /**
-                * l3_l4_error flags, shuffle, then shift to correct adjustment
-                * of flags in flags_shuf, and finally mask out extra bits
-                */
-               __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
-                               _mm256_srli_epi32(flag_bits, 4));
-               l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
-               __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
-               __m256i l4_outer_flags =
-                               _mm256_and_si256(l3_l4_flags, l4_outer_mask);
-               l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
-               __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
-               l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
-               l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
-               l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
-               /* set rss and vlan flags */
-               const __m256i rss_vlan_flag_bits =
-                       _mm256_srli_epi32(flag_bits, 12);
-               const __m256i rss_vlan_flags =
-                       _mm256_shuffle_epi8(rss_vlan_flags_shuf,
-                                           rss_vlan_flag_bits);
-
-               /* merge flags */
-               __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+               if (do_offload) {
+                       /* now do flag manipulation */
+
+                       /* get only flag/error bits we want */
+                       const __m256i flag_bits =
+                               _mm256_and_si256(status0_7, flags_mask);
+                       /**
+                        * l3_l4_error flags, shuffle, then shift to correct adjustment
+                        * of flags in flags_shuf, and finally mask out extra bits
+                        */
+                       __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+                                       _mm256_srli_epi32(flag_bits, 4));
+                       l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+                       __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+                       __m256i l4_outer_flags =
+                                       _mm256_and_si256(l3_l4_flags, l4_outer_mask);
+                       l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+                       __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+                       l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+                       l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+                       l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+                       /* set rss and vlan flags */
+                       const __m256i rss_vlan_flag_bits =
+                               _mm256_srli_epi32(flag_bits, 12);
+                       const __m256i rss_vlan_flags =
+                               _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+                                                   rss_vlan_flag_bits);
+
+                       /* merge flags */
+                       mbuf_flags = _mm256_or_si256(l3_l4_flags,
                                                     rss_vlan_flags);
+               }
 
                if (rxq->fdir_enabled) {
                        const __m256i fdir_id4_7 =
@@ -529,12 +537,19 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                        const __m256i fdir_id0_7 =
                                _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
 
-                       const __m256i fdir_flags =
-                               ice_flex_rxd_to_fdir_flags_vec_avx512
-                                       (fdir_id0_7);
-
-                       /* merge with fdir_flags */
-                       mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+                       if (do_offload) {
+                               const __m256i fdir_flags =
+                                       ice_flex_rxd_to_fdir_flags_vec_avx512
+                                               (fdir_id0_7);
+
+                               /* merge with fdir_flags */
+                               mbuf_flags = _mm256_or_si256
+                                               (mbuf_flags, fdir_flags);
+                       } else {
+                               mbuf_flags =
+                                       ice_flex_rxd_to_fdir_flags_vec_avx512
+                                               (fdir_id0_7);
+                       }
 
                        /* write to mbuf: have to use scalar store here */
                        rx_pkts[i + 0]->hash.fdir.hi =
@@ -562,95 +577,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
                                _mm256_extract_epi32(fdir_id0_7, 4);
                } /* if() on fdir_enabled */
 
+               if (do_offload) {
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-               /**
-                * needs to load 2nd 16B of each desc for RSS hash parsing,
-                * will cause performance drop to get into this context.
-                */
-               if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-                               DEV_RX_OFFLOAD_RSS_HASH) {
-                       /* load bottom half of every 32B desc */
-                       const __m128i raw_desc_bh7 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[7].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh6 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[6].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh5 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[5].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh4 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[4].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh3 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[3].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh2 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[2].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh1 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[1].wb.status_error1));
-                       rte_compiler_barrier();
-                       const __m128i raw_desc_bh0 =
-                               _mm_load_si128
-                                       ((void *)(&rxdp[0].wb.status_error1));
-
-                       __m256i raw_desc_bh6_7 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh6),
-                                       raw_desc_bh7, 1);
-                       __m256i raw_desc_bh4_5 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh4),
-                                       raw_desc_bh5, 1);
-                       __m256i raw_desc_bh2_3 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh2),
-                                       raw_desc_bh3, 1);
-                       __m256i raw_desc_bh0_1 =
-                               _mm256_inserti128_si256
-                                       (_mm256_castsi128_si256(raw_desc_bh0),
-                                       raw_desc_bh1, 1);
-
                        /**
-                        * to shift the 32b RSS hash value to the
-                        * highest 32b of each 128b before mask
+                        * needs to load 2nd 16B of each desc for RSS hash parsing,
+                        * will cause performance drop to get into this context.
                         */
-                       __m256i rss_hash6_7 =
-                               _mm256_slli_epi64(raw_desc_bh6_7, 32);
-                       __m256i rss_hash4_5 =
-                               _mm256_slli_epi64(raw_desc_bh4_5, 32);
-                       __m256i rss_hash2_3 =
-                               _mm256_slli_epi64(raw_desc_bh2_3, 32);
-                       __m256i rss_hash0_1 =
-                               _mm256_slli_epi64(raw_desc_bh0_1, 32);
-
-                       __m256i rss_hash_msk =
-                               _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
-                                                0xFFFFFFFF, 0, 0, 0);
-
-                       rss_hash6_7 = _mm256_and_si256
-                                       (rss_hash6_7, rss_hash_msk);
-                       rss_hash4_5 = _mm256_and_si256
-                                       (rss_hash4_5, rss_hash_msk);
-                       rss_hash2_3 = _mm256_and_si256
-                                       (rss_hash2_3, rss_hash_msk);
-                       rss_hash0_1 = _mm256_and_si256
-                                       (rss_hash0_1, rss_hash_msk);
-
-                       mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
-                       mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
-                       mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
-                       mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
-               } /* if() on RSS hash parsing */
+                       if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+                                       DEV_RX_OFFLOAD_RSS_HASH) {
+                               /* load bottom half of every 32B desc */
+                               const __m128i raw_desc_bh7 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[7].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh6 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[6].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh5 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[5].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh4 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[4].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh3 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[3].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh2 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[2].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh1 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[1].wb.status_error1));
+                               rte_compiler_barrier();
+                               const __m128i raw_desc_bh0 =
+                                       _mm_load_si128
+                                               ((void *)(&rxdp[0].wb.status_error1));
+
+                               __m256i raw_desc_bh6_7 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh6),
+                                               raw_desc_bh7, 1);
+                               __m256i raw_desc_bh4_5 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh4),
+                                               raw_desc_bh5, 1);
+                               __m256i raw_desc_bh2_3 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh2),
+                                               raw_desc_bh3, 1);
+                               __m256i raw_desc_bh0_1 =
+                                       _mm256_inserti128_si256
+                                               (_mm256_castsi128_si256(raw_desc_bh0),
+                                               raw_desc_bh1, 1);
+
+                               /**
+                                * to shift the 32b RSS hash value to the
+                                * highest 32b of each 128b before mask
+                                */
+                               __m256i rss_hash6_7 =
+                                       _mm256_slli_epi64(raw_desc_bh6_7, 32);
+                               __m256i rss_hash4_5 =
+                                       _mm256_slli_epi64(raw_desc_bh4_5, 32);
+                               __m256i rss_hash2_3 =
+                                       _mm256_slli_epi64(raw_desc_bh2_3, 32);
+                               __m256i rss_hash0_1 =
+                                       _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+                               __m256i rss_hash_msk =
+                                       _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+                                                        0xFFFFFFFF, 0, 0, 0);
+
+                               rss_hash6_7 = _mm256_and_si256
+                                               (rss_hash6_7, rss_hash_msk);
+                               rss_hash4_5 = _mm256_and_si256
+                                               (rss_hash4_5, rss_hash_msk);
+                               rss_hash2_3 = _mm256_and_si256
+                                               (rss_hash2_3, rss_hash_msk);
+                               rss_hash0_1 = _mm256_and_si256
+                                               (rss_hash0_1, rss_hash_msk);
+
+                               mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+                               mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+                               mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+                               mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+                       } /* if() on RSS hash parsing */
 #endif
+               }
 
                /**
                 * At this point, we have the 8 sets of flags in the low 16-bits
@@ -806,7 +823,19 @@ uint16_t
 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
-       return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+       return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
+}
+
+/**
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                uint16_t nb_pkts)
+{
+       return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
+                                            nb_pkts, NULL, true);
 }
 
 /**
@@ -823,7 +852,49 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 
        /* get some new buffers */
        uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
-                                                      split_flags);
+                                                      split_flags, false);
+       if (nb_bufs == 0)
+               return 0;
+
+       /* happy day case, full burst + no packets to be joined */
+       const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+       if (!rxq->pkt_first_seg &&
+           split_fl64[0] == 0 && split_fl64[1] == 0 &&
+           split_fl64[2] == 0 && split_fl64[3] == 0)
+               return nb_bufs;
+
+       /* reassemble any packets that need reassembly */
+       unsigned int i = 0;
+
+       if (!rxq->pkt_first_seg) {
+               /* find the first split flag, and only reassemble then */
+               while (i < nb_bufs && !split_flags[i])
+                       i++;
+               if (i == nb_bufs)
+                       return nb_bufs;
+               rxq->pkt_first_seg = rx_pkts[i];
+       }
+       return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+                                            &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+                                           struct rte_mbuf **rx_pkts,
+                                           uint16_t nb_pkts)
+{
+       struct ice_rx_queue *rxq = rx_queue;
+       uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+       /* get some new buffers */
+       uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
+                               rx_pkts, nb_pkts, split_flags, true);
        if (nb_bufs == 0)
                return 0;
 
@@ -874,6 +945,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+                                          struct rte_mbuf **rx_pkts,
+                                          uint16_t nb_pkts)
+{
+       uint16_t retval = 0;
+
+       while (nb_pkts > ICE_VPMD_RX_BURST) {
+               uint16_t burst =
+                       ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+                               rx_pkts + retval, ICE_VPMD_RX_BURST);
+               retval += burst;
+               nb_pkts -= burst;
+               if (burst < ICE_VPMD_RX_BURST)
+                       return retval;
+       }
+       return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+                               rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
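
The split adds no runtime branch in the hot loop: _ice_recv_raw_pkts_vec_avx512()
is __rte_always_inline and do_offload is a compile-time constant at each call
site, so the compiler emits two specialized copies and drops the checksum/VLAN/
RSS flag handling from the basic one. A minimal sketch of the idiom, with
illustrative names rather than driver code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_mbuf.h>

    static __rte_always_inline uint16_t
    recv_raw(struct rte_mbuf **pkts, uint16_t n, bool do_offload)
    {
            uint16_t i;

            for (i = 0; i < n; i++) {
                    /* ... descriptor parsing common to both paths ... */
                    if (do_offload) {
                            /* checksum/VLAN/RSS flag extraction; compiled
                             * out when do_offload is the constant false
                             */
                    }
            }
            return n;
    }

    uint16_t
    recv_basic(struct rte_mbuf **pkts, uint16_t n)
    {
            return recv_raw(pkts, n, false);
    }

    uint16_t
    recv_offload(struct rte_mbuf **pkts, uint16_t n)
    {
            return recv_raw(pkts, n, true);
    }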
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 942647f..6e8d7a6 100644
@@ -247,6 +247,28 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
        return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (                       \
+               DEV_TX_OFFLOAD_MULTI_SEGS |             \
+               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |       \
+               DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (                                \
+               DEV_TX_OFFLOAD_VLAN_INSERT |            \
+               DEV_TX_OFFLOAD_QINQ_INSERT |            \
+               DEV_TX_OFFLOAD_IPV4_CKSUM |             \
+               DEV_TX_OFFLOAD_SCTP_CKSUM |             \
+               DEV_TX_OFFLOAD_UDP_CKSUM |              \
+               DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (                                \
+               DEV_RX_OFFLOAD_CHECKSUM |               \
+               DEV_RX_OFFLOAD_SCTP_CKSUM |             \
+               DEV_RX_OFFLOAD_VLAN |                   \
+               DEV_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH                0
+#define ICE_VECTOR_OFFLOAD_PATH        1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -265,24 +287,11 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
        if (rxq->proto_xtr != PROTO_XTR_NONE)
                return -1;
 
-       return 0;
-}
-
-#define ICE_TX_NO_VECTOR_FLAGS (                       \
-               DEV_TX_OFFLOAD_MULTI_SEGS |             \
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |       \
-               DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ICE_TX_VECTOR_OFFLOAD (                                \
-               DEV_TX_OFFLOAD_VLAN_INSERT |            \
-               DEV_TX_OFFLOAD_QINQ_INSERT |            \
-               DEV_TX_OFFLOAD_IPV4_CKSUM |             \
-               DEV_TX_OFFLOAD_SCTP_CKSUM |             \
-               DEV_TX_OFFLOAD_UDP_CKSUM |              \
-               DEV_TX_OFFLOAD_TCP_CKSUM)
+       if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+               return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_VECTOR_PATH                0
-#define ICE_VECTOR_OFFLOAD_PATH        1
+       return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -308,14 +317,19 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
        int i;
        struct ice_rx_queue *rxq;
+       int ret = 0;
+       int result = 0;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               if (ice_rx_vec_queue_default(rxq))
+               ret = (ice_rx_vec_queue_default(rxq));
+               if (ret < 0)
                        return -1;
+               if (ret == ICE_VECTOR_OFFLOAD_PATH)
+                       result = ret;
        }
 
-       return 0;
+       return result;
 }
 
 static inline int
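
Taken together with the ice_rxtx.c changes above, the device-level check now
yields three outcomes. A simplified sketch of how its result is consumed,
with error handling, scattered Rx and the AVX2/SSE fallbacks omitted:

    int ret = ice_rx_vec_dev_check(dev);

    if (ret < 0) {
            /* some queue cannot use the vector path: fall back to scalar Rx */
    } else if (use_avx512 && ret == ICE_VECTOR_OFFLOAD_PATH) {
            dev->rx_pkt_burst = ice_recv_pkts_vec_avx512_offload;
    } else if (use_avx512) {
            dev->rx_pkt_burst = ice_recv_pkts_vec_avx512;
    }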