uint32_t ring_size;
uint16_t len;
int use_def_burst_func = 1;
+ uint64_t offloads;
if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
nb_desc > ICE_MAX_RING_DESC ||
return -EINVAL;
}
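+ /* combine the per-queue Rx offloads with the port-level offloads */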
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx]) {
ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
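+ /* keep the combined offload flags on the queue for later Rx path selection */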
+ rxq->offloads = offloads;
rxq->reg_idx = vsi->base_queue + queue_idx;
rxq->port_id = dev->data->port_id;
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
#ifdef CC_AVX512_SUPPORT
dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
#endif
dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
#ifdef RTE_ARCH_X86
struct ice_rx_queue *rxq;
int i;
+ int rx_check_ret;
bool use_avx512 = false;
bool use_avx2 = false;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
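+ /* negative means no vector path is usable; ICE_VECTOR_OFFLOAD_PATH
+  * selects the offload-capable burst functions
+  */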
+ rx_check_ret = ice_rx_vec_dev_check(dev);
+ if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (dev->data->scattered_rx) {
if (use_avx512) {
#ifdef CC_AVX512_SUPPORT
- PMD_DRV_LOG(NOTICE,
- "Using AVX512 Vector Scattered Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst =
- ice_recv_scattered_pkts_vec_avx512;
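+ /* the OFFLOAD variants also fill the checksum/VLAN/RSS mbuf flags
+  * and the RSS hash, at some extra cost per packet
+  */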
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx512_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx512;
+ }
#endif
} else {
PMD_DRV_LOG(DEBUG,
} else {
if (use_avx512) {
#ifdef CC_AVX512_SUPPORT
- PMD_DRV_LOG(NOTICE,
- "Using AVX512 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst =
- ice_recv_pkts_vec_avx512;
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 OFFLOAD Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx512_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx512;
+ }
#endif
} else {
PMD_DRV_LOG(DEBUG,
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+ { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
{ ice_recv_pkts_vec_avx512, "Vector AVX512" },
+ { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
#endif
{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
{ ice_recv_pkts_vec_avx2, "Vector AVX2" },
return fdir_flags;
}
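+/*
+ * do_offload is passed as a compile-time constant by every caller below;
+ * combined with __rte_always_inline this lets the compiler generate
+ * separate offload and non-offload copies of the Rx loop without a
+ * runtime branch.
+ */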
-static inline uint16_t
+static __rte_always_inline uint16_t
_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts, uint8_t *split_packet)
+ uint16_t nb_pkts,
+ uint8_t *split_packet,
+ bool do_offload)
{
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+ /* the following code block is for the Rx checksum/RSS/VLAN offload path */
/* Status/Error flag masks */
/**
* mask everything except Checksum Reports, RSS indication
__m256i status0_7 = _mm512_extracti64x4_epi64
(raw_status0_7, 0);
- /* now do flag manipulation */
+ __m256i mbuf_flags = _mm256_set1_epi32(0);
- /* get only flag/error bits we want */
- const __m256i flag_bits =
- _mm256_and_si256(status0_7, flags_mask);
- /**
- * l3_l4_error flags, shuffle, then shift to correct adjustment
- * of flags in flags_shuf, and finally mask out extra bits
- */
- __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
- _mm256_srli_epi32(flag_bits, 4));
- l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
- __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
- __m256i l4_outer_flags =
- _mm256_and_si256(l3_l4_flags, l4_outer_mask);
- l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
- __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
- l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
- l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
- l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
- /* set rss and vlan flags */
- const __m256i rss_vlan_flag_bits =
- _mm256_srli_epi32(flag_bits, 12);
- const __m256i rss_vlan_flags =
- _mm256_shuffle_epi8(rss_vlan_flags_shuf,
- rss_vlan_flag_bits);
-
- /* merge flags */
- __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
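+ /* checksum/RSS/VLAN flag extraction is only needed on the offload path */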
+ if (do_offload) {
+ /* now do flag manipulation */
+
+ /* get only flag/error bits we want */
+ const __m256i flag_bits =
+ _mm256_and_si256(status0_7, flags_mask);
+ /**
+ * l3_l4_error flags, shuffle, then shift to correct adjustment
+ * of flags in flags_shuf, and finally mask out extra bits
+ */
+ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 4));
+ l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+ __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+ __m256i l4_outer_flags =
+ _mm256_and_si256(l3_l4_flags, l4_outer_mask);
+ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+ /* set rss and vlan flags */
+ const __m256i rss_vlan_flag_bits =
+ _mm256_srli_epi32(flag_bits, 12);
+ const __m256i rss_vlan_flags =
+ _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+ rss_vlan_flag_bits);
+
+ /* merge flags */
+ mbuf_flags = _mm256_or_si256(l3_l4_flags,
rss_vlan_flags);
+ }
if (rxq->fdir_enabled) {
const __m256i fdir_id4_7 =
const __m256i fdir_id0_7 =
_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
- const __m256i fdir_flags =
- ice_flex_rxd_to_fdir_flags_vec_avx512
- (fdir_id0_7);
-
- /* merge with fdir_flags */
- mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
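+ /* the FDIR flags are reported on both paths; only the offload path
+  * merges them with the other offload flags
+  */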
+ if (do_offload) {
+ const __m256i fdir_flags =
+ ice_flex_rxd_to_fdir_flags_vec_avx512
+ (fdir_id0_7);
+
+ /* merge with fdir_flags */
+ mbuf_flags = _mm256_or_si256
+ (mbuf_flags, fdir_flags);
+ } else {
+ mbuf_flags =
+ ice_flex_rxd_to_fdir_flags_vec_avx512
+ (fdir_id0_7);
+ }
/* write to mbuf: have to use scalar store here */
rx_pkts[i + 0]->hash.fdir.hi =
_mm256_extract_epi32(fdir_id0_7, 4);
} /* if() on fdir_enabled */
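+ /* RSS hash parsing needs the second 16B of each descriptor, so it is
+  * skipped entirely on the non-offload path
+  */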
+ if (do_offload) {
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- /**
- * needs to load 2nd 16B of each desc for RSS hash parsing,
- * will cause performance drop to get into this context.
- */
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
- /* load bottom half of every 32B desc */
- const __m128i raw_desc_bh7 =
- _mm_load_si128
- ((void *)(&rxdp[7].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh6 =
- _mm_load_si128
- ((void *)(&rxdp[6].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh5 =
- _mm_load_si128
- ((void *)(&rxdp[5].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh4 =
- _mm_load_si128
- ((void *)(&rxdp[4].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh3 =
- _mm_load_si128
- ((void *)(&rxdp[3].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh2 =
- _mm_load_si128
- ((void *)(&rxdp[2].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh1 =
- _mm_load_si128
- ((void *)(&rxdp[1].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh0 =
- _mm_load_si128
- ((void *)(&rxdp[0].wb.status_error1));
-
- __m256i raw_desc_bh6_7 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh6),
- raw_desc_bh7, 1);
- __m256i raw_desc_bh4_5 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh4),
- raw_desc_bh5, 1);
- __m256i raw_desc_bh2_3 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh2),
- raw_desc_bh3, 1);
- __m256i raw_desc_bh0_1 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh0),
- raw_desc_bh1, 1);
-
/**
- * to shift the 32b RSS hash value to the
- * highest 32b of each 128b before mask
+ * needs to load 2nd 16B of each desc for RSS hash parsing,
+ * will cause performance drop to get into this context.
*/
- __m256i rss_hash6_7 =
- _mm256_slli_epi64(raw_desc_bh6_7, 32);
- __m256i rss_hash4_5 =
- _mm256_slli_epi64(raw_desc_bh4_5, 32);
- __m256i rss_hash2_3 =
- _mm256_slli_epi64(raw_desc_bh2_3, 32);
- __m256i rss_hash0_1 =
- _mm256_slli_epi64(raw_desc_bh0_1, 32);
-
- __m256i rss_hash_msk =
- _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
- 0xFFFFFFFF, 0, 0, 0);
-
- rss_hash6_7 = _mm256_and_si256
- (rss_hash6_7, rss_hash_msk);
- rss_hash4_5 = _mm256_and_si256
- (rss_hash4_5, rss_hash_msk);
- rss_hash2_3 = _mm256_and_si256
- (rss_hash2_3, rss_hash_msk);
- rss_hash0_1 = _mm256_and_si256
- (rss_hash0_1, rss_hash_msk);
-
- mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
- mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
- mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
- mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
- } /* if() on RSS hash parsing */
+ if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_RSS_HASH) {
+ /* load bottom half of every 32B desc */
+ const __m128i raw_desc_bh7 =
+ _mm_load_si128
+ ((void *)(&rxdp[7].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh6 =
+ _mm_load_si128
+ ((void *)(&rxdp[6].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh5 =
+ _mm_load_si128
+ ((void *)(&rxdp[5].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh4 =
+ _mm_load_si128
+ ((void *)(&rxdp[4].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh3 =
+ _mm_load_si128
+ ((void *)(&rxdp[3].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh2 =
+ _mm_load_si128
+ ((void *)(&rxdp[2].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh1 =
+ _mm_load_si128
+ ((void *)(&rxdp[1].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh0 =
+ _mm_load_si128
+ ((void *)(&rxdp[0].wb.status_error1));
+
+ __m256i raw_desc_bh6_7 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh6),
+ raw_desc_bh7, 1);
+ __m256i raw_desc_bh4_5 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh4),
+ raw_desc_bh5, 1);
+ __m256i raw_desc_bh2_3 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh2),
+ raw_desc_bh3, 1);
+ __m256i raw_desc_bh0_1 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh0),
+ raw_desc_bh1, 1);
+
+ /**
+ * to shift the 32b RSS hash value to the
+ * highest 32b of each 128b before mask
+ */
+ __m256i rss_hash6_7 =
+ _mm256_slli_epi64(raw_desc_bh6_7, 32);
+ __m256i rss_hash4_5 =
+ _mm256_slli_epi64(raw_desc_bh4_5, 32);
+ __m256i rss_hash2_3 =
+ _mm256_slli_epi64(raw_desc_bh2_3, 32);
+ __m256i rss_hash0_1 =
+ _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+ __m256i rss_hash_msk =
+ _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+ 0xFFFFFFFF, 0, 0, 0);
+
+ rss_hash6_7 = _mm256_and_si256
+ (rss_hash6_7, rss_hash_msk);
+ rss_hash4_5 = _mm256_and_si256
+ (rss_hash4_5, rss_hash_msk);
+ rss_hash2_3 = _mm256_and_si256
+ (rss_hash2_3, rss_hash_msk);
+ rss_hash0_1 = _mm256_and_si256
+ (rss_hash0_1, rss_hash_msk);
+
+ mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+ mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+ mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+ mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+ } /* if() on RSS hash parsing */
#endif
+ }
/**
* At this point, we have the 8 sets of flags in the low 16-bits
ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+ return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
+}
+
+/**
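+ * Offload-enabled variant: also fills the checksum/VLAN/RSS offload flags
+ * and the RSS hash in each mbuf.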
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
+ nb_pkts, NULL, true);
}
/**
/* get some new buffers */
uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
- split_flags);
+ split_flags, false);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble then */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
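+ * (offload-enabled variant)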
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ice_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
+ rx_pkts, nb_pkts, split_flags, true);
if (nb_bufs == 0)
return 0;
rx_pkts + retval, nb_pkts);
}
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > ICE_VPMD_RX_BURST) {
+ uint16_t burst =
+ ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+ rx_pkts + retval, ICE_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < ICE_VPMD_RX_BURST)
+ return retval;
+ }
+ return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+ rx_pkts + retval, nb_pkts);
+}
+
static __rte_always_inline int
ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
{