{
int i;
uint16_t rx_id;
- volatile union ice_rx_desc *rxdp;
+ volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
rxdp = rxq->rx_ring + rxq->rxrearm_start;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
mb2 = rxep[2].mbuf;
mb3 = rxep[3].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
/* Update the tail pointer on the NIC */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
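+ /* a write-combining doorbell write is used where the platform
+  * supports it, falling back to a plain MMIO write otherwise
+  */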
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
-#define PKTLEN_SHIFT 10
+static inline __m256i
+ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+ PKT_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+ __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+ fdir_mis_mask);
+ /* XOR with the all-ones mask inverts fdir_mask */
+ fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
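+ /* net effect: a matched flow (flow_id != 0xFFFFFFFF) compares
+  * unequal, so cmpeq gives 0, the XOR flips it to all-ones and
+  * the AND keeps PKT_RX_FDIR | PKT_RX_FDIR_ID; a mismatch ends
+  * up with zero flags
+  */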
+ const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
+
+ return fdir_flags;
+}
static inline uint16_t
_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
0, rxq->mbuf_initializer);
struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
- volatile union ice_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+ volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
const int avx_aligned = ((rxq->rx_tail & 1) == 0);
rte_prefetch0(rxdp);
/* Before we start moving massive data around, check to see if
* there is actually a packet available
*/
- if (!(rxdp->wb.qword1.status_error_len &
- rte_cpu_to_le_32(1 << ICE_RX_DESC_STATUS_DD_S)))
+ if (!(rxdp->wb.status_error0 &
+ rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
/* constants used in processing loop */
const __m256i shuf_msk =
_mm256_set_epi8
(/* first descriptor */
- 7, 6, 5, 4, /* octet 4~7, 32bits rss */
- 3, 2, /* octet 2~3, low 16 bits vlan_macip */
- 15, 14, /* octet 15~14, 16 bits data_len */
- 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
- 15, 14, /* octet 15~14, low 16 bits pkt_len */
- 0xFF, 0xFF, /* pkt_type set as unknown */
- 0xFF, 0xFF, /*pkt_type set as unknown */
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* rss hash parsed separately */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
+ 5, 4, /* octet 4~5, 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
/* second descriptor */
- 7, 6, 5, 4, /* octet 4~7, 32bits rss */
- 3, 2, /* octet 2~3, low 16 bits vlan_macip */
- 15, 14, /* octet 15~14, 16 bits data_len */
- 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
- 15, 14, /* octet 15~14, low 16 bits pkt_len */
- 0xFF, 0xFF, /* pkt_type set as unknown */
- 0xFF, 0xFF /*pkt_type set as unknown */
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* rss hash parsed separately */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
+ 5, 4, /* octet 4~5, 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /* pkt_type set as unknown */
);
/**
* compile-time check that the above crc and shuffle layout is correct.
/* Status/Error flag masks */
/**
- * mask everything except RSS, flow director and VLAN flags
- * bit2 is for VLAN tag, bit11 for flow director indication
- * bit13:12 for RSS indication. Bits 3-5 of error
- * field (bits 22-24) are for IP/L4 checksum errors
+ * mask everything except the checksum reports, the RSS
+ * indication and the VLAN indication.
+ * bits 6:4 are for IP/L4 checksum errors.
+ * bit 12 is for RSS indication.
+ * bit 13 is for VLAN indication.
*/
const __m256i flags_mask =
- _mm256_set1_epi32((1 << 2) | (1 << 11) |
- (3 << 12) | (7 << 22));
- /**
- * data to be shuffled by result of flag mask. If VLAN bit is set,
- * (bit 2), then position 4 in this array will be used in the
- * destination
- */
- const __m256i vlan_flags_shuf =
- _mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
- 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
/**
- * data to be shuffled by result of flag mask, shifted down 11.
- * If RSS/FDIR bits are set, shuffle moves appropriate flags in
- * place.
- */
- const __m256i rss_flags_shuf =
- _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
- 0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
- 0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
- 0, 0, 0, 0, PKT_RX_FDIR, 0);
-
- /**
- * data to be shuffled by the result of the flags mask shifted by 22
+ * data to be shuffled by the result of the flags mask shifted by 4
* bits. This gives us the l3_l4 flags.
*/
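+ /* shuffle index after the >>4: bit0 = IP checksum error,
+  * bit1 = L4 checksum error, bit2 = outer-IP checksum error,
+  * so entry 0 of each lane is "all good" and entry 7 "all bad"
+  */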
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
-
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_EIP_CKSUM_BAD);
+ /**
+ * data to be shuffled by result of flag mask, shifted down 12.
+ * If RSS(bit12)/VLAN(bit13) are set,
+ * shuffle moves appropriate flags in place.
+ */
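+ /* shuffle index after the >>12: bit0 = RSS valid (bit 12),
+  * bit1 = VLAN stripped (bit 13); entries 0-3 of each lane are
+  * none / RSS / VLAN / both
+  */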
+ const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0,
+ /* end up 128-bits */
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
}
/**
- * convert descriptors 4-7 into mbufs, adjusting length and
- * re-arranging fields. Then write into the mbuf
+ * convert descriptors 4-7 into mbufs, re-arrange fields.
+ * Then write into the mbuf.
*/
- const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7,
- PKTLEN_SHIFT);
- const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5,
- PKTLEN_SHIFT);
- const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7,
- len6_7, 0x80);
- const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5,
- len4_5, 0x80);
- __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
- __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
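+ /* flex descriptors report pkt_len in plain bytes, so the
+  * PKTLEN_SHIFT blend needed for legacy descriptors is gone
+  */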
+ __m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
+ __m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);
mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
/**
- * to get packet types, shift 64-bit values down 30 bits
- * and so ptype is in lower 8-bits in each
+ * to get the packet types, the 10-bit ptype is located in
+ * bits 16-25 of each 128-bit descriptor
*/
- const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
- const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
- const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
- const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
- const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
- const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
+ const __m256i ptype_mask =
+ _mm256_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
+ const __m256i ptypes6_7 =
+ _mm256_and_si256(raw_desc6_7, ptype_mask);
+ const __m256i ptypes4_5 =
+ _mm256_and_si256(raw_desc4_5, ptype_mask);
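+ /* the ptype word is epi16 element 1 of each 128-bit half:
+  * index 1 for the low-lane desc, index 9 for the high-lane desc
+  */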
+ const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
+ const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
+ const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
+ const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
/* merge the status bits into one register: status_error0 sits in
 * dword 2 of each descriptor, which the high-dword unpack gathers
 */
- const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
- desc4_5);
+ const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
+ raw_desc4_5);
/**
- * convert descriptors 0-3 into mbufs, adjusting length and
- * re-arranging fields. Then write into the mbuf
+ * convert descriptors 0-3 into mbufs, re-arrange fields.
+ * Then write into the mbuf.
*/
- const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3,
- PKTLEN_SHIFT);
- const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1,
- PKTLEN_SHIFT);
- const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3,
- len2_3, 0x80);
- const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1,
- len0_1, 0x80);
- __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
- __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
+ __m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
+ __m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);
mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
- /* get the packet types */
- const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
- const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
- const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
- const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
- const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
- const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
+ /**
+ * to get the packet types, the 10-bit ptype is located in
+ * bits 16-25 of each 128-bit descriptor
+ */
+ const __m256i ptypes2_3 =
+ _mm256_and_si256(raw_desc2_3, ptype_mask);
+ const __m256i ptypes0_1 =
+ _mm256_and_si256(raw_desc0_1, ptype_mask);
+ const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
+ const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
+ const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
+ const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
/* merge the status bits into one register */
- const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
- desc0_1);
+ const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
+ raw_desc0_1);
/**
* take the two sets of status bits and merge to one
/* get only flag/error bits we want */
const __m256i flag_bits =
_mm256_and_si256(status0_7, flags_mask);
- /* set vlan and rss flags */
- const __m256i vlan_flags =
- _mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
- const __m256i rss_flags =
- _mm256_shuffle_epi8(rss_flags_shuf,
- _mm256_srli_epi32(flag_bits, 11));
/**
* l3_l4 error flags: shuffle, then shift left one bit to undo the
* adjustment made in l3_l4_flags_shuf, and finally mask out extra bits
*/
__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
- _mm256_srli_epi32(flag_bits, 22));
+ _mm256_srli_epi32(flag_bits, 4));
l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+ /* set RSS and VLAN flags */
+ const __m256i rss_vlan_flag_bits =
+ _mm256_srli_epi32(flag_bits, 12);
+ const __m256i rss_vlan_flags =
+ _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+ rss_vlan_flag_bits);
/* merge flags */
- const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
- _mm256_or_si256(rss_flags, vlan_flags));
+ __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ rss_vlan_flags);
+
+ if (rxq->fdir_enabled) {
+ const __m256i fdir_id4_7 =
+ _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+ const __m256i fdir_id0_3 =
+ _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+ const __m256i fdir_id0_7 =
+ _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
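+ /* the two unpacks leave the flow IDs ordered {6,4,2,0} in the
+  * low lane and {7,5,3,1} in the high lane, hence the scattered
+  * extract indices below
+  */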
+ const __m256i fdir_flags =
+ ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
+
+ /* merge with fdir_flags */
+ mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+ /* write to mbuf: have to use scalar store here */
+ rx_pkts[i + 0]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 3);
+
+ rx_pkts[i + 1]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 7);
+
+ rx_pkts[i + 2]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 2);
+
+ rx_pkts[i + 3]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 6);
+
+ rx_pkts[i + 4]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 1);
+
+ rx_pkts[i + 5]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 5);
+
+ rx_pkts[i + 6]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 0);
+
+ rx_pkts[i + 7]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 4);
+ } /* if() on fdir_enabled */
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ /**
+ * need to load the 2nd 16B of each desc for RSS hash parsing;
+ * taking this path causes a performance drop.
+ */
+ if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_RSS_HASH) {
+ /* load bottom half of every 32B desc */
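+ /* each 16B half is loaded with a single access; the barriers
+  * keep the compiler from fusing the loads into wider reads
+  * that could tear against descriptors the NIC is still writing
+  */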
+ const __m128i raw_desc_bh7 =
+ _mm_load_si128
+ ((void *)(&rxdp[7].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh6 =
+ _mm_load_si128
+ ((void *)(&rxdp[6].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh5 =
+ _mm_load_si128
+ ((void *)(&rxdp[5].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh4 =
+ _mm_load_si128
+ ((void *)(&rxdp[4].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh3 =
+ _mm_load_si128
+ ((void *)(&rxdp[3].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh2 =
+ _mm_load_si128
+ ((void *)(&rxdp[2].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh1 =
+ _mm_load_si128
+ ((void *)(&rxdp[1].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh0 =
+ _mm_load_si128
+ ((void *)(&rxdp[0].wb.status_error1));
+
+ __m256i raw_desc_bh6_7 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh6),
+ raw_desc_bh7, 1);
+ __m256i raw_desc_bh4_5 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh4),
+ raw_desc_bh5, 1);
+ __m256i raw_desc_bh2_3 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh2),
+ raw_desc_bh3, 1);
+ __m256i raw_desc_bh0_1 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh0),
+ raw_desc_bh1, 1);
+
+ /**
+ * shift the 32-bit RSS hash into the upper half of each
+ * 64-bit lane before masking
+ */
+ __m256i rss_hash6_7 =
+ _mm256_slli_epi64(raw_desc_bh6_7, 32);
+ __m256i rss_hash4_5 =
+ _mm256_slli_epi64(raw_desc_bh4_5, 32);
+ __m256i rss_hash2_3 =
+ _mm256_slli_epi64(raw_desc_bh2_3, 32);
+ __m256i rss_hash0_1 =
+ _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+ __m256i rss_hash_msk =
+ _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+ 0xFFFFFFFF, 0, 0, 0);
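+ /* dword 3 of each 128-bit mbuf vector maps to the hash.rss
+  * field, the only dword the ORs below may touch
+  */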
+
+ rss_hash6_7 = _mm256_and_si256
+ (rss_hash6_7, rss_hash_msk);
+ rss_hash4_5 = _mm256_and_si256
+ (rss_hash4_5, rss_hash_msk);
+ rss_hash2_3 = _mm256_and_si256
+ (rss_hash2_3, rss_hash_msk);
+ rss_hash0_1 = _mm256_and_si256
+ (rss_hash0_1, rss_hash_msk);
+
+ mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+ mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+ mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+ mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+ } /* if() on RSS hash parsing */
+#endif
+
/**
* At this point, we have the 8 sets of flags in the low 16-bits
* of each 32-bit value in mbuf_flags.
{
return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}
+
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered
+ * packets.
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ice_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
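+ /* view the 32 split flags as four 64-bit words for a quick
+  * all-zero test
+  */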
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes.
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > ICE_VPMD_RX_BURST) {
+ uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, ICE_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < ICE_VPMD_RX_BURST)
+ return retval;
+ }
+ return retval + ice_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, nb_pkts);
+}
+
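+/* build one 16B TX data descriptor: the low qword carries the buffer
+ * DMA address, the high qword packs dtype, command flags and data length
+ */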
+static inline void
+ice_vtx1(volatile struct ice_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw =
+ (ICE_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_iova + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+ice_vtx(volatile struct ice_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << ICE_TXD_QW1_CMD_S));
+
+ /* if txdp is not 32-byte aligned, do one descriptor to align it */
+ if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
+ ice_vtx1(txdp, *pkt, flags);
+ nb_pkts--, txdp++, pkt++;
+ }
+
+ /* do four at a time while possible, two descriptors per 32B store */
+ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
+ uint64_t hi_qw3 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[3]->data_len <<
+ ICE_TXD_QW1_TX_BUF_SZ_S);
+ uint64_t hi_qw2 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[2]->data_len <<
+ ICE_TXD_QW1_TX_BUF_SZ_S);
+ uint64_t hi_qw1 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[1]->data_len <<
+ ICE_TXD_QW1_TX_BUF_SZ_S);
+ uint64_t hi_qw0 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[0]->data_len <<
+ ICE_TXD_QW1_TX_BUF_SZ_S);
+
+ __m256i desc2_3 =
+ _mm256_set_epi64x
+ (hi_qw3,
+ pkt[3]->buf_iova + pkt[3]->data_off,
+ hi_qw2,
+ pkt[2]->buf_iova + pkt[2]->data_off);
+ __m256i desc0_1 =
+ _mm256_set_epi64x
+ (hi_qw1,
+ pkt[1]->buf_iova + pkt[1]->data_off,
+ hi_qw0,
+ pkt[0]->buf_iova + pkt[0]->data_off);
+ _mm256_store_si256((void *)(txdp + 2), desc2_3);
+ _mm256_store_si256((void *)txdp, desc0_1);
+ }
+
+ /* do any last ones */
+ while (nb_pkts) {
+ ice_vtx1(txdp, *pkt, flags);
+ txdp++, pkt++, nb_pkts--;
+ }
+}
+
+static inline uint16_t
+ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+ volatile struct ice_tx_desc *txdp;
+ struct ice_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = ICE_TD_CMD;
+ uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ice_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
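+ /* the ring wraps inside this burst: fill the slots up to the
+  * end, setting RS on the last descriptor so completion is
+  * reported, then continue from slot 0
+  */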
+ ice_tx_backlog_entry(txep, tx_pkts, n);
+
+ ice_vtx(txdp, tx_pkts, n - 1, flags);
+ tx_pkts += (n - 1);
+ txdp += (n - 1);
+
+ ice_vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ ice_vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
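+ /* past the tx_next_rs marker: request a completion report
+  * (RS) on that descriptor and advance the marker by
+  * tx_rs_thresh
+  */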
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
+ ICE_TXD_QW1_CMD_S);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}