rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
}
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vf->vlan_v2_caps.offloads.stripping_support;
+ uint32_t stripping_cap;
+
+ if (stripping_support->outer)
+ stripping_cap = stripping_support->outer;
+ else
+ stripping_cap = stripping_support->inner;
+
+ if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ } else {
+ rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ }
+
iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
rxq->mp = mp;
static inline void
iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
- volatile union iavf_rx_flex_desc *rxdp,
+ volatile union iavf_rx_flex_desc *rxdp,
+ uint8_t rx_flags)
{
- if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
- (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- mb->vlan_tci =
- rte_le_to_cpu_16(rxdp->wb.l2tag1);
- } else {
- mb->vlan_tci = 0;
- }
+ uint16_t vlan_tci = 0;
+
+ /* Take the stripped tag from L2TAG1 only when the queue is
+  * configured for that location AND the descriptor reports a
+  * tag present (L2TAG1P status bit).
+  */
+ if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
+ rte_le_to_cpu_64(rxdp->wb.status_error0) &
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
+ vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
- if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
- (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
- PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
- mb->vlan_tci_outer = mb->vlan_tci;
- mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
- PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
- rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
- rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
- } else {
- mb->vlan_tci_outer = 0;
- }
+ /* Alternatively the tag may be written to the second L2TAG2
+  * field; that field only exists in the 32-byte descriptor.
+  */
+ if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
+ rte_le_to_cpu_16(rxdp->wb.status_error1) &
+ (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
+ vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
#endif
+
+ /* NOTE(review): two behavioral deltas vs. the removed code.
+  * 1) A stripped tag whose TCI happens to be 0 (VID 0, PCP 0) is
+  *    indistinguishable from "no tag" here, so PKT_RX_VLAN is not
+  *    set for it even though the descriptor status bit was set.
+  * 2) The old L2TAG2 branch reported PKT_RX_QINQ/QINQ_STRIPPED and
+  *    filled vlan_tci_outer; the new code reports a single tag only.
+  * Confirm both are intended by the VLAN-V2 design.
+  */
+ if (vlan_tci) {
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci = vlan_tci;
+ }
}
/* Translate the rx descriptor status and error fields to pkt flags */
rxm->ol_flags = 0;
rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
- iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+ iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
rxm->ol_flags |= pkt_flags;
first_seg->ol_flags = 0;
first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
- iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+ iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
- iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+ iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
* If RSS(bit12)/VLAN(bit13) are set,
* shuffle moves appropriate flags in place.
*/
- const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
+ const __m256i rss_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, 0);
+ const __m256i vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0,
+ /* end up 128-bits */
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0);
+
uint16_t i, received;
for (i = 0, received = 0; i < nb_pkts;
_mm256_srli_epi32(flag_bits, 4));
l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+
/* set rss and vlan flags */
const __m256i rss_vlan_flag_bits =
_mm256_srli_epi32(flag_bits, 12);
- const __m256i rss_vlan_flags =
- _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+ const __m256i rss_flags =
+ _mm256_shuffle_epi8(rss_flags_shuf,
rss_vlan_flag_bits);
+ __m256i vlan_flags = _mm256_setzero_si256();
+
+ if (rxq->rx_flags == IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1)
+ vlan_flags =
+ _mm256_shuffle_epi8(vlan_flags_shuf,
+ rss_vlan_flag_bits);
+
+ const __m256i rss_vlan_flags =
+ _mm256_or_si256(rss_flags, vlan_flags);
+
/* merge flags */
__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
rss_vlan_flags);
* will cause performance drop to get into this context.
*/
if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ DEV_RX_OFFLOAD_RSS_HASH ||
+ rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
(_mm256_castsi128_si256(raw_desc_bh0),
raw_desc_bh1, 1);
- /**
- * to shift the 32b RSS hash value to the
- * highest 32b of each 128b before mask
- */
- __m256i rss_hash6_7 =
- _mm256_slli_epi64(raw_desc_bh6_7, 32);
- __m256i rss_hash4_5 =
- _mm256_slli_epi64(raw_desc_bh4_5, 32);
- __m256i rss_hash2_3 =
- _mm256_slli_epi64(raw_desc_bh2_3, 32);
- __m256i rss_hash0_1 =
- _mm256_slli_epi64(raw_desc_bh0_1, 32);
-
- __m256i rss_hash_msk =
- _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
- 0xFFFFFFFF, 0, 0, 0);
-
- rss_hash6_7 = _mm256_and_si256
- (rss_hash6_7, rss_hash_msk);
- rss_hash4_5 = _mm256_and_si256
- (rss_hash4_5, rss_hash_msk);
- rss_hash2_3 = _mm256_and_si256
- (rss_hash2_3, rss_hash_msk);
- rss_hash0_1 = _mm256_and_si256
- (rss_hash0_1, rss_hash_msk);
-
- mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
- mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
- mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
- mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+ if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_RSS_HASH) {
+ /**
+ * to shift the 32b RSS hash value to the
+ * highest 32b of each 128b before mask
+ */
+ __m256i rss_hash6_7 =
+ _mm256_slli_epi64(raw_desc_bh6_7, 32);
+ __m256i rss_hash4_5 =
+ _mm256_slli_epi64(raw_desc_bh4_5, 32);
+ __m256i rss_hash2_3 =
+ _mm256_slli_epi64(raw_desc_bh2_3, 32);
+ __m256i rss_hash0_1 =
+ _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+ const __m256i rss_hash_msk =
+ _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+ 0xFFFFFFFF, 0, 0, 0);
+
+ rss_hash6_7 = _mm256_and_si256
+ (rss_hash6_7, rss_hash_msk);
+ rss_hash4_5 = _mm256_and_si256
+ (rss_hash4_5, rss_hash_msk);
+ rss_hash2_3 = _mm256_and_si256
+ (rss_hash2_3, rss_hash_msk);
+ rss_hash0_1 = _mm256_and_si256
+ (rss_hash0_1, rss_hash_msk);
+
+ mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+ mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+ mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+ mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+ }
+
+ if (rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ /* merge the status/error-1 bits into one register */
+ const __m256i status1_4_7 =
+ _mm256_unpacklo_epi32(raw_desc_bh6_7,
+ raw_desc_bh4_5);
+ const __m256i status1_0_3 =
+ _mm256_unpacklo_epi32(raw_desc_bh2_3,
+ raw_desc_bh0_1);
+
+ const __m256i status1_0_7 =
+ _mm256_unpacklo_epi64(status1_4_7,
+ status1_0_3);
+
+ const __m256i l2tag2p_flag_mask =
+ _mm256_set1_epi32
+ (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+
+ __m256i l2tag2p_flag_bits =
+ _mm256_and_si256
+ (status1_0_7, l2tag2p_flag_mask);
+
+ l2tag2p_flag_bits =
+ _mm256_srli_epi32(l2tag2p_flag_bits,
+ IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+
+ const __m256i l2tag2_flags_shuf =
+ _mm256_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* end up 128-bits */
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0,
+ PKT_RX_VLAN |
+ PKT_RX_VLAN_STRIPPED,
+ 0);
+
+ vlan_flags =
+ _mm256_shuffle_epi8(l2tag2_flags_shuf,
+ l2tag2p_flag_bits);
+
+ /* merge with vlan_flags */
+ mbuf_flags = _mm256_or_si256
+ (mbuf_flags, vlan_flags);
+
+ /* L2TAG2_2 */
+ __m256i vlan_tci6_7 =
+ _mm256_slli_si256(raw_desc_bh6_7, 4);
+ __m256i vlan_tci4_5 =
+ _mm256_slli_si256(raw_desc_bh4_5, 4);
+ __m256i vlan_tci2_3 =
+ _mm256_slli_si256(raw_desc_bh2_3, 4);
+ __m256i vlan_tci0_1 =
+ _mm256_slli_si256(raw_desc_bh0_1, 4);
+
+ const __m256i vlan_tci_msk =
+ _mm256_set_epi32(0, 0xFFFF0000, 0, 0,
+ 0, 0xFFFF0000, 0, 0);
+
+ vlan_tci6_7 = _mm256_and_si256
+ (vlan_tci6_7, vlan_tci_msk);
+ vlan_tci4_5 = _mm256_and_si256
+ (vlan_tci4_5, vlan_tci_msk);
+ vlan_tci2_3 = _mm256_and_si256
+ (vlan_tci2_3, vlan_tci_msk);
+ vlan_tci0_1 = _mm256_and_si256
+ (vlan_tci0_1, vlan_tci_msk);
+
+ mb6_7 = _mm256_or_si256(mb6_7, vlan_tci6_7);
+ mb4_5 = _mm256_or_si256(mb4_5, vlan_tci4_5);
+ mb2_3 = _mm256_or_si256(mb2_3, vlan_tci2_3);
+ mb0_1 = _mm256_or_si256(mb0_1, vlan_tci0_1);
+ }
} /* if() on RSS hash parsing */
#endif