net/iavf: support flow mark in SSE path
author     Leyi Rong <leyi.rong@intel.com>
           Mon, 20 Apr 2020 06:16:23 +0000 (14:16 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Tue, 21 Apr 2020 11:57:09 +0000 (13:57 +0200)
Support Flow Director mark ID parsing from Flex
Rx descriptor in SSE path.
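
For reference, the per-descriptor logic that the SSE code below applies to
four descriptors at a time can be sketched in scalar C as follows (the
helper name is illustrative only and not part of this patch):

    #include <rte_mbuf.h>

    /* Scalar sketch: convert one flex descriptor flow_id into mbuf state. */
    static inline void
    sketch_fdir_to_mbuf(uint32_t flow_id, struct rte_mbuf *mb)
    {
            /* flow_id == 0xFFFFFFFF means no Flow Director match */
            if (flow_id != 0xFFFFFFFF)
                    mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
            /* the ID is stored unconditionally; it is only meaningful
             * when PKT_RX_FDIR_ID is set in ol_flags
             */
            mb->hash.fdir.hi = flow_id;
    }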

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
drivers/net/iavf/iavf_rxtx_vec_sse.c

index 5a03900..02c1037 100644
@@ -189,6 +189,25 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
        _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
 }
 
+static inline __m128i
+flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+       RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+       RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+       const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
+                       PKT_RX_FDIR_ID);
+       /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+       const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+       __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+                       fdir_mis_mask);
+       /* XOR with the all-ones mask inverts the bits of fdir_mask */
+       fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
+       const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+       return fdir_flags;
+}
+
 static inline void
 flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
                       struct rte_mbuf **rx_pkts)
@@ -267,6 +286,36 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
        /* merge the flags */
        flags = _mm_or_si128(flags, rss_vlan);
 
+       if (rxq->fdir_enabled) {
+               const __m128i fdir_id0_1 =
+                       _mm_unpackhi_epi32(descs[0], descs[1]);
+
+               const __m128i fdir_id2_3 =
+                       _mm_unpackhi_epi32(descs[2], descs[3]);
+
+               const __m128i fdir_id0_3 =
+                       _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+               const __m128i fdir_flags =
+                       flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+               /* merge with fdir_flags */
+               flags = _mm_or_si128(flags, fdir_flags);
+
+               /* write fdir_id to mbuf */
+               rx_pkts[0]->hash.fdir.hi =
+                       _mm_extract_epi32(fdir_id0_3, 0);
+
+               rx_pkts[1]->hash.fdir.hi =
+                       _mm_extract_epi32(fdir_id0_3, 1);
+
+               rx_pkts[2]->hash.fdir.hi =
+                       _mm_extract_epi32(fdir_id0_3, 2);
+
+               rx_pkts[3]->hash.fdir.hi =
+                       _mm_extract_epi32(fdir_id0_3, 3);
+       } /* if() on fdir_enabled */
+
        /**
         * At this point, we have the 4 sets of flags in the low 16-bits
         * of each 32-bit value in flags.
@@ -604,7 +653,8 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
        const __m128i zero = _mm_setzero_si128();
        /* mask to shuffle from desc. to mbuf */
        const __m128i shuf_msk = _mm_set_epi8
-                       (15, 14, 13, 12,  /* octet 12~15, 32 bits rss */
+                       (0xFF, 0xFF,
+                        0xFF, 0xFF,  /* rss not supported */
                         11, 10,      /* octet 10~11, 16 bits vlan_macip */
                         5, 4,        /* octet 4~5, 16 bits data_len */
                         0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
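
Usage note (not part of the patch): once a flow rule with a MARK action
matches, an application can read the mark from the received mbuf roughly as
sketched below; the flag and field names are the standard mbuf API, while the
function itself is only illustrative.

    #include <rte_mbuf.h>

    /* Sketch: consume the Flow Director mark in an application Rx path. */
    static inline void
    sketch_handle_mark(const struct rte_mbuf *mb)
    {
            if (mb->ol_flags & PKT_RX_FDIR_ID) {
                    uint32_t mark = mb->hash.fdir.hi;
                    /* dispatch based on the mark set by the matching rule */
                    (void)mark;
            }
    }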