net/mlx: do not enforce RSS hash offload
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 7bd254f..02eb659 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
 #define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
 
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
 #include "mlx5.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rxtx_vec.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
 
 #ifndef __INTEL_COMPILER
 #pragma GCC diagnostic ignored "-Wcast-qual"
 /**
  * Store free buffers to RX SW ring.
  *
- * @param rxq
- *   Pointer to RX queue structure.
+ * @param elts
+ *   Pointer to SW ring to be filled.
  * @param pkts
  *   Pointer to array of packets to be stored.
  * @param pkts_n
  *   Number of packets to be stored.
  */
 static inline void
-rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
+rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
 {
-       const uint16_t q_mask = (1 << rxq->elts_n) - 1;
-       struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
        unsigned int pos;
        uint16_t p = n & -2;
 
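The helper no longer needs the queue: the caller resolves the SW-ring slice
and advances the consumer index itself. A minimal sketch of the new calling
convention, reconstructed from the removed lines above (the wrapper name is
hypothetical):

    static inline void
    rxq_copy_mbuf_sketch(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
                         uint16_t n)
    {
            const uint16_t q_mask = (1 << rxq->elts_n) - 1;
            /* The slice rxq_copy_mbuf_v() used to compute internally. */
            struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];

            rxq_copy_mbuf_v(elts, pkts, n);
            rxq->rq_pi += n; /* consumer index now advances in the caller */
    }
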
@@ -118,7 +116,6 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                             14, 15,  6,  7,
                             10, 11,  2,  3);
 #endif
-
        /*
         * A. load mCQEs into a 128bit register.
         * B. store rearm data to mbuf.
@@ -133,8 +130,9 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                __m128i byte_cnt, invalid_mask;
 #endif
 
-               if (!(pos & 0x7) && pos + 8 < mcqe_n)
-                       rte_prefetch0((void *)(cq + pos + 8));
+               for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+                       if (likely(pos + i < mcqe_n))
+                               rte_prefetch0((void *)(cq + pos + i));
                /* A.1 load mCQEs into a 128bit register. */
                mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
                mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
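
Instead of prefetching one CQE eight slots ahead on every eighth pass, each
iteration now issues a bounds-checked prefetch for every descriptor it is
about to decompress; the look-ahead-by-eight prefetch reappears below, where
the window of eight CQEs is invalidated. The guarded idiom in isolation
(desc, n and W are stand-ins for cq, mcqe_n and MLX5_VPMD_DESCS_PER_LOOP):

    for (i = 0; i < W; ++i)
            if (likely(pos + i < n))
                    rte_prefetch0((void *)(desc + pos + i));
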
@@ -191,9 +189,30 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                        elts[pos + 2]->hash.fdir.hi = flow_tag;
                        elts[pos + 3]->hash.fdir.hi = flow_tag;
                }
+               if (rxq->dynf_meta) {
+                       int32_t offs = rxq->flow_meta_offset;
+                       const uint32_t meta =
+                               *RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *);
+
+                       /* Check if title packet has valid metadata. */
+                       if (meta) {
+                               MLX5_ASSERT(t_pkt->ol_flags &
+                                           rxq->flow_meta_mask);
+                               *RTE_MBUF_DYNFIELD(elts[pos], offs,
+                                                       uint32_t *) = meta;
+                               *RTE_MBUF_DYNFIELD(elts[pos + 1], offs,
+                                                       uint32_t *) = meta;
+                               *RTE_MBUF_DYNFIELD(elts[pos + 2], offs,
+                                                       uint32_t *) = meta;
+                               *RTE_MBUF_DYNFIELD(elts[pos + 3], offs,
+                                                       uint32_t *) = meta;
+                       }
+               }
                pos += MLX5_VPMD_DESCS_PER_LOOP;
                /* Move to next CQE and invalidate consumed CQEs. */
                if (!(pos & 0x7) && pos < mcqe_n) {
+                       if (pos + 8 < mcqe_n)
+                               rte_prefetch0((void *)(cq + pos + 8));
                        mcq = (void *)(cq + pos);
                        for (i = 0; i < 8; ++i)
                                cq[inv++].op_own = MLX5_CQE_INVALIDATE;
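
The mini-CQE path replicates the title packet's 32-bit flow metadata into
every mbuf of the decompressed session through an mbuf dynamic field. A
hedged sketch of the registration this depends on, using the public rte_flow
API; how the PMD actually caches the offset and mask into the rxq fields is
an assumption here:

    #include <rte_flow.h>

    static int
    rxq_meta_setup(struct mlx5_rxq_data *rxq)
    {
            /* Reserve the metadata dynamic field and flag, once per process. */
            if (rte_flow_dynf_metadata_register() < 0)
                    return -1; /* no space left in rte_mbuf */
            rxq->flow_meta_offset = rte_flow_dynf_metadata_offs;
            rxq->flow_meta_mask = rte_flow_dynf_metadata_mask;
            rxq->dynf_meta = 1;
            return 0;
    }
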
@@ -206,7 +225,6 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
        rxq->stats.ipackets += mcqe_n;
        rxq->stats.ibytes += rcvd_byte;
 #endif
-       rxq->cq_ci += mcqe_n;
        return mcqe_n;
 }
 
@@ -230,7 +248,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
        __m128i pinfo0, pinfo1;
        __m128i pinfo, ptype;
        __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
-                                         rxq->hw_timestamp * PKT_RX_TIMESTAMP);
+                                         rxq->hw_timestamp * rxq->timestamp_rx_flag);
        __m128i cv_flags;
        const __m128i zero = _mm_setzero_si128();
        const __m128i ptype_mask =
@@ -259,7 +277,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
                              PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
                              PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
        const __m128i mbuf_init =
-               _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
+               _mm_load_si128((__m128i *)&rxq->mbuf_initializer);
        __m128i rearm0, rearm1, rearm2, rearm3;
        uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
 
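Two related changes land here: the fixed PKT_RX_TIMESTAMP bit is replaced by
a per-port dynamic flag, and mbuf_initializer is now read as a full 16-byte
vector rather than only its low quadword, so the rearm blends below can take
their upper lanes from the initializer as well. A sketch of where the offset
and flag plausibly come from; the registration call is the real rte_mbuf_dyn
helper, while wiring the results into the rxq fields is an assumption:

    #include <rte_mbuf_dyn.h>

    static int
    rxq_rx_ts_setup(struct mlx5_rxq_data *rxq)
    {
            int offset;
            uint64_t flag;

            /* Register the unified Rx timestamp dynamic field and flag. */
            if (rte_mbuf_dyn_rx_timestamp_register(&offset, &flag) < 0)
                    return -1;
            rxq->timestamp_offset = offset;  /* consumed by the Rx bursts */
            rxq->timestamp_rx_flag = flag;   /* OR-ed into mbuf ol_flags */
            return 0;
    }
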
@@ -272,9 +290,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
        pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
        ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
        if (rxq->mark) {
-               const __m128i pinfo_ft_mask =
-                       _mm_set_epi32(0xffffff00, 0xffffff00,
-                                     0xffffff00, 0xffffff00);
+               const __m128i pinfo_ft_mask = _mm_set1_epi32(0xffffff00);
                const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
                __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
                __m128i flow_tag, invalid_mask;
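
The paired-constant cleanup is behavior-preserving: _mm_set1_epi32(x)
broadcasts x to all four 32-bit lanes, exactly what the removed
_mm_set_epi32(x, x, x, x) spelled out by hand. The same idiom replaces the
paired _mm_set_epi64x() constants further down:

    /* These two produce identical registers. */
    const __m128i a = _mm_set_epi32(0xffffff00, 0xffffff00,
                                    0xffffff00, 0xffffff00);
    const __m128i b = _mm_set1_epi32(0xffffff00);
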
@@ -352,12 +368,16 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
 }
 
 /**
- * Receive burst of packets. An errored completion also consumes a mbuf, but the
- * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
- * before returning to application.
+ * Process non-compressed completions and fill in mbufs in the RX SW ring
+ * with data extracted from each completion descriptor.
  *
  * @param rxq
  *   Pointer to RX queue structure.
+ * @param cq
+ *   Pointer to a completion array starting with a non-compressed completion.
+ * @param elts
+ *   Pointer to SW ring to be filled.
  * @param[out] pkts
  *   Array to store received packets.
  * @param pkts_n
@@ -365,35 +385,28 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
  * @param[out] err
  *   Pointer to a flag. Set non-zero value if pkts array has at least one error
  *   packet to handle.
+ * @param[out] comp
+ *   Pointer to an index. Set it to the first compressed completion if any.
  *
  * @return
- *   Number of packets received including errors (<= pkts_n).
+ *   Number of CQEs successfully processed.
  */
 static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
-           uint64_t *err)
+rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+                struct rte_mbuf **elts, struct rte_mbuf **pkts,
+                uint16_t pkts_n, uint64_t *err, uint64_t *comp)
 {
        const uint16_t q_n = 1 << rxq->cqe_n;
        const uint16_t q_mask = q_n - 1;
-       volatile struct mlx5_cqe *cq;
-       struct rte_mbuf **elts;
        unsigned int pos;
-       uint64_t n;
-       uint16_t repl_n;
+       uint64_t n = 0;
        uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
        uint16_t nocmp_n = 0;
-       uint16_t rcvd_pkt = 0;
-       unsigned int cq_idx = rxq->cq_ci & q_mask;
-       unsigned int elts_idx;
        unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
-       const __m128i owner_check =
-               _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
-       const __m128i opcode_check =
-               _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
-       const __m128i format_check =
-               _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
-       const __m128i resp_err_check =
-               _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
+       const __m128i owner_check = _mm_set1_epi64x(0x0100000001000000LL);
+       const __m128i opcode_check = _mm_set1_epi64x(0xf0000000f0000000LL);
+       const __m128i format_check = _mm_set1_epi64x(0x0c0000000c000000LL);
+       const __m128i resp_err_check = _mm_set1_epi64x(0xe0000000e0000000LL);
 #ifdef MLX5_PMD_SOFT_COUNTERS
        uint32_t rcvd_byte = 0;
        /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
@@ -425,38 +438,6 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                              0,
                              rxq->crc_present * RTE_ETHER_CRC_LEN);
        const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
-
-       assert(rxq->sges_n == 0);
-       assert(rxq->cqe_n == rxq->elts_n);
-       cq = &(*rxq->cqes)[cq_idx];
-       rte_prefetch0(cq);
-       rte_prefetch0(cq + 1);
-       rte_prefetch0(cq + 2);
-       rte_prefetch0(cq + 3);
-       pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
-       repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
-       if (repl_n >= rxq->rq_repl_thresh)
-               mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
-       /* See if there're unreturned mbufs from compressed CQE. */
-       rcvd_pkt = rxq->decompressed;
-       if (rcvd_pkt > 0) {
-               rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
-               rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
-               rxq->rq_pi += rcvd_pkt;
-               rxq->decompressed -= rcvd_pkt;
-               pkts += rcvd_pkt;
-       }
-       elts_idx = rxq->rq_pi & q_mask;
-       elts = &(*rxq->elts)[elts_idx];
-       /* Not to overflow pkts array. */
-       pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
-       /* Not to cross queue end. */
-       pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
-       pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
-       if (!pkts_n)
-               return rcvd_pkt;
-       /* At this point, there shouldn't be any remained packets. */
-       assert(rxq->decompressed == 0);
        /*
 * A. load first Qword (8 bytes) in one loop.
 * B. copy 4 mbuf pointers from elts ring to returning pkts.
@@ -527,18 +508,18 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                /* B.2 copy mbuf pointers. */
                _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
                _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
-               rte_cio_rmb();
+               rte_io_rmb();
                /* C.1 load remaining CQE data and extract necessary fields. */
                cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
                cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
                cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
                cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
-               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
-               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
+               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].csum);
+               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum);
                cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
                cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
-               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
-               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
+               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd4[2]);
+               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd4[2]);
                cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
                cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
                /* C.2 generate final structure for mbuf with swapping bytes. */
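
Two independent cleanups meet in this hunk: rte_cio_rmb() is gone (the
coherent-IO barrier variants were folded into the plain rte_io_* barriers),
and the gathers now name real CQE fields (csum, rsvd4[2]) instead of offsets
buried in reserved areas. A sketch of the ordering rule the read barrier
enforces when polling a DMA-written CQ; the helper is hypothetical, the
opcode macros come from mlx5_prm.h:

    static inline int
    cqe_ready_sketch(volatile struct mlx5_cqe *cqe)
    {
            const uint8_t op_own = cqe->op_own;

            if (MLX5_CQE_OPCODE(op_own) == MLX5_CQE_INVALID)
                    return 0;       /* still owned by hardware */
            rte_io_rmb();   /* order the op_own load before payload loads */
            return 1;       /* byte_cnt, timestamp, ... are now safe */
    }
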
@@ -560,12 +541,12 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
                cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
                cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
-               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
-               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
+               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].csum);
+               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum);
                cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
                cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
-               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
-               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
+               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd4[2]);
+               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd4[2]);
                cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
                cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
                /* C.2 generate final structure for mbuf with swapping bytes. */
@@ -631,14 +612,54 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                /* D.5 fill in mbuf - rearm_data and packet_type. */
                rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
                if (rxq->hw_timestamp) {
-                       pkts[pos]->timestamp =
-                               rte_be_to_cpu_64(cq[pos].timestamp);
-                       pkts[pos + 1]->timestamp =
-                               rte_be_to_cpu_64(cq[pos + p1].timestamp);
-                       pkts[pos + 2]->timestamp =
-                               rte_be_to_cpu_64(cq[pos + p2].timestamp);
-                       pkts[pos + 3]->timestamp =
-                               rte_be_to_cpu_64(cq[pos + p3].timestamp);
+                       int offset = rxq->timestamp_offset;
+                       if (rxq->rt_timestamp) {
+                               struct mlx5_dev_ctx_shared *sh = rxq->sh;
+                               uint64_t ts;
+
+                               ts = rte_be_to_cpu_64(cq[pos].timestamp);
+                               mlx5_timestamp_set(pkts[pos], offset,
+                                       mlx5_txpp_convert_rx_ts(sh, ts));
+                               ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
+                               mlx5_timestamp_set(pkts[pos + 1], offset,
+                                       mlx5_txpp_convert_rx_ts(sh, ts));
+                               ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
+                               mlx5_timestamp_set(pkts[pos + 2], offset,
+                                       mlx5_txpp_convert_rx_ts(sh, ts));
+                               ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
+                               mlx5_timestamp_set(pkts[pos + 3], offset,
+                                       mlx5_txpp_convert_rx_ts(sh, ts));
+                       } else {
+                               mlx5_timestamp_set(pkts[pos], offset,
+                                       rte_be_to_cpu_64(cq[pos].timestamp));
+                               mlx5_timestamp_set(pkts[pos + 1], offset,
+                                       rte_be_to_cpu_64(cq[pos + p1].timestamp));
+                               mlx5_timestamp_set(pkts[pos + 2], offset,
+                                       rte_be_to_cpu_64(cq[pos + p2].timestamp));
+                               mlx5_timestamp_set(pkts[pos + 3], offset,
+                                       rte_be_to_cpu_64(cq[pos + p3].timestamp));
+                       }
+               }
+               if (rxq->dynf_meta) {
+                       /* This code is subject to further optimization. */
+                       int32_t offs = rxq->flow_meta_offset;
+
+                       *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+                               cq[pos].flow_table_metadata;
+                       *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
+                               cq[pos + p1].flow_table_metadata;
+                       *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
+                               cq[pos + p2].flow_table_metadata;
+                       *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
+                               cq[pos + p3].flow_table_metadata;
+                       if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
+                               pkts[pos]->ol_flags |= rxq->flow_meta_mask;
+                       if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
+                               pkts[pos + 1]->ol_flags |= rxq->flow_meta_mask;
+                       if (*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *))
+                               pkts[pos + 2]->ol_flags |= rxq->flow_meta_mask;
+                       if (*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *))
+                               pkts[pos + 3]->ol_flags |= rxq->flow_meta_mask;
                }
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Add up received bytes count. */
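
Timestamps and metadata now land in mbuf dynamic fields instead of the fixed
mbuf members, and when packet pacing is armed (rt_timestamp) the raw HW clock
is first converted to a realtime value via mlx5_txpp_convert_rx_ts(). A
plausible shape for the store helper, assuming the rte_mbuf_timestamp_t field
type; this is a sketch, not the PMD's actual mlx5_timestamp_set() body:

    #include <rte_mbuf_dyn.h>

    static __rte_always_inline void
    timestamp_set_sketch(struct rte_mbuf *mbuf, int offset, uint64_t ts)
    {
            /* Store into the dynamic field registered at this offset. */
            *RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = ts;
    }
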
@@ -654,37 +675,13 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                if (n != MLX5_VPMD_DESCS_PER_LOOP)
                        break;
        }
-       /* If no new CQE seen, return without updating cq_db. */
-       if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
-               return rcvd_pkt;
-       /* Update the consumer indexes for non-compressed CQEs. */
-       assert(nocmp_n <= pkts_n);
-       rxq->cq_ci += nocmp_n;
-       rxq->rq_pi += nocmp_n;
-       rcvd_pkt += nocmp_n;
 #ifdef MLX5_PMD_SOFT_COUNTERS
        rxq->stats.ipackets += nocmp_n;
        rxq->stats.ibytes += rcvd_byte;
 #endif
-       /* Decompress the last CQE if compressed. */
-       if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-               assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
-               rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-                                                       &elts[nocmp_n]);
-               /* Return more packets if needed. */
-               if (nocmp_n < pkts_n) {
-                       uint16_t n = rxq->decompressed;
-
-                       n = RTE_MIN(n, pkts_n - nocmp_n);
-                       rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
-                       rxq->rq_pi += n;
-                       rcvd_pkt += n;
-                       rxq->decompressed -= n;
-               }
-       }
-       rte_compiler_barrier();
-       *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
-       return rcvd_pkt;
+       if (comp_idx == n)
+               *comp = comp_idx;
+       return nocmp_n;
 }
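
With the burst routine split out of this header, the control flow that the
removed lines used to implement now lives in the caller: replenish, drain
leftovers from an earlier compressed session, process non-compressed CQEs,
decompress a trailing session, then ring the CQ doorbell. A condensed sketch
of that flow, reconstructed from the removed code above; treat it as an
outline, not the PMD's verbatim rxq_burst_v():

    static inline uint16_t
    rxq_burst_sketch(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
                     uint16_t pkts_n, uint64_t *err)
    {
            const uint16_t q_n = 1 << rxq->cqe_n;
            const uint16_t q_mask = q_n - 1;
            volatile struct mlx5_cqe *cq = &(*rxq->cqes)[rxq->cq_ci & q_mask];
            struct rte_mbuf **elts;
            uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
            uint16_t repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
            uint16_t rcvd_pkt = 0;
            uint16_t nocmp_n;

            if (repl_n >= rxq->rq_repl_thresh)
                    mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
            /* Drain mbufs left over from a previous compressed session. */
            if (rxq->decompressed) {
                    rcvd_pkt = RTE_MIN(rxq->decompressed, pkts_n);
                    rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & q_mask],
                                    pkts, rcvd_pkt);
                    rxq->rq_pi += rcvd_pkt;
                    rxq->decompressed -= rcvd_pkt;
                    pkts += rcvd_pkt;
            }
            /* (clamping of pkts_n against ring boundaries elided) */
            elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
            nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err,
                                       &comp_idx);
            if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
                    return rcvd_pkt; /* nothing new, skip the doorbell */
            rxq->cq_ci += nocmp_n;
            rxq->rq_pi += nocmp_n;
            rcvd_pkt += nocmp_n;
            /* Decompress the trailing compressed session, if flagged. */
            if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
                    rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
                                                            &elts[nocmp_n]);
                    /* cq_ci advance moved out of rxq_cq_decompress_v(). */
                    rxq->cq_ci += rxq->decompressed;
                    /* ... then copy out up to pkts_n - nocmp_n of them ... */
            }
            rte_compiler_barrier();
            *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
            return rcvd_pkt;
    }
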
 
 #endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */