net/mlx5: fix L4 packet type support
authorYongseok Koh <yskoh@mellanox.com>
Wed, 26 Jul 2017 19:29:33 +0000 (12:29 -0700)
committerFerruh Yigit <ferruh.yigit@intel.com>
Mon, 31 Jul 2017 17:58:41 +0000 (19:58 +0200)
TCP/UDP/NONFRAG/FRAG packet-type flags aren't reported for either the
outer or the inner header, even though the device supports them.

Fixes: 0603df73a077 ("net/mlx5: fix Rx packet validation and type")
Fixes: 6cb559d67b83 ("net/mlx5: add vectorized Rx/Tx burst for x86")
Cc: stable@dpdk.org
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_rxtx_vec_sse.c

index 5fd0e76..757c910 100644 (file)
@@ -888,6 +888,8 @@ RTE_INIT(rte_mlx5_pmd_init);
 static void
 rte_mlx5_pmd_init(void)
 {
+       /* Build the static table for ptype conversion. */
+       mlx5_set_ptype_table();
        /*
         * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
         * huge pages. Calling ibv_fork_init() during init allows
index 1644546..08cc814 100644 (file)
@@ -716,12 +716,20 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
        static const uint32_t ptypes[] = {
                /* refers to rxq_cq_to_pkt_type() */
+               RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_L4_NONFRAG,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
                RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L4_NONFRAG,
+               RTE_PTYPE_INNER_L4_FRAG,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
                RTE_PTYPE_UNKNOWN
-
        };
 
        if (dev->rx_pkt_burst == mlx5_rx_burst ||
index 5a24e15..2572a16 100644 (file)
@@ -79,30 +79,122 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 static __rte_always_inline uint32_t
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
 
-/*
- * The index to the array should have:
- * bit[1:0] = l3_hdr_type, bit[2] = tunneled, bit[3] = outer_l3_type
- */
-const uint32_t mlx5_ptype_table[] = {
-       RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,               /* b0001 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,               /* b0010 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, /* b0101 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, /* b0110 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,               /* b1001 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,               /* b1010 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, /* b1101 */
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, /* b1110 */
-       RTE_PTYPE_ALL_MASK                           /* b1111 */
+uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+       [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet; never overwritten. */
};

+/**
+ * Build a table to translate Rx completion flags to packet type.
+ *
+ * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
+ */
+void
+mlx5_set_ptype_table(void)
+{
+       unsigned int i;
+       uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
+
+       for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
+               (*p)[i] = RTE_PTYPE_UNKNOWN;
+       /*
+        * The index to the array should have:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled
+        * bit[7] = outer_l3_type
+        */
+       /* L3 */
+       (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       /* Fragmented */
+       (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       /* TCP */
+       (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       /* UDP */
+       (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       /* Repeat with outer_l3_type being set. Just in case. */
+       (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       /* Tunneled - L3 */
+       (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       /* Tunneled - Fragmented */
+       (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       /* Tunneled - TCP */
+       (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       /* Tunneled - UDP */
+       (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+       (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+       (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+       (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+}
+
 /**
  * Return the size of tailroom of WQ.
  *
@@ -1501,30 +1593,20 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 static inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
-       uint32_t pkt_type;
-       uint16_t flags = ntohs(cqe->hdr_type_etc);
+       uint8_t idx;
+       uint8_t pinfo = cqe->pkt_info;
+       uint16_t ptype = cqe->hdr_type_etc;
 
-       if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
-               pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_IPV4_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_IPV6_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
-               pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
-                            RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
-                            RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
-       } else {
-               pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_L3_HDR_TYPE_IPV6,
-                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_L3_HDR_TYPE_IPV4,
-                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
-       }
-       return pkt_type;
+       /*
+        * Build the 8-bit mlx5_ptype_table index from pkt_info bits
+        * 1:0 and raw (unswapped) hdr_type_etc bits 15:10:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled, bit[7] = outer_l3_type
+        */
+       idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+       return mlx5_ptype_table[idx];
 }
 
 /**
index 7fd59a4..02184ae 100644 (file)
@@ -334,8 +334,9 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
 
 /* mlx5_rxtx.c */
 
-extern const uint32_t mlx5_ptype_table[];
+extern uint32_t mlx5_ptype_table[];
 
+void mlx5_set_ptype_table(void);
 uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
index 0a65502..74e5953 100644 (file)
@@ -755,7 +755,7 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
        __m128i cv_flags;
        const __m128i zero = _mm_setzero_si128();
        const __m128i ptype_mask =
-               _mm_set_epi32(0xd06, 0xd06, 0xd06, 0xd06);
+               _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
        const __m128i ptype_ol_mask =
                _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
        const __m128i pinfo_mask =
@@ -816,18 +816,23 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
        }
        /*
         * Merge the two fields to generate the following:
-        * bit[1]  = l3_ok,    bit[2]     = l4_ok
-        * bit[8]  = cv,       bit[11:10] = l3_hdr_type
-        * bit[12] = tunneled, bit[13]    = outer_l3_type
+        * bit[1]     = l3_ok
+        * bit[2]     = l4_ok
+        * bit[8]     = cv
+        * bit[11:10] = l3_hdr_type
+        * bit[14:12] = l4_hdr_type
+        * bit[15]    = ip_frag
+        * bit[16]    = tunneled
+        * bit[17]    = outer_l3_type
         */
        ptype = _mm_and_si128(ptype, ptype_mask);
        pinfo = _mm_and_si128(pinfo, pinfo_mask);
-       pinfo = _mm_slli_epi32(pinfo, 12);
+       pinfo = _mm_slli_epi32(pinfo, 16);
        ptype = _mm_or_si128(ptype, pinfo);
        ptype = _mm_srli_epi32(ptype, 10);
        ptype = _mm_packs_epi32(ptype, zero);
        /* Errored packets will have RTE_PTYPE_ALL_MASK. */
-       op_err = _mm_srli_epi16(op_err, 12);
+       op_err = _mm_srli_epi16(op_err, 8);
        ptype = _mm_or_si128(ptype, op_err);
        pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
        pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];