net/mlx5: fix calculating TSO inline size
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 5a24e15..f89fa40 100644
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
 #include <rte_mbuf.h>
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 #include <rte_ether.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
 
 #include "mlx5.h"
 #include "mlx5_utils.h"
@@ -79,30 +72,123 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 static __rte_always_inline uint32_t
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
 
-/*
- * The index to the array should have:
- * bit[1:0] = l3_hdr_type, bit[2] = tunneled, bit[3] = outer_l3_type
- */
-const uint32_t mlx5_ptype_table[] = {
-       RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,               /* b0001 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,               /* b0010 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, /* b0101 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, /* b0110 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,               /* b1001 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,               /* b1010 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, /* b1101 */
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, /* b1110 */
-       RTE_PTYPE_ALL_MASK                           /* b1111 */
+uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+       [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
 
+/**
+ * Build a table to translate Rx completion flags to packet type.
+ *
+ * @note: update mlx5_dev_supported_ptypes_get() if this table changes.
+ */
+void
+mlx5_set_ptype_table(void)
+{
+       unsigned int i;
+       uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
+
+       /* Last entry must not be overwritten, reserved for errored packet. */
+       for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
+               (*p)[i] = RTE_PTYPE_UNKNOWN;
+       /*
+        * The index to the array should have:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled
+        * bit[7] = outer_l3_type
+        */
+       /* L3 */
+       (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       /* Fragmented */
+       (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       /* TCP */
+       (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       /* UDP */
+       (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       /* Repeat with outer_l3_type being set. Just in case. */
+       (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       /* Tunneled - L3 */
+       (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       /* Tunneled - Fragmented */
+       (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       /* Tunneled - TCP */
+       (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       /* Tunneled - UDP */
+       (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+}
+
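With the table now built at run time, the composition of the eight index bits is easiest to see in a standalone sketch. This is an editor's illustration, not part of the patch: ptype_idx is a hypothetical helper, and the field encodings (l3_hdr_type 1 = IPv6, 2 = IPv4; l4_hdr_type 1 = TCP, 2 = UDP) are inferred from the table entries above.

/*
 * Editor's sketch: how an index into mlx5_ptype_table is formed.
 * Encodings inferred from the entries above, not from the PRM.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t
ptype_idx(uint8_t outer_l3, uint8_t tunneled, uint8_t ip_frag,
	  uint8_t l4_hdr_type, uint8_t l3_hdr_type)
{
	/* bit[7] = outer_l3_type, bit[6] = tunneled, bit[5] = ip_frag,
	 * bit[4:2] = l4_hdr_type, bit[1:0] = l3_hdr_type. */
	return (uint8_t)((outer_l3 << 7) | (tunneled << 6) |
			 (ip_frag << 5) | ((l4_hdr_type & 0x7) << 2) |
			 (l3_hdr_type & 0x3));
}

int
main(void)
{
	/* Plain IPv4/TCP: no tunnel, no fragment -> entry 0x06. */
	assert(ptype_idx(0, 0, 0, 1, 2) == 0x06);
	/* Tunneled, outer IPv4, fragmented inner IPv6 -> entry 0x61. */
	assert(ptype_idx(0, 1, 1, 0, 1) == 0x61);
	return 0;
}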
 /**
  * Return the size of tailroom of WQ.
  *
@@ -368,6 +454,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        length -= pkt_inline_sz;
                        addr += pkt_inline_sz;
                }
+               raw += MLX5_WQE_DWORD_SIZE;
                if (txq->tso_en) {
                        tso = buf->ol_flags & PKT_TX_TCP_SEG;
                        if (tso) {
@@ -400,7 +487,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                copy_b = tso_header_sz - pkt_inline_sz;
                                /* First seg must contain all headers. */
                                assert(copy_b <= length);
-                               raw += MLX5_WQE_DWORD_SIZE;
                                if (copy_b &&
                                   ((end - (uintptr_t)raw) > copy_b)) {
                                        uint16_t n = (MLX5_WQE_DS(copy_b) -
@@ -413,14 +499,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                                   (void *)addr, copy_b);
                                        addr += copy_b;
                                        length -= copy_b;
+                                       /* Include padding for TSO header. */
+                                       copy_b = MLX5_WQE_DS(copy_b) *
+                                                MLX5_WQE_DWORD_SIZE;
                                        pkt_inline_sz += copy_b;
-                                       /*
-                                        * Another DWORD will be added
-                                        * in the inline part.
-                                        */
-                                       raw += MLX5_WQE_DS(copy_b) *
-                                              MLX5_WQE_DWORD_SIZE -
-                                              MLX5_WQE_DWORD_SIZE;
+                                       raw += copy_b;
                                } else {
                                        /* NOP WQE. */
                                        wqe->ctrl = (rte_v128u32_t){
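The core of the TSO fix is in this hunk: pkt_inline_sz now counts the padded size of the inlined header, matching what raw has actually advanced by. The rounding can be checked with a worked example; this is an editor's sketch with hypothetical sizes, where the 18-byte starting value assumes the initial Ethernet-segment inline in mlx5_tx_burst.

#include <assert.h>

#define MLX5_WQE_DWORD_SIZE 16
/* Number of 16-byte dwords needed for n bytes, as in mlx5_prm.h. */
#define MLX5_WQE_DS(n) \
	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)

int
main(void)
{
	unsigned int pkt_inline_sz = 18; /* bytes already in the eseg. */
	unsigned int tso_header_sz = 54; /* e.g. Eth(14)+IPv4(20)+TCP(20). */
	unsigned int copy_b = tso_header_sz - pkt_inline_sz; /* 36 left. */

	/* The header occupies whole dwords in the WQE, so account for
	 * the padding, exactly as the fixed code does. */
	copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
	assert(copy_b == 48);		/* 36 rounded up to 3 dwords. */
	pkt_inline_sz += copy_b;
	assert(pkt_inline_sz == 66);	/* header bytes plus padding. */
	return 0;
}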
@@ -438,19 +521,20 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                }
                /* Inline if enough room. */
                if (inline_en || tso) {
+                       uint32_t inl;
                        uintptr_t end = (uintptr_t)
                                (((uintptr_t)txq->wqes) +
                                 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
                        unsigned int inline_room = max_inline *
                                                   RTE_CACHE_LINE_SIZE -
-                                                  (pkt_inline_sz - 2);
+                                                  (pkt_inline_sz - 2) -
+                                                  !!tso * sizeof(inl);
                        uintptr_t addr_end = (addr + inline_room) &
                                             ~(RTE_CACHE_LINE_SIZE - 1);
                        unsigned int copy_b = (addr_end > addr) ?
                                RTE_MIN((addr_end - addr), length) :
                                0;
 
-                       raw += MLX5_WQE_DWORD_SIZE;
                        if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
                                /*
                                 * One Dseg remains in the current WQE.  To
@@ -463,12 +547,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        break;
                                max_wqe -= n;
                                if (tso) {
-                                       uint32_t inl =
-                                               htonl(copy_b | MLX5_INLINE_SEG);
-
-                                       pkt_inline_sz =
-                                               MLX5_WQE_DS(tso_header_sz) *
-                                               MLX5_WQE_DWORD_SIZE;
+                                       inl = htonl(copy_b | MLX5_INLINE_SEG);
                                        rte_memcpy((void *)raw,
                                                   (void *)&inl, sizeof(inl));
                                        raw += sizeof(inl);
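For reference, `!!tso * sizeof(inl)` collapses the TSO flag to 0 or 1, so the 4-byte MLX5_INLINE_SEG marker written ahead of the inlined data is subtracted from inline_room only when TSO is in use. An editor's sketch of that reservation with hypothetical values, assuming 64-byte cache lines and the 18-byte initial eseg inline:

#include <assert.h>
#include <stdint.h>

#define RTE_CACHE_LINE_SIZE 64	/* assumed for this sketch. */

int
main(void)
{
	uint32_t inl;			/* only sizeof(inl) is used. */
	unsigned int max_inline = 4;	/* txq->max_inline, in cache lines. */
	unsigned int pkt_inline_sz = 18; /* headers already in the eseg. */
	unsigned int tso = 1;
	unsigned int inline_room = max_inline * RTE_CACHE_LINE_SIZE -
				   (pkt_inline_sz - 2) -
				   !!tso * sizeof(inl);

	/* 4 bytes less than the non-TSO case, reserved for the marker. */
	assert(inline_room == 256 - 16 - 4);
	(void)inl;
	return 0;
}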
@@ -1501,30 +1580,20 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 static inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
-       uint32_t pkt_type;
-       uint16_t flags = ntohs(cqe->hdr_type_etc);
+       uint8_t idx;
+       uint8_t pinfo = cqe->pkt_info;
+       uint16_t ptype = cqe->hdr_type_etc;
 
-       if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
-               pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_IPV4_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_IPV6_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
-               pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
-                            RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
-                            RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
-       } else {
-               pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_L3_HDR_TYPE_IPV6,
-                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_L3_HDR_TYPE_IPV4,
-                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
-       }
-       return pkt_type;
+       /*
+        * The index to the array should have:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled
+        * bit[7] = outer_l3_type
+        */
+       idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+       return mlx5_ptype_table[idx];
 }
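The new lookup is two masks and two shifts. An editor's sketch with a hypothetical CQE value, where pkt_info bit 0 is taken to be the tunnel flag as implied by its shift into index bit 6:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint8_t pinfo = 0x1;	 /* bit 0 -> idx bit 6 (tunneled). */
	uint16_t ptype = 0x1800; /* (ptype & 0xfc00) >> 10 == 0x06. */
	uint8_t idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);

	/* 0x46: the tunneled, outer-IPv4, inner-IPv4 TCP table entry. */
	assert(idx == 0x46);
	return 0;
}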
 
 /**
@@ -1945,9 +2014,3 @@ priv_check_vec_rx_support(struct priv *priv)
        (void)priv;
        return -ENOTSUP;
 }
-
-void __attribute__((weak))
-priv_prep_vec_rx_function(struct priv *priv)
-{
-       (void)priv;
-}