net/mlx5: use SPDX tags in 6WIND copyrighted files
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 5a24e15..dc4ead9 100644
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright 2015 6WIND S.A.
- *   Copyright 2015 Mellanox.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
  */
 
 #include <assert.h>
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
-#include <infiniband/mlx5_hw.h>
-#include <infiniband/arch.h>
+#include <infiniband/mlx5dv.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
 #include <rte_mbuf.h>
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 #include <rte_ether.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
 
 #include "mlx5.h"
 #include "mlx5_utils.h"
@@ -73,36 +37,131 @@ static __rte_always_inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
 
 static __rte_always_inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                 uint16_t cqe_cnt, uint32_t *rss_hash);
 
 static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
 
-/*
- * The index to the array should have:
- * bit[1:0] = l3_hdr_type, bit[2] = tunneled, bit[3] = outer_l3_type
- */
-const uint32_t mlx5_ptype_table[] = {
-       RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,               /* b0001 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,               /* b0010 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, /* b0101 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, /* b0110 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,               /* b1001 */
-       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,               /* b1010 */
-       RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, /* b1101 */
-       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, /* b1110 */
-       RTE_PTYPE_ALL_MASK                           /* b1111 */
+uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+       [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
 
+/**
+ * Build a table to translate Rx completion flags to packet type.
+ *
+ * @note: keep mlx5_dev_supported_ptypes_get() in sync with any change
+ * made here.
+ */
+void
+mlx5_set_ptype_table(void)
+{
+       unsigned int i;
+       uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
+
+       /* Last entry must not be overwritten, reserved for errored packet. */
+       for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
+               (*p)[i] = RTE_PTYPE_UNKNOWN;
+       /*
+        * The index to the array should have:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled
+        * bit[7] = outer_l3_type
+        */
+       /* L2 */
+       (*p)[0x00] = RTE_PTYPE_L2_ETHER;
+       /* L3 */
+       (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       /* Fragmented */
+       (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       /* TCP */
+       (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       /* UDP */
+       (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       /* Repeat with outer_l3_type being set. Just in case. */
+       (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_NONFRAG;
+       (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_FRAG;
+       (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_UDP;
+       /* Tunneled - L3 */
+       (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_NONFRAG;
+       /* Tunneled - Fragmented */
+       (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_FRAG;
+       /* Tunneled - TCP */
+       (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       /* Tunneled - UDP */
+       (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+       (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+       (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+       (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_UDP;
+}
+
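
The const lookup table is replaced by a 256-entry, cache-aligned array
filled at startup by mlx5_set_ptype_table(); entry 0xff stays reserved
for errored packets. To make the documented bit layout concrete, one
entry decodes as follows (a sketch; it mirrors rxq_cq_to_pkt_type() at
the end of this diff):

    /* idx 0x06: bit[1:0] = 2 (IPv4), bit[4:2] = 1 (TCP), no frag,
     * no tunnel -- matching the (*p)[0x06] assignment above. */
    uint32_t ptype = mlx5_ptype_table[0x06];
    assert(ptype == (RTE_PTYPE_L2_ETHER |
                     RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP));
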
 /**
  * Return the size of tailroom of WQ.
  *
@@ -115,7 +174,7 @@ const uint32_t mlx5_ptype_table[] = {
  *   Size of tailroom.
  */
 static inline size_t
-tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
+tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
 {
        size_t tailroom;
        tailroom = (uintptr_t)(txq->wqes) +
@@ -173,7 +232,7 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 int
 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-       struct txq *txq = tx_queue;
+       struct mlx5_txq_data *txq = tx_queue;
        uint16_t used;
 
        mlx5_tx_complete(txq);
@@ -197,7 +256,7 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 int
 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
-       struct rxq *rxq = rx_queue;
+       struct mlx5_rxq_data *rxq = rx_queue;
        struct rxq_zip *zip = &rxq->zip;
        volatile struct mlx5_cqe *cqe;
        const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -220,7 +279,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 
                op_own = cqe->op_own;
                if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-                       n = ntohl(cqe->byte_cnt);
+                       n = rte_be_to_cpu_32(cqe->byte_cnt);
                else
                        n = 1;
                cq_ci += n;
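
This hunk is part of a broader conversion, visible throughout the
diff, from POSIX byte-order helpers (ntohl/htons/htonll) to DPDK's
<rte_byteorder.h> macros. Semantics are unchanged; a minimal
illustration:

    #include <rte_byteorder.h>

    /* Byte-swaps on little-endian hosts, a no-op on big-endian ones,
     * i.e. the same as ntohl(); rte_cpu_to_be_64() likewise replaces
     * the non-standard htonll(). */
    uint32_t host_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
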
@@ -249,7 +308,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 uint16_t
 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-       struct txq *txq = (struct txq *)dpdk_txq;
+       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
        const uint16_t elts_n = 1 << txq->elts_n;
        const uint16_t elts_m = elts_n - 1;
@@ -257,15 +316,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        unsigned int j = 0;
        unsigned int k = 0;
        uint16_t max_elts;
-       unsigned int max_inline = txq->max_inline;
-       const unsigned int inline_en = !!max_inline && txq->inline_en;
        uint16_t max_wqe;
        unsigned int comp;
-       volatile struct mlx5_wqe_v *wqe = NULL;
        volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
        unsigned int segs_n = 0;
-       struct rte_mbuf *buf = NULL;
-       uint8_t *raw;
+       const unsigned int max_inline = txq->max_inline;
 
        if (unlikely(!pkts_n))
                return 0;
@@ -274,20 +329,24 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        /* Start processing. */
        mlx5_tx_complete(txq);
        max_elts = (elts_n - (elts_head - txq->elts_tail));
+       /* A CQE slot must always be available. */
+       assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
        max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
        if (unlikely(!max_wqe))
                return 0;
        do {
+               struct rte_mbuf *buf = NULL;
+               uint8_t *raw;
+               volatile struct mlx5_wqe_v *wqe = NULL;
                volatile rte_v128u32_t *dseg = NULL;
                uint32_t length;
                unsigned int ds = 0;
                unsigned int sg = 0; /* counter of additional segs attached. */
                uintptr_t addr;
-               uint64_t naddr;
                uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
                uint16_t tso_header_sz = 0;
                uint16_t ehdr;
-               uint8_t cs_flags = 0;
+               uint8_t cs_flags;
                uint64_t tso = 0;
                uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -305,7 +364,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (max_elts < segs_n)
                        break;
                max_elts -= segs_n;
-               --segs_n;
+               sg = --segs_n;
                if (unlikely(--max_wqe == 0))
                        break;
                wqe = (volatile struct mlx5_wqe_v *)
@@ -320,35 +379,22 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #ifdef MLX5_PMD_SOFT_COUNTERS
                total_length = length;
 #endif
-               if (length < (MLX5_WQE_DWORD_SIZE + 2))
+               if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
+                       txq->stats.oerrors++;
                        break;
+               }
                /* Update element. */
                (*txq->elts)[elts_head & elts_m] = buf;
                /* Prefetch next buffer data. */
                if (pkts_n - i > 1)
                        rte_prefetch0(
                            rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-               /* Should we enable HW CKSUM offload */
-               if (buf->ol_flags &
-                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-                       const uint64_t is_tunneled = buf->ol_flags &
-                                                    (PKT_TX_TUNNEL_GRE |
-                                                     PKT_TX_TUNNEL_VXLAN);
-
-                       if (is_tunneled && txq->tunnel_en) {
-                               cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                                          MLX5_ETH_WQE_L4_INNER_CSUM;
-                               if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                                       cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-                       } else {
-                               cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                                          MLX5_ETH_WQE_L4_CSUM;
-                       }
-               }
+               cs_flags = txq_ol_cksum_to_cs(txq, buf);
                raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
                /* Replace the Ethernet type by the VLAN if necessary. */
                if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-                       uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+                       uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+                                                        buf->vlan_tci);
                        unsigned int len = 2 * ETHER_ADDR_LEN - 2;
 
                        addr += 2;
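
The open-coded checksum-flag computation deleted above is centralized
in txq_ol_cksum_to_cs(). Judging from the removed block, the helper
plausibly reduces to the following sketch (not necessarily the
driver's exact definition):

    static inline uint8_t
    txq_ol_cksum_to_cs(struct mlx5_txq_data *txq, struct rte_mbuf *buf)
    {
            uint8_t cs_flags = 0;

            if (buf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
                                 PKT_TX_UDP_CKSUM)) {
                    const uint64_t is_tunneled = buf->ol_flags &
                            (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN);

                    if (is_tunneled && txq->tunnel_en) {
                            cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
                                       MLX5_ETH_WQE_L4_INNER_CSUM;
                            if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
                                    cs_flags |= MLX5_ETH_WQE_L3_CSUM;
                    } else {
                            cs_flags = MLX5_ETH_WQE_L3_CSUM |
                                       MLX5_ETH_WQE_L4_CSUM;
                    }
            }
            return cs_flags;
    }
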
@@ -368,89 +414,88 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        length -= pkt_inline_sz;
                        addr += pkt_inline_sz;
                }
-               if (txq->tso_en) {
-                       tso = buf->ol_flags & PKT_TX_TCP_SEG;
-                       if (tso) {
-                               uintptr_t end = (uintptr_t)
-                                               (((uintptr_t)txq->wqes) +
-                                               (1 << txq->wqe_n) *
-                                               MLX5_WQE_SIZE);
-                               unsigned int copy_b;
-                               uint8_t vlan_sz = (buf->ol_flags &
-                                                 PKT_TX_VLAN_PKT) ? 4 : 0;
-                               const uint64_t is_tunneled =
-                                                       buf->ol_flags &
-                                                       (PKT_TX_TUNNEL_GRE |
-                                                        PKT_TX_TUNNEL_VXLAN);
-
-                               tso_header_sz = buf->l2_len + vlan_sz +
-                                               buf->l3_len + buf->l4_len;
-                               tso_segsz = buf->tso_segsz;
-
-                               if (is_tunneled && txq->tunnel_en) {
-                                       tso_header_sz += buf->outer_l2_len +
-                                                        buf->outer_l3_len;
-                                       cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
-                               } else {
-                                       cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-                               }
-                               if (unlikely(tso_header_sz >
-                                            MLX5_MAX_TSO_HEADER))
+               raw += MLX5_WQE_DWORD_SIZE;
+               tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+               if (tso) {
+                       uintptr_t end =
+                               (uintptr_t)(((uintptr_t)txq->wqes) +
+                                           (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+                       unsigned int copy_b;
+                       uint8_t vlan_sz =
+                               (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+                       const uint64_t is_tunneled =
+                               buf->ol_flags & (PKT_TX_TUNNEL_GRE |
+                                                PKT_TX_TUNNEL_VXLAN);
+
+                       tso_header_sz = buf->l2_len + vlan_sz +
+                                       buf->l3_len + buf->l4_len;
+                       tso_segsz = buf->tso_segsz;
+                       if (unlikely(tso_segsz == 0)) {
+                               txq->stats.oerrors++;
+                               break;
+                       }
+                       if (is_tunneled && txq->tunnel_en) {
+                               tso_header_sz += buf->outer_l2_len +
+                                                buf->outer_l3_len;
+                               cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+                       } else {
+                               cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+                       }
+                       if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+                               txq->stats.oerrors++;
+                               break;
+                       }
+                       copy_b = tso_header_sz - pkt_inline_sz;
+                       /* First seg must contain all headers. */
+                       assert(copy_b <= length);
+                       if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+                               uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+
+                               if (unlikely(max_wqe < n))
                                        break;
-                               copy_b = tso_header_sz - pkt_inline_sz;
-                               /* First seg must contain all headers. */
-                               assert(copy_b <= length);
-                               raw += MLX5_WQE_DWORD_SIZE;
-                               if (copy_b &&
-                                  ((end - (uintptr_t)raw) > copy_b)) {
-                                       uint16_t n = (MLX5_WQE_DS(copy_b) -
-                                                     1 + 3) / 4;
-
-                                       if (unlikely(max_wqe < n))
-                                               break;
-                                       max_wqe -= n;
-                                       rte_memcpy((void *)raw,
-                                                  (void *)addr, copy_b);
-                                       addr += copy_b;
-                                       length -= copy_b;
-                                       pkt_inline_sz += copy_b;
-                                       /*
-                                        * Another DWORD will be added
-                                        * in the inline part.
-                                        */
-                                       raw += MLX5_WQE_DS(copy_b) *
-                                              MLX5_WQE_DWORD_SIZE -
-                                              MLX5_WQE_DWORD_SIZE;
-                               } else {
-                                       /* NOP WQE. */
-                                       wqe->ctrl = (rte_v128u32_t){
-                                                    htonl(txq->wqe_ci << 8),
-                                                    htonl(txq->qp_num_8s | 1),
-                                                    0,
-                                                    0,
-                                       };
-                                       ds = 1;
-                                       total_length = 0;
-                                       k++;
-                                       goto next_wqe;
-                               }
+                               max_wqe -= n;
+                               rte_memcpy((void *)raw, (void *)addr, copy_b);
+                               addr += copy_b;
+                               length -= copy_b;
+                               /* Include padding for TSO header. */
+                               copy_b = MLX5_WQE_DS(copy_b) *
+                                        MLX5_WQE_DWORD_SIZE;
+                               pkt_inline_sz += copy_b;
+                               raw += copy_b;
+                       } else {
+                               /* NOP WQE. */
+                               wqe->ctrl = (rte_v128u32_t){
+                                       rte_cpu_to_be_32(txq->wqe_ci << 8),
+                                       rte_cpu_to_be_32(txq->qp_num_8s | 1),
+                                       0,
+                                       0,
+                               };
+                               ds = 1;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+                               total_length = 0;
+#endif
+                               k++;
+                               goto next_wqe;
                        }
                }
                /* Inline if enough room. */
-               if (inline_en || tso) {
+               if (max_inline || tso) {
+                       uint32_t inl = 0;
                        uintptr_t end = (uintptr_t)
                                (((uintptr_t)txq->wqes) +
                                 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
                        unsigned int inline_room = max_inline *
                                                   RTE_CACHE_LINE_SIZE -
-                                                  (pkt_inline_sz - 2);
-                       uintptr_t addr_end = (addr + inline_room) &
-                                            ~(RTE_CACHE_LINE_SIZE - 1);
-                       unsigned int copy_b = (addr_end > addr) ?
-                               RTE_MIN((addr_end - addr), length) :
-                               0;
-
-                       raw += MLX5_WQE_DWORD_SIZE;
+                                                  (pkt_inline_sz - 2) -
+                                                  !!tso * sizeof(inl);
+                       uintptr_t addr_end;
+                       unsigned int copy_b;
+
+pkt_inline:
+                       addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
+                                                  RTE_CACHE_LINE_SIZE);
+                       copy_b = (addr_end > addr) ?
+                                RTE_MIN((addr_end - addr), length) : 0;
                        if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
                                /*
                                 * One Dseg remains in the current WQE.  To
@@ -462,13 +507,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                if (unlikely(max_wqe < n))
                                        break;
                                max_wqe -= n;
-                               if (tso) {
-                                       uint32_t inl =
-                                               htonl(copy_b | MLX5_INLINE_SEG);
-
-                                       pkt_inline_sz =
-                                               MLX5_WQE_DS(tso_header_sz) *
-                                               MLX5_WQE_DWORD_SIZE;
+                               if (tso && !inl) {
+                                       inl = rte_cpu_to_be_32(copy_b |
+                                                              MLX5_INLINE_SEG);
                                        rte_memcpy((void *)raw,
                                                   (void *)&inl, sizeof(inl));
                                        raw += sizeof(inl);
@@ -501,11 +542,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        } else if (!segs_n) {
                                goto next_pkt;
                        } else {
-                               /* dseg will be advance as part of next_seg */
-                               dseg = (volatile rte_v128u32_t *)
-                                       ((uintptr_t)wqe +
-                                        ((ds - 1) * MLX5_WQE_DWORD_SIZE));
-                               goto next_seg;
+                               raw += copy_b;
+                               inline_room -= copy_b;
+                               --segs_n;
+                               buf = buf->next;
+                               assert(buf);
+                               addr = rte_pktmbuf_mtod(buf, uintptr_t);
+                               length = DATA_LEN(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+                               total_length += length;
+#endif
+                               (*txq->elts)[++elts_head & elts_m] = buf;
+                               goto pkt_inline;
                        }
                } else {
                        /*
@@ -517,12 +565,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        ds = 3;
 use_dseg:
                        /* Add the remaining packet as a simple ds. */
-                       naddr = htonll(addr);
+                       addr = rte_cpu_to_be_64(addr);
                        *dseg = (rte_v128u32_t){
-                               htonl(length),
+                               rte_cpu_to_be_32(length),
                                mlx5_tx_mb2mr(txq, buf),
-                               naddr,
-                               naddr >> 32,
+                               addr,
+                               addr >> 32,
                        };
                        ++ds;
                        if (!segs_n)
@@ -556,42 +604,45 @@ next_seg:
                total_length += length;
 #endif
                /* Store segment information. */
-               naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+               addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
                *dseg = (rte_v128u32_t){
-                       htonl(length),
+                       rte_cpu_to_be_32(length),
                        mlx5_tx_mb2mr(txq, buf),
-                       naddr,
-                       naddr >> 32,
+                       addr,
+                       addr >> 32,
                };
                (*txq->elts)[++elts_head & elts_m] = buf;
-               ++sg;
-               /* Advance counter only if all segs are successfully posted. */
-               if (sg < segs_n)
+               if (--segs_n)
                        goto next_seg;
-               else
-                       j += sg;
 next_pkt:
+               if (ds > MLX5_DSEG_MAX) {
+                       txq->stats.oerrors++;
+                       break;
+               }
                ++elts_head;
                ++pkts;
                ++i;
+               j += sg;
                /* Initialize known and common part of the WQE structure. */
                if (tso) {
                        wqe->ctrl = (rte_v128u32_t){
-                               htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
-                               htonl(txq->qp_num_8s | ds),
+                               rte_cpu_to_be_32((txq->wqe_ci << 8) |
+                                                MLX5_OPCODE_TSO),
+                               rte_cpu_to_be_32(txq->qp_num_8s | ds),
                                0,
                                0,
                        };
                        wqe->eseg = (rte_v128u32_t){
                                0,
-                               cs_flags | (htons(tso_segsz) << 16),
+                               cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
                                0,
-                               (ehdr << 16) | htons(tso_header_sz),
+                               (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
                        };
                } else {
                        wqe->ctrl = (rte_v128u32_t){
-                               htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-                               htonl(txq->qp_num_8s | ds),
+                               rte_cpu_to_be_32((txq->wqe_ci << 8) |
+                                                MLX5_OPCODE_SEND),
+                               rte_cpu_to_be_32(txq->qp_num_8s | ds),
                                0,
                                0,
                        };
@@ -599,7 +650,7 @@ next_pkt:
                                0,
                                cs_flags,
                                0,
-                               (ehdr << 16) | htons(pkt_inline_sz),
+                               (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
                        };
                }
 next_wqe:
@@ -619,10 +670,13 @@ next_wqe:
        comp = txq->elts_comp + i + j + k;
        if (comp >= MLX5_TX_COMP_THRESH) {
                /* Request completion on last WQE. */
-               last_wqe->ctrl2 = htonl(8);
+               last_wqe->ctrl2 = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
                last_wqe->ctrl3 = txq->elts_head;
                txq->elts_comp = 0;
+#ifndef NDEBUG
+               ++txq->cq_pi;
+#endif
        } else {
                txq->elts_comp = comp;
        }
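
Two bookkeeping changes recur in every burst function from here on:
the completion request itself is unchanged (the constant 8 written
into ctrl2 is the CQ-update flag asking the NIC for a CQE), while
txq->cq_pi is now maintained only in debug builds, where it feeds the
new "A CQE slot must always be available" assertions. That check is
wrap-safe on the uint16_t counters; with illustrative values:

    /* cqe_n = 4 (16 CQE slots), cq_ci = 0xfffe, cq_pi = 0x0001:
     * unsigned wrap gives cq_pi - cq_ci = 3 CQEs in flight, so
     * (1u << cqe_n) - 3 = 13 free slots and the assert holds. */
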
@@ -646,7 +700,7 @@ next_wqe:
  *   Packet length.
  */
 static inline void
-mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
 {
        uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
        volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
@@ -658,13 +712,14 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
        mpw->len = length;
        mpw->total_len = 0;
        mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-       mpw->wqe->eseg.mss = htons(length);
+       mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
        mpw->wqe->eseg.inline_hdr_sz = 0;
        mpw->wqe->eseg.rsvd0 = 0;
        mpw->wqe->eseg.rsvd1 = 0;
        mpw->wqe->eseg.rsvd2 = 0;
-       mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                 (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+       mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+                                            (txq->wqe_ci << 8) |
+                                            MLX5_OPCODE_TSO);
        mpw->wqe->ctrl[2] = 0;
        mpw->wqe->ctrl[3] = 0;
        mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -685,7 +740,7 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
  *   Pointer to MPW session structure.
  */
 static inline void
-mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
 {
        unsigned int num = mpw->pkts_n;
 
@@ -693,7 +748,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
         * Store size in multiple of 16 bytes. Control and Ethernet segments
         * count as 2.
         */
-       mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+       mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
        mpw->state = MLX5_MPW_STATE_CLOSED;
        if (num < 3)
                ++txq->wqe_ci;
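
As the comment above says, the DS count in ctrl[1] is expressed in
16-byte units, with the control and Ethernet segments counting as 2.
A worked example:

    /* num = 5 packets -> DS field = 2 + 5 = 7 -> 112 bytes, which
     * spills past the 64-byte title WQEBB; hence wqe_ci advances by 2
     * whenever num >= 3 and by 1 otherwise. */
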
@@ -719,7 +774,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
 uint16_t
 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-       struct txq *txq = (struct txq *)dpdk_txq;
+       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
        const uint16_t elts_n = 1 << txq->elts_n;
        const uint16_t elts_m = elts_n - 1;
@@ -740,6 +795,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        /* Start processing. */
        mlx5_tx_complete(txq);
        max_elts = (elts_n - (elts_head - txq->elts_tail));
+       /* A CQE slot must always be available. */
+       assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
        max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
        if (unlikely(!max_wqe))
                return 0;
@@ -747,7 +804,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                struct rte_mbuf *buf = *(pkts++);
                uint32_t length;
                unsigned int segs_n = buf->nb_segs;
-               uint32_t cs_flags = 0;
+               uint32_t cs_flags;
 
                /*
                 * Make sure there is enough room to store this packet and
@@ -757,14 +814,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (max_elts < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                max_elts -= segs_n;
                --pkts_n;
-               /* Should we enable HW CKSUM offload */
-               if (buf->ol_flags &
-                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-                       cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+               cs_flags = txq_ol_cksum_to_cs(txq, buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                assert(length);
@@ -800,9 +856,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        dseg = mpw.data.dseg[mpw.pkts_n];
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
                        *dseg = (struct mlx5_wqe_data_seg){
-                               .byte_count = htonl(DATA_LEN(buf)),
+                               .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
                                .lkey = mlx5_tx_mb2mr(txq, buf),
-                               .addr = htonll(addr),
+                               .addr = rte_cpu_to_be_64(addr),
                        };
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                        length += DATA_LEN(buf);
@@ -830,10 +886,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                volatile struct mlx5_wqe *wqe = mpw.wqe;
 
                /* Request completion on last WQE. */
-               wqe->ctrl[2] = htonl(8);
+               wqe->ctrl[2] = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
                wqe->ctrl[3] = elts_head;
                txq->elts_comp = 0;
+#ifndef NDEBUG
+               ++txq->cq_pi;
+#endif
        } else {
                txq->elts_comp = comp;
        }
@@ -860,7 +919,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Packet length.
  */
 static inline void
-mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
+                   uint32_t length)
 {
        uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
        struct mlx5_wqe_inl_small *inl;
@@ -870,12 +930,12 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
        mpw->len = length;
        mpw->total_len = 0;
        mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-       mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                 (txq->wqe_ci << 8) |
-                                 MLX5_OPCODE_TSO);
+       mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+                                            (txq->wqe_ci << 8) |
+                                            MLX5_OPCODE_TSO);
        mpw->wqe->ctrl[2] = 0;
        mpw->wqe->ctrl[3] = 0;
-       mpw->wqe->eseg.mss = htons(length);
+       mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
        mpw->wqe->eseg.inline_hdr_sz = 0;
        mpw->wqe->eseg.cs_flags = 0;
        mpw->wqe->eseg.rsvd0 = 0;
@@ -895,7 +955,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
  *   Pointer to MPW session structure.
  */
 static inline void
-mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
 {
        unsigned int size;
        struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
@@ -906,9 +966,10 @@ mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
         * Store size in multiple of 16 bytes. Control and Ethernet segments
         * count as 2.
         */
-       mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+       mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+                                            MLX5_WQE_DS(size));
        mpw->state = MLX5_MPW_STATE_CLOSED;
-       inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+       inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
        txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 }
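
mlx5_mpw_inline_close() rounds both counters up: the DS field to
16-byte units and the WQE index to 64-byte WQEBBs. Assuming the usual
definition MLX5_WQE_DS(n) = ((n) + 15) / 16:

    /* size = 100 bytes of control + Ethernet + inlined data:
     * MLX5_WQE_DS(100) = 7 goes into the ctrl[1] DS field;
     * wqe_ci += (100 + 63) / 64 = 2 WQEBBs. */
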
 
@@ -929,7 +990,7 @@ uint16_t
 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                         uint16_t pkts_n)
 {
-       struct txq *txq = (struct txq *)dpdk_txq;
+       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
        const uint16_t elts_n = 1 << txq->elts_n;
        const uint16_t elts_m = elts_n - 1;
@@ -963,12 +1024,14 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
        /* Start processing. */
        mlx5_tx_complete(txq);
        max_elts = (elts_n - (elts_head - txq->elts_tail));
+       /* A CQE slot must always be available. */
+       assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
        do {
                struct rte_mbuf *buf = *(pkts++);
                uintptr_t addr;
                uint32_t length;
                unsigned int segs_n = buf->nb_segs;
-               uint32_t cs_flags = 0;
+               uint8_t cs_flags;
 
                /*
                 * Make sure there is enough room to store this packet and
@@ -978,8 +1041,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                if (max_elts < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                max_elts -= segs_n;
                --pkts_n;
                /*
@@ -987,10 +1052,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                 * iteration.
                 */
                max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-               /* Should we enable HW CKSUM offload */
-               if (buf->ol_flags &
-                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-                       cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+               cs_flags = txq_ol_cksum_to_cs(txq, buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                /* Start new session if packet differs. */
@@ -1046,9 +1108,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                dseg = mpw.data.dseg[mpw.pkts_n];
                                addr = rte_pktmbuf_mtod(buf, uintptr_t);
                                *dseg = (struct mlx5_wqe_data_seg){
-                                       .byte_count = htonl(DATA_LEN(buf)),
+                                       .byte_count =
+                                              rte_cpu_to_be_32(DATA_LEN(buf)),
                                        .lkey = mlx5_tx_mb2mr(txq, buf),
-                                       .addr = htonll(addr),
+                                       .addr = rte_cpu_to_be_64(addr),
                                };
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                                length += DATA_LEN(buf);
@@ -1120,10 +1183,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                volatile struct mlx5_wqe *wqe = mpw.wqe;
 
                /* Request completion on last WQE. */
-               wqe->ctrl[2] = htonl(8);
+               wqe->ctrl[2] = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
                wqe->ctrl[3] = elts_head;
                txq->elts_comp = 0;
+#ifndef NDEBUG
+               ++txq->cq_pi;
+#endif
        } else {
                txq->elts_comp = comp;
        }
@@ -1152,7 +1218,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
  *   Packet length.
  */
 static inline void
-mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
+mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
 {
        uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
 
@@ -1160,9 +1226,10 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
        mpw->pkts_n = 0;
        mpw->total_len = sizeof(struct mlx5_wqe);
        mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-       mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
-                                 (txq->wqe_ci << 8) |
-                                 MLX5_OPCODE_ENHANCED_MPSW);
+       mpw->wqe->ctrl[0] =
+               rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+                                (txq->wqe_ci << 8) |
+                                MLX5_OPCODE_ENHANCED_MPSW);
        mpw->wqe->ctrl[2] = 0;
        mpw->wqe->ctrl[3] = 0;
        memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
@@ -1170,9 +1237,9 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
                uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
 
                /* Pad the first 2 DWORDs with zero-length inline header. */
-               *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+               *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
                *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
-                       htonl(MLX5_INLINE_SEG);
+                       rte_cpu_to_be_32(MLX5_INLINE_SEG);
                mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
                /* Start from the next WQEBB. */
                mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
@@ -1193,14 +1260,15 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
  *   Number of consumed WQEs.
  */
 static inline uint16_t
-mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
 {
        uint16_t ret;
 
        /* Store size in multiple of 16 bytes. Control and Ethernet segments
         * count as 2.
         */
-       mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+       mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+                                            MLX5_WQE_DS(mpw->total_len));
        mpw->state = MLX5_MPW_STATE_CLOSED;
        ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
        txq->wqe_ci += ret;
@@ -1208,10 +1276,10 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
 }
 
 /**
- * DPDK callback for TX with Enhanced MPW support.
+ * TX with Enhanced MPW support.
  *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
+ * @param txq
+ *   Pointer to TX queue structure.
  * @param[in] pkts
  *   Packets to transmit.
  * @param pkts_n
@@ -1220,10 +1288,10 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
  * @return
  *   Number of packets successfully transmitted (<= pkts_n).
  */
-uint16_t
-mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+static inline uint16_t
+txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+              uint16_t pkts_n)
 {
-       struct txq *txq = (struct txq *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
        const uint16_t elts_n = 1 << txq->elts_n;
        const uint16_t elts_m = elts_n - 1;
@@ -1252,27 +1320,17 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        do {
                struct rte_mbuf *buf = *(pkts++);
                uintptr_t addr;
-               uint64_t naddr;
                unsigned int n;
                unsigned int do_inline = 0; /* Whether inline is possible. */
                uint32_t length;
-               unsigned int segs_n = buf->nb_segs;
-               uint32_t cs_flags = 0;
+               uint8_t cs_flags;
 
-               /*
-                * Make sure there is enough room to store this packet and
-                * that one ring entry remains unused.
-                */
-               assert(segs_n);
-               if (max_elts - j < segs_n)
-                       break;
-               /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               /* Multi-segment packets are handled in the slow path outside. */
+               assert(NB_SEGS(buf) == 1);
+               /* Make sure there is enough room to store this packet. */
+               if (max_elts - j == 0)
                        break;
-               /* Should we enable HW CKSUM offload. */
-               if (buf->ol_flags &
-                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-                       cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+               cs_flags = txq_ol_cksum_to_cs(txq, buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                /* Start new session if:
@@ -1280,50 +1338,35 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 * - no space left even for a dseg
                 * - next packet can be inlined with a new WQE
                 * - cs_flag differs
-                * It can't be MLX5_MPW_STATE_OPENED as always have a single
-                * segmented packet.
                 */
                if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
-                       if ((segs_n != 1) ||
-                           (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
-                             mpw_room) ||
+                       if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
+                            mpw_room) ||
                            (length <= txq->inline_max_packet_sz &&
                             inl_pad + sizeof(inl_hdr) + length >
-                             mpw_room) ||
+                            mpw_room) ||
                            (mpw.wqe->eseg.cs_flags != cs_flags))
                                max_wqe -= mlx5_empw_close(txq, &mpw);
                }
                if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
-                       if (unlikely(segs_n != 1)) {
-                               /* Fall back to legacy MPW.
-                                * A MPW session consumes 2 WQEs at most to
-                                * include MLX5_MPW_DSEG_MAX pointers.
-                                */
-                               if (unlikely(max_wqe < 2))
-                                       break;
-                               mlx5_mpw_new(txq, &mpw, length);
-                       } else {
-                               /* In Enhanced MPW, inline as much as the budget
-                                * is allowed. The remaining space is to be
-                                * filled with dsegs. If the title WQEBB isn't
-                                * padded, it will have 2 dsegs there.
-                                */
-                               mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
-                                           (max_inline ? max_inline :
-                                            pkts_n * MLX5_WQE_DWORD_SIZE) +
-                                           MLX5_WQE_SIZE);
-                               if (unlikely(max_wqe * MLX5_WQE_SIZE <
-                                             mpw_room))
-                                       break;
-                               /* Don't pad the title WQEBB to not waste WQ. */
-                               mlx5_empw_new(txq, &mpw, 0);
-                               mpw_room -= mpw.total_len;
-                               inl_pad = 0;
-                               do_inline =
-                                       length <= txq->inline_max_packet_sz &&
-                                       sizeof(inl_hdr) + length <= mpw_room &&
-                                       !txq->mpw_hdr_dseg;
-                       }
+                       /* In Enhanced MPW, inline as much as the budget
+                        * allows. The remaining space is filled with
+                        * dsegs. If the title WQEBB isn't padded, it
+                        * holds two dsegs instead.
+                        */
+                       mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
+                                          (max_inline ? max_inline :
+                                           pkts_n * MLX5_WQE_DWORD_SIZE) +
+                                          MLX5_WQE_SIZE);
+                       if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
+                               break;
+                       /* Don't pad the title WQEBB so as not to waste WQ space. */
+                       mlx5_empw_new(txq, &mpw, 0);
+                       mpw_room -= mpw.total_len;
+                       inl_pad = 0;
+                       do_inline = length <= txq->inline_max_packet_sz &&
+                                   sizeof(inl_hdr) + length <= mpw_room &&
+                                   !txq->mpw_hdr_dseg;
                        mpw.wqe->eseg.cs_flags = cs_flags;
                } else {
                        /* Evaluate whether the next packet can be inlined.
@@ -1339,46 +1382,13 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                (!txq->mpw_hdr_dseg ||
                                 mpw.total_len >= MLX5_WQE_SIZE);
                }
-               /* Multi-segment packets must be alone in their MPW. */
-               assert((segs_n == 1) || (mpw.pkts_n == 0));
-               if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-                       length = 0;
-#endif
-                       do {
-                               volatile struct mlx5_wqe_data_seg *dseg;
-
-                               assert(buf);
-                               (*txq->elts)[elts_head++ & elts_m] = buf;
-                               dseg = mpw.data.dseg[mpw.pkts_n];
-                               addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                               *dseg = (struct mlx5_wqe_data_seg){
-                                       .byte_count = htonl(DATA_LEN(buf)),
-                                       .lkey = mlx5_tx_mb2mr(txq, buf),
-                                       .addr = htonll(addr),
-                               };
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-                               length += DATA_LEN(buf);
-#endif
-                               buf = buf->next;
-                               ++j;
-                               ++mpw.pkts_n;
-                       } while (--segs_n);
-                       /* A multi-segmented packet takes one MPW session.
-                        * TODO: Pack more multi-segmented packets if possible.
-                        */
-                       mlx5_mpw_close(txq, &mpw);
-                       if (mpw.pkts_n < 3)
-                               max_wqe--;
-                       else
-                               max_wqe -= 2;
-               } else if (do_inline) {
+               if (do_inline) {
                        /* Inline packet into WQE. */
                        unsigned int max;
 
                        assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
                        assert(length == DATA_LEN(buf));
-                       inl_hdr = htonl(length | MLX5_INLINE_SEG);
+                       inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
                        mpw.data.raw = (volatile void *)
                                ((uintptr_t)mpw.data.raw + inl_pad);
@@ -1434,12 +1444,12 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
                                rte_prefetch2((void *)(addr +
                                                n * RTE_CACHE_LINE_SIZE));
-                       naddr = htonll(addr);
+                       addr = rte_cpu_to_be_64(addr);
                        *dseg = (rte_v128u32_t) {
-                               htonl(length),
+                               rte_cpu_to_be_32(length),
                                mlx5_tx_mb2mr(txq, buf),
-                               naddr,
-                               naddr >> 32,
+                               addr,
+                               addr >> 32,
                        };
                        mpw.data.raw = (volatile void *)(dseg + 1);
                        mpw.total_len += (inl_pad + sizeof(*dseg));
@@ -1464,12 +1474,14 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                volatile struct mlx5_wqe *wqe = mpw.wqe;
 
                /* Request completion on last WQE. */
-               wqe->ctrl[2] = htonl(8);
+               wqe->ctrl[2] = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
                wqe->ctrl[3] = elts_head;
                txq->elts_comp = 0;
                txq->mpw_comp = txq->wqe_ci;
-               txq->cq_pi++;
+#ifndef NDEBUG
+               ++txq->cq_pi;
+#endif
        } else {
                txq->elts_comp += j;
        }
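
/* Assumptions worth spelling out for the completion request above:
 * the literal 8 written to ctrl[2] matches the CQ-update flag of the
 * device PRM (MLX5_WQE_CTRL_CQ_UPDATE in the kernel mlx5 headers has
 * this value), ctrl[3] doubles as scratch space so the completion
 * handler can recover elts_head for this batch, and txq->cq_pi
 * appears to be consumed only by assertions, which would explain the
 * new NDEBUG guard.
 */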
@@ -1479,14 +1491,53 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #endif
        if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
                mlx5_empw_close(txq, &mpw);
-       else if (mpw.state == MLX5_MPW_STATE_OPENED)
-               mlx5_mpw_close(txq, &mpw);
        /* Ring QP doorbell. */
        mlx5_tx_dbrec(txq, mpw.wqe);
        txq->elts_head = elts_head;
        return i;
 }
 
+/**
+ * DPDK callback for TX with Enhanced MPW support.
+ *
+ * @param dpdk_txq
+ *   Generic pointer to TX queue structure.
+ * @param[in] pkts
+ *   Packets to transmit.
+ * @param pkts_n
+ *   Number of packets in array.
+ *
+ * @return
+ *   Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+       uint16_t nb_tx = 0;
+
+       while (pkts_n > nb_tx) {
+               uint16_t n;
+               uint16_t ret;
+
+               n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
+               if (n) {
+                       ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
+                       if (!ret)
+                               break;
+                       nb_tx += ret;
+               }
+               n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
+               if (n) {
+                       ret = txq_burst_empw(txq, &pkts[nb_tx], n);
+                       if (!ret)
+                               break;
+                       nb_tx += ret;
+               }
+       }
+       return nb_tx;
+}
+
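The txq_count_contig_multi_seg() / txq_count_contig_single_seg() helpers are defined outside this hunk. A minimal sketch of the splitting they are assumed to perform, so the loop above reads straight through (names and bodies here are illustrative, not the driver's code):

#include <rte_mbuf.h>

/* Illustrative only: length of the leading run of multi-segment mbufs,
 * which is handed to the regular mlx5_tx_burst() path. */
static inline uint16_t
count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t pos;

	for (pos = 0; pos < pkts_n; ++pos)
		if (pkts[pos]->nb_segs == 1)
			break;
	return pos;
}

/* Illustrative only: length of the leading run of single-segment
 * mbufs, which is handed to txq_burst_empw(). */
static inline uint16_t
count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t pos;

	for (pos = 0; pos < pkts_n; ++pos)
		if (pkts[pos]->nb_segs > 1)
			break;
	return pos;
}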
 /**
  * Translate RX completion flags to packet type.
  *
@@ -1501,30 +1552,20 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 static inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
-       uint32_t pkt_type;
-       uint16_t flags = ntohs(cqe->hdr_type_etc);
+       uint8_t idx;
+       uint8_t pinfo = cqe->pkt_info;
+       uint16_t ptype = cqe->hdr_type_etc;
 
-       if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
-               pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_IPV4_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_IPV6_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
-               pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
-                            RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
-                            RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
-       } else {
-               pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_L3_HDR_TYPE_IPV6,
-                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_L3_HDR_TYPE_IPV4,
-                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
-       }
-       return pkt_type;
+       /*
+        * The index to the array should have:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled
+        * bit[7] = outer_l3_type
+        */
+       idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+       return mlx5_ptype_table[idx];
 }
 
 /**
@@ -1544,7 +1585,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
  *   with error.
  */
 static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                 uint16_t cqe_cnt, uint32_t *rss_hash)
 {
        struct rxq_zip *zip = &rxq->zip;
@@ -1558,8 +1599,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                        (volatile struct mlx5_mini_cqe8 (*)[8])
                        (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
 
-               len = ntohl((*mc)[zip->ai & 7].byte_cnt);
-               *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+               len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+               *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
                if ((++zip->ai & 7) == 0) {
                        /* Invalidate consumed CQEs */
                        idx = zip->ca;
@@ -1600,6 +1641,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                        return 0;
                ++rxq->cq_ci;
                op_own = cqe->op_own;
+               rte_cio_rmb();
                if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
                        volatile struct mlx5_mini_cqe8 (*mc)[8] =
                                (volatile struct mlx5_mini_cqe8 (*)[8])
@@ -1607,7 +1649,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                                                          cqe_cnt].pkt_info);
 
                        /* Fix endianness. */
-                       zip->cqe_cnt = ntohl(cqe->byte_cnt);
+                       zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
                        /*
                         * Current mini array position is the one returned by
                         * check_cqe64().
@@ -1622,8 +1664,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                        --rxq->cq_ci;
                        zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
                        /* Get packet size to return. */
-                       len = ntohl((*mc)[0].byte_cnt);
-                       *rss_hash = ntohl((*mc)[0].rx_hash_result);
+                       len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+                       *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
                        zip->ai = 1;
                        /* Prefetch all the entries to be invalidated */
                        idx = zip->ca;
@@ -1633,8 +1675,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                                ++idx;
                        }
                } else {
-                       len = ntohl(cqe->byte_cnt);
-                       *rss_hash = ntohl(cqe->rx_hash_res);
+                       len = rte_be_to_cpu_32(cqe->byte_cnt);
+                       *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
                }
                /* Error while receiving packet. */
                if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
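
The zip bookkeeping in this function is easier to follow with the compression model spelled out (a sketch inferred from the code above; the authoritative layout lives in the PRM headers):

/*
 * title CQE : byte_cnt is reused as the number of compressed CQEs
 *             (zip->cqe_cnt); its slot holds the first mini array.
 * mini CQEs : 8 per 64-byte CQE slot, each carrying only the
 *             per-packet fields (byte count, RSS hash result).
 *
 * zip->ca tracks the slot holding the current mini array and zip->ai
 * the position inside it (hence the "& 7" masks); once all 8 entries
 * are consumed, the slots they shadow are invalidated so a stale
 * ownership bit is never read as a fresh completion.
 */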
@@ -1655,10 +1697,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
 {
        uint32_t ol_flags = 0;
-       uint16_t flags = ntohs(cqe->hdr_type_etc);
+       uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
 
        ol_flags =
                TRANSPOSE(flags,
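
TRANSPOSE() is defined elsewhere in the driver (mlx5_utils.h); it is assumed to be the usual branch-free single-flag relocation helper, along the lines of:

#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

so, for example, a header-valid bit in hdr_type_etc can land directly on the matching PKT_RX_*_CKSUM_GOOD position without branches.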
@@ -1694,7 +1736,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 uint16_t
 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-       struct rxq *rxq = dpdk_rxq;
+       struct mlx5_rxq_data *rxq = dpdk_rxq;
        const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
        const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
        const unsigned int sges_n = rxq->sges_n;
@@ -1765,7 +1807,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                            MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
                                pkt->ol_flags |= PKT_RX_FDIR;
                                if (cqe->sop_drop_qpn !=
-                                   htonl(MLX5_FLOW_MARK_DEFAULT)) {
+                                   rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
                                        uint32_t mark = cqe->sop_drop_qpn;
 
                                        pkt->ol_flags |= PKT_RX_FDIR_ID;
@@ -1777,10 +1819,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
                        if (rxq->vlan_strip &&
                            (cqe->hdr_type_etc &
-                            htons(MLX5_CQE_VLAN_STRIPPED))) {
-                               pkt->ol_flags |= PKT_RX_VLAN_PKT |
+                            rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
+                               pkt->ol_flags |= PKT_RX_VLAN |
                                        PKT_RX_VLAN_STRIPPED;
-                               pkt->vlan_tci = ntohs(cqe->vlan_info);
+                               pkt->vlan_tci =
+                                       rte_be_to_cpu_16(cqe->vlan_info);
+                       }
+                       if (rxq->hw_timestamp) {
+                               pkt->timestamp =
+                                       rte_be_to_cpu_64(cqe->timestamp);
+                               pkt->ol_flags |= PKT_RX_TIMESTAMP;
                        }
                        if (rxq->crc_present)
                                len -= ETHER_CRC_LEN;
@@ -1796,7 +1844,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 * of the buffers are already known, only the buffer address
                 * changes.
                 */
-               wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+               wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
                if (len > DATA_LEN(seg)) {
                        len -= DATA_LEN(seg);
                        ++NB_SEGS(pkt);
@@ -1823,10 +1871,10 @@ skip:
                return 0;
        /* Update the consumer index. */
        rxq->rq_ci = rq_ci >> sges_n;
-       rte_wmb();
-       *rxq->cq_db = htonl(rxq->cq_ci);
-       rte_wmb();
-       *rxq->rq_db = htonl(rxq->rq_ci);
+       rte_cio_wmb();
+       *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+       rte_cio_wmb();
+       *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 #ifdef MLX5_PMD_SOFT_COUNTERS
        /* Increment packets counter. */
        rxq->stats.ipackets += i;
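
The switch from rte_wmb() to rte_cio_wmb() around the doorbell writes deserves a note: the CQ/RQ doorbell records live in coherent host memory, so the lighter coherent-I/O write barrier is sufficient, and it can map to a cheaper instruction than a full rte_wmb() on some architectures (e.g. armv8). Conceptually:

/*
 *   stores to rxq state and refilled WQEs
 *      |   rte_cio_wmb()  -- make prior stores visible first
 *      v
 *   *cq_db = be32(cq_ci)  -- release consumed CQEs back to HW
 *      |   rte_cio_wmb()  -- order the two doorbell updates
 *      v
 *   *rq_db = be32(rq_ci)  -- post the refilled receive entries
 */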
@@ -1919,21 +1967,23 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 }
 
 int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
 {
        (void)priv;
+       (void)dev;
        return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
 {
        (void)priv;
+       (void)dev;
        return -ENOTSUP;
 }
 
 int __attribute__((weak))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 {
        (void)rxq;
        return -ENOTSUP;
@@ -1945,9 +1995,3 @@ priv_check_vec_rx_support(struct priv *priv)
        (void)priv;
        return -ENOTSUP;
 }
-
-void __attribute__((weak))
-priv_prep_vec_rx_function(struct priv *priv)
-{
-       (void)priv;
-}
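
These stubs use the standard weak-symbol fallback pattern: when the vectorized Rx/Tx objects are not built for the target, the weak definitions satisfy the linker and report -ENOTSUP; otherwise the strong definitions in the vector code win at link time. A generic, self-contained illustration (file and function names hypothetical):

/* fallback.c -- always compiled */
int __attribute__((weak))
feature_check(void)
{
	return -1; /* weak default: not supported */
}

/* fast.c -- compiled only on capable targets; its strong definition
 * silently replaces the weak one when both objects are linked. */
int
feature_check(void)
{
	return 0;
}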