net/mlx5: support extended statistics
drivers/net/mlx5/mlx5_rxtx.c
index 2a66d52..56c7f78 100644
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 
+static inline int
+check_cqe(volatile struct mlx5_cqe *cqe,
+         unsigned int cqes_n, const uint16_t ci)
+         __attribute__((always_inline));
+
+static inline void
+txq_complete(struct txq *txq) __attribute__((always_inline));
+
+static inline uint32_t
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+       __attribute__((always_inline));
+
+static inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+       __attribute__((always_inline));
+
+static inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+       __attribute__((always_inline));
+
+static inline int
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+                uint16_t cqe_cnt, uint32_t *rss_hash)
+                __attribute__((always_inline));
+
+static inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+                  __attribute__((always_inline));
+
 #ifndef NDEBUG
 
 /**
  *   0 the first time.
  */
 static inline int
-check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
 {
        static const uint8_t magic[] = "seen";
-       volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40;
+       volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
        int ret = 1;
        unsigned int i;
 
@@ -98,11 +127,6 @@ check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
 
 #endif /* NDEBUG */
 
-static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
-           unsigned int cqes_n, const uint16_t ci)
-           __attribute__((always_inline));
-
 /**
  * Check whether CQE is valid.
  *
@@ -117,8 +141,8 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
  *   0 on success, 1 on failure.
  */
 static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
-               unsigned int cqes_n, const uint16_t ci)
+check_cqe(volatile struct mlx5_cqe *cqe,
+         unsigned int cqes_n, const uint16_t ci)
 {
        uint16_t idx = ci & cqes_n;
        uint8_t op_own = cqe->op_own;
@@ -136,14 +160,14 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
                if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
                    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
                        return 0;
-               if (!check_cqe64_seen(cqe))
+               if (!check_cqe_seen(cqe))
                        ERROR("unexpected CQE error %u (0x%02x)"
                              " syndrome 0x%02x",
                              op_code, op_code, syndrome);
                return 1;
        } else if ((op_code != MLX5_CQE_RESP_SEND) &&
                   (op_code != MLX5_CQE_REQ)) {
-               if (!check_cqe64_seen(cqe))
+               if (!check_cqe_seen(cqe))
                        ERROR("unexpected CQE opcode %u (0x%02x)",
                              op_code, op_code);
                return 1;
@@ -152,8 +176,23 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
        return 0;
 }
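
For reference, the ownership half of this test only compares the CQE owner bit against the lap parity of the consumer index; since `cqes_n` is a power of two, `ci & cqes_n` isolates exactly the bit that flips once per lap. A minimal standalone model of that arithmetic (helper names are illustrative, not the driver's):

```c
#include <stdint.h>
#include <stdio.h>

static int
cqe_owned_by_sw(uint8_t op_own, unsigned int cqes_n, uint16_t ci)
{
	uint8_t hw_owner = op_own & 0x1;     /* what MLX5_CQE_OWNER() extracts */
	uint8_t sw_parity = !!(ci & cqes_n); /* which lap software is on */

	return hw_owner == sw_parity;
}

int
main(void)
{
	/* 64-entry ring: index 70 is on the second lap, parity 1. */
	printf("%d\n", cqe_owned_by_sw(1, 64, 70)); /* 1: CQE is valid */
	printf("%d\n", cqe_owned_by_sw(0, 64, 70)); /* 0: HW still owns it */
	return 0;
}
```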
 
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+/**
+ * Return the address of the WQE.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param ci
+ *   WQE consumer index.
+ *
+ * @return
+ *   WQE address.
+ */
+static inline uintptr_t *
+tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+{
+       ci &= ((1 << txq->wqe_n) - 1);
+       return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+}
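
With `txq->wqes` now a flat region of `MLX5_WQE_SIZE`-byte slots rather than a typed array, WQE addressing reduces to a mask and a multiply. A sketch of the same computation, with the slot size assumed to be one 64-byte WQEBB:

```c
#include <assert.h>
#include <stdint.h>

#define WQE_SIZE 64u /* assumption: MLX5_WQE_SIZE, one WQEBB */

/* Sketch of tx_mlx5_wqe(): wqe_n is log2 of the ring size, so the
 * consumer index wraps with a mask and each slot is WQE_SIZE bytes. */
static uintptr_t
wqe_addr(uintptr_t base, unsigned int wqe_n, uint16_t ci)
{
	ci &= (1u << wqe_n) - 1;
	return base + (uintptr_t)ci * WQE_SIZE;
}

int
main(void)
{
	/* 256-slot ring (wqe_n = 8): index 260 wraps to slot 4. */
	assert(wqe_addr(0x1000, 8, 260) == 0x1000 + 4 * WQE_SIZE);
	return 0;
}
```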
 
 /**
  * Manage TX completions.
@@ -172,25 +211,25 @@ txq_complete(struct txq *txq)
        uint16_t elts_free = txq->elts_tail;
        uint16_t elts_tail;
        uint16_t cq_ci = txq->cq_ci;
-       volatile struct mlx5_cqe64 *cqe = NULL;
-       volatile struct mlx5_wqe *wqe;
+       volatile struct mlx5_cqe *cqe = NULL;
+       volatile struct mlx5_wqe_ctrl *ctrl;
 
        do {
-               volatile struct mlx5_cqe64 *tmp;
+               volatile struct mlx5_cqe *tmp;
 
-               tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64;
-               if (check_cqe64(tmp, cqe_n, cq_ci))
+               tmp = &(*txq->cqes)[cq_ci & cqe_cnt];
+               if (check_cqe(tmp, cqe_n, cq_ci))
                        break;
                cqe = tmp;
 #ifndef NDEBUG
                if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
-                       if (!check_cqe64_seen(cqe))
+                       if (!check_cqe_seen(cqe))
                                ERROR("unexpected compressed CQE, TX stopped");
                        return;
                }
                if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
                    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
-                       if (!check_cqe64_seen(cqe))
+                       if (!check_cqe_seen(cqe))
                                ERROR("unexpected error CQE, TX stopped");
                        return;
                }
@@ -199,9 +238,9 @@ txq_complete(struct txq *txq)
        } while (1);
        if (unlikely(cqe == NULL))
                return;
-       wqe = &(*txq->wqes)[htons(cqe->wqe_counter) &
-                           ((1 << txq->wqe_n) - 1)].hdr;
-       elts_tail = wqe->ctrl[3];
+       ctrl = (volatile struct mlx5_wqe_ctrl *)
+               tx_mlx5_wqe(txq, ntohs(cqe->wqe_counter));
+       elts_tail = ctrl->ctrl3;
        assert(elts_tail < (1 << txq->wqe_n));
        /* Free buffers. */
        while (elts_free != elts_tail) {
@@ -246,10 +285,6 @@ txq_mb2mp(struct rte_mbuf *buf)
        return buf->pool;
 }
 
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-       __attribute__((always_inline));
-
 /**
  * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
  * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
@@ -292,57 +327,20 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
  *
  * @param txq
  *   Pointer to TX queue structure.
+ * @param wqe
+ *   Pointer to the last WQE posted in the NIC.
  */
 static inline void
-mlx5_tx_dbrec(struct txq *txq)
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
 {
-       uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
-       uint32_t data[4] = {
-               htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-               htonl(txq->qp_num_8s),
-               0,
-               0,
-       };
+       uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+       volatile uint64_t *src = ((volatile uint64_t *)wqe);
+
        rte_wmb();
        *txq->qp_db = htonl(txq->wqe_ci);
        /* Ensure ordering between DB record and BF copy. */
        rte_wmb();
-       rte_mov16(dst, (uint8_t *)data);
-       txq->bf_offset ^= (1 << txq->bf_buf_size);
-}
-
-/**
- * Prefetch a CQE.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param cqe_ci
- *   CQE consumer index.
- */
-static inline void
-tx_prefetch_cqe(struct txq *txq, uint16_t ci)
-{
-       volatile struct mlx5_cqe *cqe;
-
-       cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)];
-       rte_prefetch0(cqe);
-}
-
-/**
- * Prefetch a WQE.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param  wqe_ci
- *   WQE consumer index.
- */
-static inline void
-tx_prefetch_wqe(struct txq *txq, uint16_t ci)
-{
-       volatile struct mlx5_wqe64 *wqe;
-
-       wqe = &(*txq->wqes)[ci & ((1 << txq->wqe_n) - 1)];
-       rte_prefetch0(wqe);
+       *dst = *src;
 }
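
The rewritten doorbell drops the per-queue BlueFlame offset and 16-byte template and instead copies the first eight bytes of the posted WQE; the part that must not change is the barrier placement. A generic sketch of the pattern, with `rte_wmb()` and the MMIO mapping stubbed out:

```c
#include <stdint.h>
#include <arpa/inet.h> /* htonl */

#define wmb() __sync_synchronize() /* stand-in for rte_wmb() */

/* Sketch of the doorbell sequence: the doorbell record must be
 * globally visible before the BlueFlame write triggers the fetch,
 * hence one barrier on each side of the record update. */
void
ring_doorbell(volatile uint32_t *qp_db, volatile uint64_t *bf_reg,
	      const uint64_t *wqe, uint16_t wqe_ci)
{
	wmb();                  /* WQE contents before the DB record */
	*qp_db = htonl(wqe_ci); /* publish the producer index */
	wmb();                  /* DB record before the BF copy */
	*bf_reg = *wqe;         /* first 8 bytes wake the NIC */
}
```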
 
 /**
@@ -368,7 +366,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        unsigned int j = 0;
        unsigned int max;
        unsigned int comp;
-       volatile struct mlx5_wqe *wqe = NULL;
+       volatile struct mlx5_wqe_v *wqe = NULL;
        unsigned int segs_n = 0;
        struct rte_mbuf *buf = NULL;
        uint8_t *raw;
@@ -376,8 +374,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (unlikely(!pkts_n))
                return 0;
        /* Prefetch first packet cacheline. */
-       tx_prefetch_cqe(txq, txq->cq_ci);
-       tx_prefetch_cqe(txq, txq->cq_ci + 1);
        rte_prefetch0(*pkts);
        /* Start processing. */
        txq_complete(txq);
@@ -385,10 +381,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (max > elts_n)
                max -= elts_n;
        do {
-               volatile struct mlx5_wqe_data_seg *dseg = NULL;
+               volatile rte_v128u32_t *dseg = NULL;
                uint32_t length;
                unsigned int ds = 0;
                uintptr_t addr;
+               uint64_t naddr;
+               uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
+               uint16_t ehdr;
+               uint8_t cs_flags = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                uint32_t total_length = 0;
 #endif
@@ -407,13 +407,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                --segs_n;
                if (!segs_n)
                        --pkts_n;
-               wqe = &(*txq->wqes)[txq->wqe_ci &
-                                   ((1 << txq->wqe_n) - 1)].hdr;
-               tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+               wqe = (volatile struct mlx5_wqe_v *)
+                       tx_mlx5_wqe(txq, txq->wqe_ci);
+               rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
                if (pkts_n > 1)
                        rte_prefetch0(*pkts);
                addr = rte_pktmbuf_mtod(buf, uintptr_t);
                length = DATA_LEN(buf);
+               ehdr = (((uint8_t *)addr)[1] << 8) |
+                      ((uint8_t *)addr)[0];
 #ifdef MLX5_PMD_SOFT_COUNTERS
                total_length = length;
 #endif
@@ -431,30 +433,22 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                /* Should we enable HW CKSUM offload */
                if (buf->ol_flags &
                    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-                       wqe->eseg.cs_flags =
-                               MLX5_ETH_WQE_L3_CSUM |
-                               MLX5_ETH_WQE_L4_CSUM;
-               } else {
-                       wqe->eseg.cs_flags = 0;
+                       cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
                }
-               raw  = (uint8_t *)(uintptr_t)&wqe->eseg.inline_hdr[0];
-               /* Start the know and common part of the WQE structure. */
-               wqe->ctrl[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
-               wqe->ctrl[2] = 0;
-               wqe->ctrl[3] = 0;
-               wqe->eseg.rsvd0 = 0;
-               wqe->eseg.rsvd1 = 0;
-               wqe->eseg.mss = 0;
-               wqe->eseg.rsvd2 = 0;
-               /* Start by copying the Ethernet Header. */
-               rte_mov16((uint8_t *)raw, (uint8_t *)addr);
+               raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
+               /*
+                * Start by copying the Ethernet header minus the first two
+                * bytes which will be appended at the end of the Ethernet
+                * segment.
+                */
+               memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, 16);
                length -= MLX5_WQE_DWORD_SIZE;
                addr += MLX5_WQE_DWORD_SIZE;
                /* Replace the Ethernet type by the VLAN if necessary. */
                if (buf->ol_flags & PKT_TX_VLAN_PKT) {
                        uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
 
-                       memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE -
+                       memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE - 2 -
                                           sizeof(vlan)),
                               &vlan, sizeof(vlan));
                        addr -= sizeof(vlan);
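
The two bytes parked in `ehdr` are why both the inline copy and the VLAN rewrite above are shifted by two compared to the old code. A self-contained sketch of the splice, with frame contents illustrative and `MLX5_WQE_DWORD_SIZE` assumed to be 16:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint8_t frame[32] = {
		/* dst MAC */ 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
		/* src MAC */ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66,
		/* type    */ 0x08, 0x00,
	};
	uint8_t raw[18];
	uint16_t ehdr = (frame[1] << 8) | frame[0]; /* parked first bytes */
	uint32_t vlan = htonl(0x81000000 | 100);    /* TPID 0x8100, VID 100 */

	/* Inline bytes [2..17]; bytes [0..1] travel in the eseg. */
	memcpy(raw, frame + 2, 16);
	/* The 4-byte tag lands on the EtherType slot, which sits at
	 * offset 16 - 2 - sizeof(vlan) of the shifted copy, matching
	 * the diff's MLX5_WQE_DWORD_SIZE - 2 - sizeof(vlan). */
	memcpy(raw + 16 - 2 - sizeof(vlan), &vlan, sizeof(vlan));
	printf("ehdr=0x%04x first tag byte=0x%02x\n", ehdr, raw[10]);
	return 0;
}
```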
@@ -462,14 +456,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                }
                /* Inline if enough room. */
                if (txq->max_inline != 0) {
-                       uintptr_t end =
-                               (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n];
+                       uintptr_t end = (uintptr_t)
+                               (((uintptr_t)txq->wqes) +
+                                (1 << txq->wqe_n) * MLX5_WQE_SIZE);
                        uint16_t max_inline =
                                txq->max_inline * RTE_CACHE_LINE_SIZE;
-                       uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
                        uint16_t room;
 
-                       raw += MLX5_WQE_DWORD_SIZE;
+                       /*
+                        * raw starts two bytes before the boundary to
+                        * continue the above copy of packet data.
+                        */
+                       raw += MLX5_WQE_DWORD_SIZE - 2;
                        room = end - (uintptr_t)raw;
                        if (room > max_inline) {
                                uintptr_t addr_end = (addr + max_inline) &
@@ -485,24 +483,26 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                /* Sanity check. */
                                assert(addr <= addr_end);
                        }
-                       /* Store the inlined packet size in the WQE. */
-                       wqe->eseg.inline_hdr_sz = htons(pkt_inline_sz);
                        /*
-                        * 2 DWORDs consumed by the WQE header + 1 DSEG +
+                        * 2 DWORDs consumed by the WQE header + ETH segment +
                         * the size of the inline part of the packet.
                         */
                        ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
                        if (length > 0) {
-                               dseg = (struct mlx5_wqe_data_seg *)
+                               dseg = (volatile rte_v128u32_t *)
                                        ((uintptr_t)wqe +
                                         (ds * MLX5_WQE_DWORD_SIZE));
                                if ((uintptr_t)dseg >= end)
-                                       dseg = (struct mlx5_wqe_data_seg *)
-                                               ((uintptr_t)&(*txq->wqes)[0]);
+                                       dseg = (volatile rte_v128u32_t *)
+                                              txq->wqes;
                                goto use_dseg;
                        } else if (!segs_n) {
                                goto next_pkt;
                        } else {
+                               /* dseg will be advanced as part of next_seg */
+                               dseg = (volatile rte_v128u32_t *)
+                                       ((uintptr_t)wqe +
+                                        ((ds - 1) * MLX5_WQE_DWORD_SIZE));
                                goto next_seg;
                        }
                } else {
@@ -510,16 +510,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                         * No inline has been done in the packet, only the
                         * Ethernet Header has been stored.
                         */
-                       wqe->eseg.inline_hdr_sz = htons(MLX5_WQE_DWORD_SIZE);
-                       dseg = (struct mlx5_wqe_data_seg *)
+                       dseg = (volatile rte_v128u32_t *)
                                ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
                        ds = 3;
 use_dseg:
                        /* Add the remaining packet as a simple ds. */
-                       *dseg = (struct mlx5_wqe_data_seg) {
-                               .addr = htonll(addr),
-                               .byte_count = htonl(length),
-                               .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+                       naddr = htonll(addr);
+                       *dseg = (rte_v128u32_t){
+                               htonl(length),
+                               txq_mp2mr(txq, txq_mb2mp(buf)),
+                               naddr,
+                               naddr >> 32,
                        };
                        ++ds;
                        if (!segs_n)
@@ -539,9 +540,9 @@ next_seg:
                        unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) &
                                ((1 << txq->wqe_n) - 1);
 
-                       dseg = (struct mlx5_wqe_data_seg *)
-                               ((uintptr_t)&(*txq->wqes)[n]);
-                       tx_prefetch_wqe(txq, n + 1);
+                       dseg = (volatile rte_v128u32_t *)
+                              tx_mlx5_wqe(txq, n);
+                       rte_prefetch0(tx_mlx5_wqe(txq, n + 1));
                } else {
                        ++dseg;
                }
@@ -553,10 +554,12 @@ next_seg:
                total_length += length;
 #endif
                /* Store segment information. */
-               *dseg = (struct mlx5_wqe_data_seg) {
-                       .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
-                       .byte_count = htonl(length),
-                       .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+               naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+               *dseg = (rte_v128u32_t){
+                       htonl(length),
+                       txq_mp2mr(txq, txq_mb2mp(buf)),
+                       naddr,
+                       naddr >> 32,
                };
                (*txq->elts)[elts_head] = buf;
                elts_head = (elts_head + 1) & (elts_n - 1);
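
Both data-segment stores write the 16-byte descriptor as four 32-bit words: big-endian byte count, lkey, then the big-endian address split into two halves. A sketch showing the vector store produces the same memory image as the classic `mlx5_wqe_data_seg` layout, assuming a little-endian host as on the PMD's usual targets:

```c
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct dseg_image {          /* classic data segment layout */
	uint32_t byte_count; /* big endian */
	uint32_t lkey;
	uint64_t addr;       /* big endian */
};

static uint64_t
htonll_sketch(uint64_t v) /* stand-in for the PMD's htonll() */
{
	return ((uint64_t)htonl(v & 0xffffffff) << 32) | htonl(v >> 32);
}

int
main(void)
{
	uint32_t words[4]; /* what the rte_v128u32_t store writes */
	struct dseg_image img;
	uint64_t naddr = htonll_sketch(0x12345678abcdef00ULL);

	words[0] = htonl(2048);     /* byte count */
	words[1] = 0x11223344;      /* lkey, taken as-is from the MR */
	words[2] = (uint32_t)naddr; /* low half of the BE image (LE host) */
	words[3] = naddr >> 32;     /* high half */
	img.byte_count = htonl(2048);
	img.lkey = 0x11223344;
	img.addr = naddr;
	assert(!memcmp(words, &img, sizeof(img)));
	return 0;
}
```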
@@ -568,7 +571,19 @@ next_seg:
                        --pkts_n;
 next_pkt:
                ++i;
-               wqe->ctrl[1] = htonl(txq->qp_num_8s | ds);
+               /* Initialize known and common part of the WQE structure. */
+               wqe->ctrl = (rte_v128u32_t){
+                       htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+                       htonl(txq->qp_num_8s | ds),
+                       0,
+                       0,
+               };
+               wqe->eseg = (rte_v128u32_t){
+                       0,
+                       cs_flags,
+                       0,
+                       (ehdr << 16) | htons(pkt_inline_sz),
+               };
                txq->wqe_ci += (ds + 3) / 4;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
@@ -581,10 +596,13 @@ next_pkt:
        /* Check whether completion threshold has been reached. */
        comp = txq->elts_comp + i + j;
        if (comp >= MLX5_TX_COMP_THRESH) {
+               volatile struct mlx5_wqe_ctrl *w =
+                       (volatile struct mlx5_wqe_ctrl *)wqe;
+
                /* Request completion on last WQE. */
-               wqe->ctrl[2] = htonl(8);
+               w->ctrl2 = htonl(8);
                /* Save elts_head in unused "immediate" field of WQE. */
-               wqe->ctrl[3] = elts_head;
+               w->ctrl3 = elts_head;
                txq->elts_comp = 0;
        } else {
                txq->elts_comp = comp;
@@ -594,7 +612,7 @@ next_pkt:
        txq->stats.opackets += i;
 #endif
        /* Ring QP doorbell. */
-       mlx5_tx_dbrec(txq);
+       mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
        txq->elts_head = elts_head;
        return i;
 }
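
Only one WQE in roughly every `MLX5_TX_COMP_THRESH` packets requests a CQE, and the producer stashes `elts_head` in the otherwise unused `ctrl3` immediate so that `txq_complete()` knows how far it may free. A toy model of that bookkeeping, ring size assumed:

```c
#include <assert.h>
#include <stdint.h>

#define ELTS_N 256u /* assumption: TX ring entries, power of two */

/* Sketch: on a completion, the consumer frees every buffer up to
 * the head recorded in the completed WQE's immediate field. */
static uint16_t
complete(uint16_t elts_tail, uint16_t recorded_head)
{
	while (elts_tail != recorded_head) {
		/* free (*elts)[elts_tail] here */
		elts_tail = (elts_tail + 1) & (ELTS_N - 1);
	}
	return elts_tail;
}

int
main(void)
{
	/* A completion recorded at head 40 frees entries 30..39. */
	assert(complete(30, 40) == 40);
	return 0;
}
```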
@@ -615,20 +633,20 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
        uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
        volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
                (volatile struct mlx5_wqe_data_seg (*)[])
-               (uintptr_t)&(*txq->wqes)[(idx + 1) & ((1 << txq->wqe_n) - 1)];
+               tx_mlx5_wqe(txq, idx + 1);
 
        mpw->state = MLX5_MPW_STATE_OPENED;
        mpw->pkts_n = 0;
        mpw->len = length;
        mpw->total_len = 0;
-       mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+       mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
        mpw->wqe->eseg.mss = htons(length);
        mpw->wqe->eseg.inline_hdr_sz = 0;
        mpw->wqe->eseg.rsvd0 = 0;
        mpw->wqe->eseg.rsvd1 = 0;
        mpw->wqe->eseg.rsvd2 = 0;
        mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                 (txq->wqe_ci << 8) | MLX5_OPCODE_LSO_MPW);
+                                 (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
        mpw->wqe->ctrl[2] = 0;
        mpw->wqe->ctrl[3] = 0;
        mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -663,8 +681,8 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
                ++txq->wqe_ci;
        else
                txq->wqe_ci += 2;
-       tx_prefetch_wqe(txq, txq->wqe_ci);
-       tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
 }
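
The one-or-two WQEBB advance above falls out of the segment accounting: control plus Ethernet segments cost two 16-byte DS units, each packet adds one data segment, and the ring is consumed in 64-byte WQEBBs. A sketch of the arithmetic, with the session limit assumed:

```c
#include <assert.h>

#define MPW_DSEG_MAX 5 /* assumption: MLX5_MPW_DSEG_MAX */

/* Sketch: DS units held by an MPW session, rounded up to WQEBBs
 * (4 DS units of 16 bytes per 64-byte WQEBB). */
static unsigned int
mpw_wqebbs(unsigned int pkts_n)
{
	unsigned int ds = 2 + pkts_n; /* ctrl + eseg + one dseg each */

	return (ds + 3) / 4;
}

int
main(void)
{
	assert(mpw_wqebbs(2) == 1);            /* <= 2 packets: one WQEBB */
	assert(mpw_wqebbs(MPW_DSEG_MAX) == 2); /* full session: two */
	return 0;
}
```

This matches the one-or-two `wqe_ci` advance shown above.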
 
 /**
@@ -697,9 +715,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (unlikely(!pkts_n))
                return 0;
        /* Prefetch first packet cacheline. */
-       tx_prefetch_cqe(txq, txq->cq_ci);
-       tx_prefetch_wqe(txq, txq->wqe_ci);
-       tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
        /* Start processing. */
        txq_complete(txq);
        max = (elts_n - (elts_head - txq->elts_tail));
@@ -802,7 +819,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        /* Ring QP doorbell. */
        if (mpw.state == MLX5_MPW_STATE_OPENED)
                mlx5_mpw_close(txq, &mpw);
-       mlx5_tx_dbrec(txq);
+       mlx5_tx_dbrec(txq, mpw.wqe);
        txq->elts_head = elts_head;
        return i;
 }
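
An MPW session only stays open while successive packets keep the fields the session was opened with (notably the fixed length stored in `eseg.mss` and the checksum flags), so uniform bursts batch well. A toy grouping model of that rule:

```c
#include <stdio.h>

struct pkt {
	unsigned int len;
	unsigned int cs_flags;
};

/* Sketch: consecutive packets join the open session only while
 * their length and offload flags match; any change closes it and
 * opens a new one, roughly as mlx5_tx_burst_mpw() does. */
int
main(void)
{
	const struct pkt pkts[] = {
		{64, 0}, {64, 0}, {64, 1}, {128, 1}, {128, 1},
	};
	unsigned int i;
	unsigned int sessions = 0;
	struct pkt cur = {0, 0};

	for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); ++i) {
		if (!sessions || pkts[i].len != cur.len ||
		    pkts[i].cs_flags != cur.cs_flags) {
			cur = pkts[i];
			++sessions;
		}
	}
	printf("%u sessions\n", sessions); /* 3 */
	return 0;
}
```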
@@ -827,10 +844,10 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
        mpw->pkts_n = 0;
        mpw->len = length;
        mpw->total_len = 0;
-       mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+       mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
        mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
                                  (txq->wqe_ci << 8) |
-                                 MLX5_OPCODE_LSO_MPW);
+                                 MLX5_OPCODE_TSO);
        mpw->wqe->ctrl[2] = 0;
        mpw->wqe->ctrl[3] = 0;
        mpw->wqe->eseg.mss = htons(length);
@@ -902,9 +919,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
        if (unlikely(!pkts_n))
                return 0;
        /* Prefetch first packet cacheline. */
-       tx_prefetch_cqe(txq, txq->cq_ci);
-       tx_prefetch_wqe(txq, txq->wqe_ci);
-       tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
        /* Start processing. */
        txq_complete(txq);
        max = (elts_n - (elts_head - txq->elts_tail));
@@ -1005,14 +1021,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
                        (*txq->elts)[elts_head] = buf;
                        /* Maximum number of bytes before wrapping. */
-                       max = ((uintptr_t)&(*txq->wqes)[1 << txq->wqe_n] -
+                       max = ((((uintptr_t)(txq->wqes)) +
+                               (1 << txq->wqe_n) *
+                               MLX5_WQE_SIZE) -
                               (uintptr_t)mpw.data.raw);
                        if (length > max) {
                                rte_memcpy((void *)(uintptr_t)mpw.data.raw,
                                           (void *)addr,
                                           max);
-                               mpw.data.raw =
-                                       (volatile void *)&(*txq->wqes)[0];
+                               mpw.data.raw = (volatile void *)txq->wqes;
                                rte_memcpy((void *)(uintptr_t)mpw.data.raw,
                                           (void *)(addr + max),
                                           length - max);
@@ -1024,9 +1041,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                mpw.data.raw += length;
                        }
                        if ((uintptr_t)mpw.data.raw ==
-                           (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n])
-                               mpw.data.raw =
-                                       (volatile void *)&(*txq->wqes)[0];
+                           (uintptr_t)tx_mlx5_wqe(txq, 1 << txq->wqe_n))
+                               mpw.data.raw = (volatile void *)txq->wqes;
                        ++mpw.pkts_n;
                        ++j;
                        if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
@@ -1071,7 +1087,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                mlx5_mpw_inline_close(txq, &mpw);
        else if (mpw.state == MLX5_MPW_STATE_OPENED)
                mlx5_mpw_close(txq, &mpw);
-       mlx5_tx_dbrec(txq);
+       mlx5_tx_dbrec(txq, mpw.wqe);
        txq->elts_head = elts_head;
        return i;
 }
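
The inline variant writes packet bytes straight into the WQE ring, so a copy that would overrun the ring end is split in two with the cursor rewound to the base; that is what the "maximum number of bytes before wrapping" logic above implements. A standalone sketch:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Sketch: copy len bytes into a circular buffer of ring_sz bytes
 * starting at pos, wrapping once, and return the new cursor. */
static size_t
ring_copy(uint8_t *ring, size_t ring_sz, size_t pos,
	  const uint8_t *src, size_t len)
{
	size_t max = ring_sz - pos; /* bytes before wrapping */

	if (len > max) {
		memcpy(ring + pos, src, max);
		memcpy(ring, src + max, len - max);
		return len - max;
	}
	memcpy(ring + pos, src, len);
	pos += len;
	return pos == ring_sz ? 0 : pos; /* wrap exactly at the end */
}

int
main(void)
{
	uint8_t ring[64];
	uint8_t src[32];

	memset(src, 0xab, sizeof(src));
	/* 32 bytes at offset 48: 16 land at the end, 16 at the base. */
	assert(ring_copy(ring, sizeof(ring), 48, src, sizeof(src)) == 16);
	return 0;
}
```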
@@ -1088,34 +1104,33 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
        uint32_t pkt_type;
        uint8_t flags = cqe->l4_hdr_type_etc;
-       uint8_t info = cqe->rsvd0[0];
 
-       if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+       if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET)
                pkt_type =
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
-                                 RTE_PTYPE_L3_IPV4) |
+                                 MLX5_CQE_RX_OUTER_IPV4_PACKET,
+                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
-                                 RTE_PTYPE_L3_IPV6) |
+                                 MLX5_CQE_RX_OUTER_IPV6_PACKET,
+                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_IPV4_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV4) |
+                                 MLX5_CQE_RX_IPV4_PACKET,
+                                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_IPV6_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV6);
+                                 MLX5_CQE_RX_IPV6_PACKET,
+                                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
        else
                pkt_type =
                        TRANSPOSE(flags,
                                  MLX5_CQE_L3_HDR_TYPE_IPV6,
-                                 RTE_PTYPE_L3_IPV6) |
+                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
                                  MLX5_CQE_L3_HDR_TYPE_IPV4,
-                                 RTE_PTYPE_L3_IPV4);
+                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
        return pkt_type;
 }
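
`TRANSPOSE()` relocates a masked flag by multiplying or dividing by the ratio of the two masks, which is how a CQE bit becomes the corresponding mbuf bit without branching. Its likely definition from the PMD's utility header is reproduced here for illustration and exercised with made-up single-bit masks:

```c
#include <assert.h>
#include <stdint.h>

/* As in the mlx5 PMD (mlx5_utils.h): move the bits selected by
 * `from` into the position of `to`, assuming aligned masks. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

int
main(void)
{
	uint32_t flags = 0x40; /* pretend CQE flag at bit 6 */

	/* Moving bit 6 down to bit 1 is a divide by 0x40 / 0x02. */
	assert(TRANSPOSE(flags, 0x40, 0x02) == 0x02);
	/* An absent flag transposes to zero. */
	assert(TRANSPOSE(0, 0x40, 0x02) == 0);
	return 0;
}
```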
 
@@ -1128,14 +1143,16 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
  *   Pointer to RX queue.
  * @param cqe
  *   CQE to process.
+ * @param[out] rss_hash
+ *   Packet RSS Hash result.
  *
  * @return
  *   Packet size in bytes (0 if there is none), -1 in case of completion
  *   with error.
  */
 static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
-                uint16_t cqe_cnt)
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+                uint16_t cqe_cnt, uint32_t *rss_hash)
 {
        struct rxq_zip *zip = &rxq->zip;
        uint16_t cqe_n = cqe_cnt + 1;
@@ -1145,9 +1162,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
        if (zip->ai) {
                volatile struct mlx5_mini_cqe8 (*mc)[8] =
                        (volatile struct mlx5_mini_cqe8 (*)[8])
-                       (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64);
+                       (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]);
 
                len = ntohl((*mc)[zip->ai & 7].byte_cnt);
+               *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
                if ((++zip->ai & 7) == 0) {
                        /*
                         * Increment consumer index to skip the number of
@@ -1158,11 +1176,11 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
                        zip->na += 8;
                }
                if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
-                       uint16_t idx = rxq->cq_ci;
+                       uint16_t idx = rxq->cq_ci + 1;
                        uint16_t end = zip->cq_ci;
 
                        while (idx != end) {
-                               (*rxq->cqes)[idx & cqe_cnt].cqe64.op_own =
+                               (*rxq->cqes)[idx & cqe_cnt].op_own =
                                        MLX5_CQE_INVALIDATE;
                                ++idx;
                        }
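
Invalidation works because `op_own` packs the opcode in its high nibble: writing `MLX5_CQE_INVALIDATE` leaves an opcode the validity test can never accept, whatever the owner bit says. A sketch with the opcode value assumed:

```c
#include <assert.h>
#include <stdint.h>

#define CQE_INVALID 14 /* assumption: value of MLX5_CQE_INVALID */

/* Sketch: MLX5_CQE_OPCODE() reads the high nibble of op_own, so a
 * slot stamped with the INVALID opcode stays rejected until the
 * hardware overwrites it on the next lap. */
static int
cqe_rejected(uint8_t op_own)
{
	return (op_own >> 4) == CQE_INVALID;
}

int
main(void)
{
	uint8_t op_own = (CQE_INVALID << 4) | 1; /* MLX5_CQE_INVALIDATE-like */

	assert(cqe_rejected(op_own));
	return 0;
}
```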
@@ -1174,7 +1192,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
                int ret;
                int8_t op_own;
 
-               ret = check_cqe64(cqe, cqe_n, rxq->cq_ci);
+               ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
                if (unlikely(ret == 1))
                        return 0;
                ++rxq->cq_ci;
@@ -1183,7 +1201,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
                        volatile struct mlx5_mini_cqe8 (*mc)[8] =
                                (volatile struct mlx5_mini_cqe8 (*)[8])
                                (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
-                                                         cqe_cnt].cqe64);
+                                                         cqe_cnt]);
 
                        /* Fix endianness. */
                        zip->cqe_cnt = ntohl(cqe->byte_cnt);
@@ -1202,9 +1220,11 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
                        zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
                        /* Get packet size to return. */
                        len = ntohl((*mc)[0].byte_cnt);
+                       *rss_hash = ntohl((*mc)[0].rx_hash_result);
                        zip->ai = 1;
                } else {
                        len = ntohl(cqe->byte_cnt);
+                       *rss_hash = ntohl(cqe->rx_hash_res);
                }
                /* Error while receiving packet. */
                if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1225,38 +1245,32 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
        uint32_t ol_flags = 0;
        uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
        uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
-       uint8_t info = cqe->rsvd0[0];
 
        if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
            (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
-               ol_flags |=
-                       (!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
-                        PKT_RX_IP_CKSUM_BAD);
+               ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+                                     MLX5_CQE_L3_OK,
+                                     PKT_RX_IP_CKSUM_GOOD);
        if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
            (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
            (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
            (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
+               ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+                                     MLX5_CQE_L4_OK,
+                                     PKT_RX_L4_CKSUM_GOOD);
+       if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
                ol_flags |=
-                       (!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
-                        PKT_RX_L4_CKSUM_BAD);
-       /*
-        * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
-        * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
-        * (its value is 0).
-        */
-       if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
-               ol_flags |=
-                       TRANSPOSE(~cqe->l4_hdr_type_etc,
-                                 IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
-                                 PKT_RX_IP_CKSUM_BAD) |
-                       TRANSPOSE(~cqe->l4_hdr_type_etc,
-                                 IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
-                                 PKT_RX_L4_CKSUM_BAD);
+                       TRANSPOSE(cqe->l4_hdr_type_etc,
+                                 MLX5_CQE_RX_OUTER_IP_CSUM_OK,
+                                 PKT_RX_IP_CKSUM_GOOD) |
+                       TRANSPOSE(cqe->l4_hdr_type_etc,
+                                 MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
+                                 PKT_RX_L4_CKSUM_GOOD);
        return ol_flags;
 }
 
@@ -1282,16 +1296,17 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
        const unsigned int sges_n = rxq->sges_n;
        struct rte_mbuf *pkt = NULL;
        struct rte_mbuf *seg = NULL;
-       volatile struct mlx5_cqe64 *cqe =
-               &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+       volatile struct mlx5_cqe *cqe =
+               &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
        unsigned int i = 0;
        unsigned int rq_ci = rxq->rq_ci << sges_n;
-       int len;
+       int len; /* keep its value across iterations. */
 
        while (pkts_n) {
                unsigned int idx = rq_ci & wqe_cnt;
                volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
                struct rte_mbuf *rep = (*rxq->elts)[idx];
+               uint32_t rss_hash_res = 0;
 
                if (pkt)
                        NEXT(seg) = rep;
@@ -1311,17 +1326,18 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        }
                        while (pkt != seg) {
                                assert(pkt != (*rxq->elts)[idx]);
-                               seg = NEXT(pkt);
+                               rep = NEXT(pkt);
                                rte_mbuf_refcnt_set(pkt, 0);
                                __rte_mbuf_raw_free(pkt);
-                               pkt = seg;
+                               pkt = rep;
                        }
                        break;
                }
                if (!pkt) {
-                       cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
-                       len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt);
-                       if (len == 0) {
+                       cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+                       len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
+                                              &rss_hash_res);
+                       if (!len) {
                                rte_mbuf_refcnt_set(rep, 0);
                                __rte_mbuf_raw_free(rep);
                                break;
@@ -1338,12 +1354,26 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        /* Update packet information. */
                        pkt->packet_type = 0;
                        pkt->ol_flags = 0;
+                       if (rss_hash_res && rxq->rss_hash) {
+                               pkt->hash.rss = rss_hash_res;
+                               pkt->ol_flags = PKT_RX_RSS_HASH;
+                       }
+                       if (rxq->mark &&
+                           ((cqe->sop_drop_qpn !=
+                             htonl(MLX5_FLOW_MARK_INVALID)) &&
+                            (cqe->sop_drop_qpn !=
+                             htonl(MLX5_FLOW_MARK_DEFAULT)))) {
+                               pkt->hash.fdir.hi =
+                                       mlx5_flow_mark_get(cqe->sop_drop_qpn);
+                               pkt->ol_flags &= ~PKT_RX_RSS_HASH;
+                               pkt->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+                       }
                        if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
                            rxq->crc_present) {
                                if (rxq->csum) {
                                        pkt->packet_type =
                                                rxq_cq_to_pkt_type(cqe);
-                                       pkt->ol_flags =
+                                       pkt->ol_flags |=
                                                rxq_cq_to_ol_flags(rxq, cqe);
                                }
                                if (cqe->l4_hdr_type_etc &