net/mlx5: use buffer address for LKEY search
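
This gitweb view of dpdk.git shows a diff to the mlx5 PMD datapath in drivers/net/mlx5/mlx5_rxtx.c. The headline change replaces the mempool-based MR lookup txq_mp2mr() with an address-based txq_mb2mr(): the LKEY is found by checking the mbuf's data address against the start/end range stored in each txq->mp2mr[] entry, with the last hit cached in txq->mr_cache_idx. The same diff also switches txq_complete() to bulk mbuf freeing via rte_pktmbuf_prefree_seg() and rte_mempool_put_bulk(), turns elts_head/elts_tail into free-running uint16_t indexes masked by elts_m, requests completion on the last successfully posted WQE, moves the Rx path to rte_mbuf_raw_free(), and drops the Rx interrupt enable/disable callbacks from this file.

As a reading aid, here is a minimal sketch of the new lookup logic, not the verbatim driver code; the field names (start, end, lkey, mr, mr_cache_idx) are taken from the hunks below, and registering a missing MR is left to the caller (txq_mp2mr_reg() in the real code):

    /* Simplified address-range LKEY lookup; assumes the txq->mp2mr[]
     * layout shown in the diff below. */
    static inline uint32_t
    lkey_of_addr(struct txq *txq, uintptr_t addr)
    {
            uint16_t i = txq->mr_cache_idx;

            /* Fast path: the entry that matched last time. */
            if (txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr)
                    return txq->mp2mr[i].lkey;
            /* Slow path: linear scan of the registered ranges. */
            for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
                    if (txq->mp2mr[i].mr == NULL)
                            break;          /* first free slot, stop */
                    if (txq->mp2mr[i].start <= addr &&
                        txq->mp2mr[i].end >= addr) {
                            txq->mr_cache_idx = i;  /* remember the hit */
                            return txq->mp2mr[i].lkey;
                    }
            }
            txq->mr_cache_idx = 0;
            return (uint32_t)-1;            /* not registered yet */
    }

---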
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 87eec2e..688ee90 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 
-static inline int
+static __rte_always_inline int
 check_cqe(volatile struct mlx5_cqe *cqe,
-         unsigned int cqes_n, const uint16_t ci)
-         __attribute__((always_inline));
+         unsigned int cqes_n, const uint16_t ci);
 
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+static __rte_always_inline void
+txq_complete(struct txq *txq);
 
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-       __attribute__((always_inline));
+static __rte_always_inline uint32_t
+txq_mb2mr(struct txq *txq, struct rte_mbuf *mb);
 
-static inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
-       __attribute__((always_inline));
+static __rte_always_inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe);
 
-static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
-       __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
 
-static inline int
+static __rte_always_inline int
 mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
-                uint16_t cqe_cnt, uint32_t *rss_hash)
-                __attribute__((always_inline));
+                uint16_t cqe_cnt, uint32_t *rss_hash);
 
-static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
-                  __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
 
 #ifndef NDEBUG
 
@@ -261,7 +255,8 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 static inline void
 txq_complete(struct txq *txq)
 {
-       const unsigned int elts_n = 1 << txq->elts_n;
+       const uint16_t elts_n = 1 << txq->elts_n;
+       const uint16_t elts_m = elts_n - 1;
        const unsigned int cqe_n = 1 << txq->cqe_n;
        const unsigned int cqe_cnt = cqe_n - 1;
        uint16_t elts_free = txq->elts_tail;
@@ -269,6 +264,9 @@ txq_complete(struct txq *txq)
        uint16_t cq_ci = txq->cq_ci;
        volatile struct mlx5_cqe *cqe = NULL;
        volatile struct mlx5_wqe_ctrl *ctrl;
+       struct rte_mbuf *m, *free[elts_n];
+       struct rte_mempool *pool = NULL;
+       unsigned int blk_n = 0;
 
        do {
                volatile struct mlx5_cqe *tmp;
@@ -298,25 +296,36 @@ txq_complete(struct txq *txq)
        ctrl = (volatile struct mlx5_wqe_ctrl *)
                tx_mlx5_wqe(txq, txq->wqe_pi);
        elts_tail = ctrl->ctrl3;
-       assert(elts_tail < (1 << txq->wqe_n));
+       assert((elts_tail & elts_m) < (1 << txq->wqe_n));
        /* Free buffers. */
        while (elts_free != elts_tail) {
-               struct rte_mbuf *elt = (*txq->elts)[elts_free];
-               unsigned int elts_free_next =
-                       (elts_free + 1) & (elts_n - 1);
-               struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];
-
+               m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
+               if (likely(m != NULL)) {
+                       if (likely(m->pool == pool)) {
+                               free[blk_n++] = m;
+                       } else {
+                               if (likely(pool != NULL))
+                                       rte_mempool_put_bulk(pool,
+                                                            (void *)free,
+                                                            blk_n);
+                               free[0] = m;
+                               pool = m->pool;
+                               blk_n = 1;
+                       }
+               }
+       }
+       if (blk_n)
+               rte_mempool_put_bulk(pool, (void *)free, blk_n);
 #ifndef NDEBUG
-               /* Poisoning. */
-               memset(&(*txq->elts)[elts_free],
+       elts_free = txq->elts_tail;
+       /* Poisoning. */
+       while (elts_free != elts_tail) {
+               memset(&(*txq->elts)[elts_free & elts_m],
                       0x66,
-                      sizeof((*txq->elts)[elts_free]));
-#endif
-               RTE_MBUF_PREFETCH_TO_FREE(elt_next);
-               /* Only one segment needs to be freed. */
-               rte_pktmbuf_free_seg(elt);
-               elts_free = elts_free_next;
+                      sizeof((*txq->elts)[elts_free & elts_m]));
+               ++elts_free;
        }
+#endif
        txq->cq_ci = cq_ci;
        txq->elts_tail = elts_tail;
        /* Update the consumer index. */
@@ -343,7 +352,7 @@ txq_mb2mp(struct rte_mbuf *buf)
 }
 
 /**
- * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
+ * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
  * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
  * remove an entry first.
  *
@@ -356,27 +365,30 @@ txq_mb2mp(struct rte_mbuf *buf)
  *   mr->lkey on success, (uint32_t)-1 on failure.
  */
 static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+txq_mb2mr(struct txq *txq, struct rte_mbuf *mb)
 {
-       unsigned int i;
-       uint32_t lkey = (uint32_t)-1;
+       uint16_t i = txq->mr_cache_idx;
+       uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
 
+       assert(i < RTE_DIM(txq->mp2mr));
+       if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
+               return txq->mp2mr[i].lkey;
        for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-               if (unlikely(txq->mp2mr[i].mp == NULL)) {
+               if (unlikely(txq->mp2mr[i].mr == NULL)) {
                        /* Unknown MP, add a new MR for it. */
                        break;
                }
-               if (txq->mp2mr[i].mp == mp) {
+               if (txq->mp2mr[i].start <= addr &&
+                   txq->mp2mr[i].end >= addr) {
                        assert(txq->mp2mr[i].lkey != (uint32_t)-1);
                        assert(htonl(txq->mp2mr[i].mr->lkey) ==
                               txq->mp2mr[i].lkey);
-                       lkey = txq->mp2mr[i].lkey;
-                       break;
+                       txq->mr_cache_idx = i;
+                       return txq->mp2mr[i].lkey;
                }
        }
-       if (unlikely(lkey == (uint32_t)-1))
-               lkey = txq_mp2mr_reg(txq, mp, i);
-       return lkey;
+       txq->mr_cache_idx = 0;
+       return txq_mp2mr_reg(txq, txq_mb2mp(mb), i);
 }
 
 /**
@@ -415,12 +427,10 @@ int
 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
        struct txq *txq = tx_queue;
-       const unsigned int elts_n = 1 << txq->elts_n;
-       const unsigned int elts_cnt = elts_n - 1;
-       unsigned int used;
+       uint16_t used;
 
        txq_complete(txq);
-       used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+       used = txq->elts_head - txq->elts_tail;
        if (offset < used)
                return RTE_ETH_TX_DESC_FULL;
        return RTE_ETH_TX_DESC_DONE;
@@ -494,14 +504,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct txq *txq = (struct txq *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
-       const unsigned int elts_n = 1 << txq->elts_n;
+       const uint16_t elts_n = 1 << txq->elts_n;
+       const uint16_t elts_m = elts_n - 1;
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int k = 0;
-       unsigned int max;
+       uint16_t max_elts;
+       unsigned int max_inline = txq->max_inline;
+       const unsigned int inline_en = !!max_inline && txq->inline_en;
        uint16_t max_wqe;
        unsigned int comp;
        volatile struct mlx5_wqe_v *wqe = NULL;
+       volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
        unsigned int segs_n = 0;
        struct rte_mbuf *buf = NULL;
        uint8_t *raw;
@@ -512,9 +526,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        rte_prefetch0(*pkts);
        /* Start processing. */
        txq_complete(txq);
-       max = (elts_n - (elts_head - txq->elts_tail));
-       if (max > elts_n)
-               max -= elts_n;
+       max_elts = (elts_n - (elts_head - txq->elts_tail));
        max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
        if (unlikely(!max_wqe))
                return 0;
@@ -522,6 +534,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                volatile rte_v128u32_t *dseg = NULL;
                uint32_t length;
                unsigned int ds = 0;
+               unsigned int sg = 0; /* counter of additional segs attached. */
                uintptr_t addr;
                uint64_t naddr;
                uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
@@ -529,31 +542,30 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                uint16_t ehdr;
                uint8_t cs_flags = 0;
                uint64_t tso = 0;
+               uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                uint32_t total_length = 0;
 #endif
 
                /* first_seg */
-               buf = *(pkts++);
+               buf = *pkts;
                segs_n = buf->nb_segs;
                /*
                 * Make sure there is enough room to store this packet and
                 * that one ring entry remains unused.
                 */
                assert(segs_n);
-               if (max < segs_n + 1)
+               if (max_elts < segs_n)
                        break;
-               max -= segs_n;
+               max_elts -= segs_n;
                --segs_n;
-               if (!segs_n)
-                       --pkts_n;
                if (unlikely(--max_wqe == 0))
                        break;
                wqe = (volatile struct mlx5_wqe_v *)
                        tx_mlx5_wqe(txq, txq->wqe_ci);
                rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-               if (pkts_n > 1)
-                       rte_prefetch0(*pkts);
+               if (pkts_n - i > 1)
+                       rte_prefetch0(*(pkts + 1));
                addr = rte_pktmbuf_mtod(buf, uintptr_t);
                length = DATA_LEN(buf);
                ehdr = (((uint8_t *)addr)[1] << 8) |
@@ -564,15 +576,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (length < (MLX5_WQE_DWORD_SIZE + 2))
                        break;
                /* Update element. */
-               (*txq->elts)[elts_head] = buf;
-               elts_head = (elts_head + 1) & (elts_n - 1);
+               (*txq->elts)[elts_head & elts_m] = buf;
                /* Prefetch next buffer data. */
-               if (pkts_n > 1) {
-                       volatile void *pkt_addr;
-
-                       pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
-                       rte_prefetch0(pkt_addr);
-               }
+               if (pkts_n - i > 1)
+                       rte_prefetch0(
+                           rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
                /* Should we enable HW CKSUM offload */
                if (buf->ol_flags &
                    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
@@ -630,6 +638,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 
                                tso_header_sz = buf->l2_len + vlan_sz +
                                                buf->l3_len + buf->l4_len;
+                               tso_segsz = buf->tso_segsz;
 
                                if (is_tunneled && txq->tunnel_en) {
                                        tso_header_sz += buf->outer_l2_len +
@@ -675,24 +684,20 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        };
                                        ds = 1;
                                        total_length = 0;
-                                       pkts--;
-                                       pkts_n++;
-                                       elts_head = (elts_head - 1) &
-                                                   (elts_n - 1);
                                        k++;
                                        goto next_wqe;
                                }
                        }
                }
                /* Inline if enough room. */
-               if (txq->inline_en || tso) {
+               if (inline_en || tso) {
                        uintptr_t end = (uintptr_t)
                                (((uintptr_t)txq->wqes) +
                                 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
-                       unsigned int max_inline = txq->max_inline *
-                                                 RTE_CACHE_LINE_SIZE -
-                                                 (pkt_inline_sz - 2);
-                       uintptr_t addr_end = (addr + max_inline) &
+                       unsigned int inline_room = max_inline *
+                                                  RTE_CACHE_LINE_SIZE -
+                                                  (pkt_inline_sz - 2);
+                       uintptr_t addr_end = (addr + inline_room) &
                                             ~(RTE_CACHE_LINE_SIZE - 1);
                        unsigned int copy_b = (addr_end > addr) ?
                                RTE_MIN((addr_end - addr), length) :
@@ -768,7 +773,7 @@ use_dseg:
                        naddr = htonll(addr);
                        *dseg = (rte_v128u32_t){
                                htonl(length),
-                               txq_mp2mr(txq, txq_mb2mp(buf)),
+                               txq_mb2mr(txq, buf),
                                naddr,
                                naddr >> 32,
                        };
@@ -807,19 +812,20 @@ next_seg:
                naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
                *dseg = (rte_v128u32_t){
                        htonl(length),
-                       txq_mp2mr(txq, txq_mb2mp(buf)),
+                       txq_mb2mr(txq, buf),
                        naddr,
                        naddr >> 32,
                };
-               (*txq->elts)[elts_head] = buf;
-               elts_head = (elts_head + 1) & (elts_n - 1);
-               ++j;
-               --segs_n;
-               if (segs_n)
+               (*txq->elts)[++elts_head & elts_m] = buf;
+               ++sg;
+               /* Advance counter only if all segs are successfully posted. */
+               if (sg < segs_n)
                        goto next_seg;
                else
-                       --pkts_n;
+                       j += sg;
 next_pkt:
+               ++elts_head;
+               ++pkts;
                ++i;
                /* Initialize known and common part of the WQE structure. */
                if (tso) {
@@ -831,7 +837,7 @@ next_pkt:
                        };
                        wqe->eseg = (rte_v128u32_t){
                                0,
-                               cs_flags | (htons(buf->tso_segsz) << 16),
+                               cs_flags | (htons(tso_segsz) << 16),
                                0,
                                (ehdr << 16) | htons(tso_header_sz),
                        };
@@ -851,24 +857,24 @@ next_pkt:
                }
 next_wqe:
                txq->wqe_ci += (ds + 3) / 4;
+               /* Save the last successful WQE for completion request */
+               last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
                txq->stats.obytes += total_length;
 #endif
-       } while (pkts_n);
+       } while (i < pkts_n);
        /* Take a shortcut if nothing must be sent. */
        if (unlikely((i + k) == 0))
                return 0;
+       txq->elts_head += (i + j);
        /* Check whether completion threshold has been reached. */
        comp = txq->elts_comp + i + j + k;
        if (comp >= MLX5_TX_COMP_THRESH) {
-               volatile struct mlx5_wqe_ctrl *w =
-                       (volatile struct mlx5_wqe_ctrl *)wqe;
-
                /* Request completion on last WQE. */
-               w->ctrl2 = htonl(8);
+               last_wqe->ctrl2 = htonl(8);
                /* Save elts_head in unused "immediate" field of WQE. */
-               w->ctrl3 = elts_head;
+               last_wqe->ctrl3 = txq->elts_head;
                txq->elts_comp = 0;
        } else {
                txq->elts_comp = comp;
@@ -878,8 +884,7 @@ next_wqe:
        txq->stats.opackets += i;
 #endif
        /* Ring QP doorbell. */
-       mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
-       txq->elts_head = elts_head;
+       mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
        return i;
 }
 
@@ -969,10 +974,11 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct txq *txq = (struct txq *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
-       const unsigned int elts_n = 1 << txq->elts_n;
+       const uint16_t elts_n = 1 << txq->elts_n;
+       const uint16_t elts_m = elts_n - 1;
        unsigned int i = 0;
        unsigned int j = 0;
-       unsigned int max;
+       uint16_t max_elts;
        uint16_t max_wqe;
        unsigned int comp;
        struct mlx5_mpw mpw = {
@@ -986,15 +992,12 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
        /* Start processing. */
        txq_complete(txq);
-       max = (elts_n - (elts_head - txq->elts_tail));
-       if (max > elts_n)
-               max -= elts_n;
+       max_elts = (elts_n - (elts_head - txq->elts_tail));
        max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
        if (unlikely(!max_wqe))
                return 0;
        do {
                struct rte_mbuf *buf = *(pkts++);
-               unsigned int elts_head_next;
                uint32_t length;
                unsigned int segs_n = buf->nb_segs;
                uint32_t cs_flags = 0;
@@ -1004,12 +1007,12 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 * that one ring entry remains unused.
                 */
                assert(segs_n);
-               if (max < segs_n + 1)
+               if (max_elts < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
                if (segs_n > MLX5_MPW_DSEG_MAX)
                        break;
-               max -= segs_n;
+               max_elts -= segs_n;
                --pkts_n;
                /* Should we enable HW CKSUM offload */
                if (buf->ol_flags &
@@ -1045,17 +1048,15 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        volatile struct mlx5_wqe_data_seg *dseg;
                        uintptr_t addr;
 
-                       elts_head_next = (elts_head + 1) & (elts_n - 1);
                        assert(buf);
-                       (*txq->elts)[elts_head] = buf;
+                       (*txq->elts)[elts_head++ & elts_m] = buf;
                        dseg = mpw.data.dseg[mpw.pkts_n];
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
                        *dseg = (struct mlx5_wqe_data_seg){
                                .byte_count = htonl(DATA_LEN(buf)),
-                               .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+                               .lkey = txq_mb2mr(txq, buf),
                                .addr = htonll(addr),
                        };
-                       elts_head = elts_head_next;
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                        length += DATA_LEN(buf);
 #endif
@@ -1066,7 +1067,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                assert(length == mpw.len);
                if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
                        mlx5_mpw_close(txq, &mpw);
-               elts_head = elts_head_next;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
                txq->stats.obytes += length;
@@ -1184,10 +1184,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 {
        struct txq *txq = (struct txq *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
-       const unsigned int elts_n = 1 << txq->elts_n;
+       const uint16_t elts_n = 1 << txq->elts_n;
+       const uint16_t elts_m = elts_n - 1;
        unsigned int i = 0;
        unsigned int j = 0;
-       unsigned int max;
+       uint16_t max_elts;
        uint16_t max_wqe;
        unsigned int comp;
        unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
@@ -1214,12 +1215,9 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
        rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
        /* Start processing. */
        txq_complete(txq);
-       max = (elts_n - (elts_head - txq->elts_tail));
-       if (max > elts_n)
-               max -= elts_n;
+       max_elts = (elts_n - (elts_head - txq->elts_tail));
        do {
                struct rte_mbuf *buf = *(pkts++);
-               unsigned int elts_head_next;
                uintptr_t addr;
                uint32_t length;
                unsigned int segs_n = buf->nb_segs;
@@ -1230,12 +1228,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                 * that one ring entry remains unused.
                 */
                assert(segs_n);
-               if (max < segs_n + 1)
+               if (max_elts < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
                if (segs_n > MLX5_MPW_DSEG_MAX)
                        break;
-               max -= segs_n;
+               max_elts -= segs_n;
                --pkts_n;
                /*
                 * Compute max_wqe in case less WQE were consumed in previous
@@ -1296,18 +1294,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                        do {
                                volatile struct mlx5_wqe_data_seg *dseg;
 
-                               elts_head_next =
-                                       (elts_head + 1) & (elts_n - 1);
                                assert(buf);
-                               (*txq->elts)[elts_head] = buf;
+                               (*txq->elts)[elts_head++ & elts_m] = buf;
                                dseg = mpw.data.dseg[mpw.pkts_n];
                                addr = rte_pktmbuf_mtod(buf, uintptr_t);
                                *dseg = (struct mlx5_wqe_data_seg){
                                        .byte_count = htonl(DATA_LEN(buf)),
-                                       .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+                                       .lkey = txq_mb2mr(txq, buf),
                                        .addr = htonll(addr),
                                };
-                               elts_head = elts_head_next;
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                                length += DATA_LEN(buf);
 #endif
@@ -1324,9 +1319,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                        assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
                        assert(length <= inline_room);
                        assert(length == DATA_LEN(buf));
-                       elts_head_next = (elts_head + 1) & (elts_n - 1);
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                       (*txq->elts)[elts_head] = buf;
+                       (*txq->elts)[elts_head++ & elts_m] = buf;
                        /* Maximum number of bytes before wrapping. */
                        max = ((((uintptr_t)(txq->wqes)) +
                                (1 << txq->wqe_n) *
@@ -1363,7 +1357,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                inline_room -= length;
                        }
                }
-               elts_head = elts_head_next;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
                txq->stats.obytes += length;
@@ -1485,10 +1478,11 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct txq *txq = (struct txq *)dpdk_txq;
        uint16_t elts_head = txq->elts_head;
-       const unsigned int elts_n = 1 << txq->elts_n;
+       const uint16_t elts_n = 1 << txq->elts_n;
+       const uint16_t elts_m = elts_n - 1;
        unsigned int i = 0;
        unsigned int j = 0;
-       unsigned int max_elts;
+       uint16_t max_elts;
        uint16_t max_wqe;
        unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
        unsigned int mpw_room = 0;
@@ -1503,8 +1497,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        /* Start processing. */
        txq_complete(txq);
        max_elts = (elts_n - (elts_head - txq->elts_tail));
-       if (max_elts > elts_n)
-               max_elts -= elts_n;
        /* A CQE slot must always be available. */
        assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
        max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
@@ -1512,7 +1504,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                return 0;
        do {
                struct rte_mbuf *buf = *(pkts++);
-               unsigned int elts_head_next;
                uintptr_t addr;
                uint64_t naddr;
                unsigned int n;
@@ -1526,7 +1517,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 * that one ring entry remains unused.
                 */
                assert(segs_n);
-               if (max_elts - j < segs_n + 1)
+               if (max_elts - j < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
                if (segs_n > MLX5_MPW_DSEG_MAX)
@@ -1610,18 +1601,15 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        do {
                                volatile struct mlx5_wqe_data_seg *dseg;
 
-                               elts_head_next =
-                                       (elts_head + 1) & (elts_n - 1);
                                assert(buf);
-                               (*txq->elts)[elts_head] = buf;
+                               (*txq->elts)[elts_head++ & elts_m] = buf;
                                dseg = mpw.data.dseg[mpw.pkts_n];
                                addr = rte_pktmbuf_mtod(buf, uintptr_t);
                                *dseg = (struct mlx5_wqe_data_seg){
                                        .byte_count = htonl(DATA_LEN(buf)),
-                                       .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+                                       .lkey = txq_mb2mr(txq, buf),
                                        .addr = htonll(addr),
                                };
-                               elts_head = elts_head_next;
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                                length += DATA_LEN(buf);
 #endif
@@ -1672,7 +1660,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        /* No need to get completion as the entire packet is
                         * copied to WQ. Free the buf right away.
                         */
-                       elts_head_next = elts_head;
                        rte_pktmbuf_free_seg(buf);
                        mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
                        /* Add pad in the next packet if any. */
@@ -1695,8 +1682,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                dseg = (volatile void *)
                                        ((uintptr_t)mpw.data.raw +
                                         inl_pad);
-                       elts_head_next = (elts_head + 1) & (elts_n - 1);
-                       (*txq->elts)[elts_head] = buf;
+                       (*txq->elts)[elts_head++ & elts_m] = buf;
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
                        for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
                                rte_prefetch2((void *)(addr +
@@ -1704,7 +1690,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        naddr = htonll(addr);
                        *dseg = (rte_v128u32_t) {
                                htonl(length),
-                               txq_mp2mr(txq, txq_mb2mp(buf)),
+                               txq_mb2mr(txq, buf),
                                naddr,
                                naddr >> 32,
                        };
@@ -1715,7 +1701,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        mpw_room -= (inl_pad + sizeof(*dseg));
                        inl_pad = 0;
                }
-               elts_head = elts_head_next;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
                txq->stats.obytes += length;
@@ -1972,7 +1957,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
        unsigned int i = 0;
        unsigned int rq_ci = rxq->rq_ci << sges_n;
-       int len; /* keep its value across iterations. */
+       int len = 0; /* keep its value across iterations. */
 
        while (pkts_n) {
                unsigned int idx = rq_ci & wqe_cnt;
@@ -1999,8 +1984,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        while (pkt != seg) {
                                assert(pkt != (*rxq->elts)[idx]);
                                rep = NEXT(pkt);
-                               rte_mbuf_refcnt_set(pkt, 0);
-                               __rte_mbuf_raw_free(pkt);
+                               NEXT(pkt) = NULL;
+                               NB_SEGS(pkt) = 1;
+                               rte_mbuf_raw_free(pkt);
                                pkt = rep;
                        }
                        break;
@@ -2010,14 +1996,12 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
                                               &rss_hash_res);
                        if (!len) {
-                               rte_mbuf_refcnt_set(rep, 0);
-                               __rte_mbuf_raw_free(rep);
+                               rte_mbuf_raw_free(rep);
                                break;
                        }
                        if (unlikely(len == -1)) {
                                /* RX error, packet is likely too large. */
-                               rte_mbuf_refcnt_set(rep, 0);
-                               __rte_mbuf_raw_free(rep);
+                               rte_mbuf_raw_free(rep);
                                ++rxq->stats.idropped;
                                goto skip;
                        }
@@ -2042,31 +2026,25 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                                mlx5_flow_mark_get(mark);
                                }
                        }
-                       if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
-                           rxq->crc_present) {
-                               if (rxq->csum) {
-                                       pkt->packet_type =
-                                               rxq_cq_to_pkt_type(cqe);
-                                       pkt->ol_flags |=
-                                               rxq_cq_to_ol_flags(rxq, cqe);
-                               }
-                               if (ntohs(cqe->hdr_type_etc) &
-                                   MLX5_CQE_VLAN_STRIPPED) {
-                                       pkt->ol_flags |= PKT_RX_VLAN_PKT |
-                                               PKT_RX_VLAN_STRIPPED;
-                                       pkt->vlan_tci = ntohs(cqe->vlan_info);
-                               }
-                               if (rxq->crc_present)
-                                       len -= ETHER_CRC_LEN;
+                       if (rxq->csum | rxq->csum_l2tun) {
+                               pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+                               pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
+                       }
+                       if (rxq->vlan_strip &&
+                           (cqe->hdr_type_etc &
+                            htons(MLX5_CQE_VLAN_STRIPPED))) {
+                               pkt->ol_flags |= PKT_RX_VLAN_PKT |
+                                       PKT_RX_VLAN_STRIPPED;
+                               pkt->vlan_tci = ntohs(cqe->vlan_info);
                        }
+                       if (rxq->crc_present)
+                               len -= ETHER_CRC_LEN;
                        PKT_LEN(pkt) = len;
                }
                DATA_LEN(rep) = DATA_LEN(seg);
                PKT_LEN(rep) = PKT_LEN(seg);
                SET_DATA_OFF(rep, DATA_OFF(seg));
-               NB_SEGS(rep) = NB_SEGS(seg);
                PORT(rep) = PORT(seg);
-               NEXT(rep) = NULL;
                (*rxq->elts)[idx] = rep;
                /*
                 * Fill NIC descriptor with the new buffer.  The lkey and size
@@ -2160,76 +2138,3 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
        (void)pkts_n;
        return 0;
 }
-
-/**
- * DPDK callback for rx queue interrupt enable.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param rx_queue_id
- *   RX queue number
- *
- * @return
- *   0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#ifdef HAVE_UPDATE_CQ_CI
-       struct priv *priv = mlx5_get_priv(dev);
-       struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-       struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
-       struct ibv_cq *cq = rxq_ctrl->cq;
-       uint16_t ci = rxq->cq_ci;
-       int ret = 0;
-
-       ibv_mlx5_exp_update_cq_ci(cq, ci);
-       ret = ibv_req_notify_cq(cq, 0);
-#else
-       int ret = -1;
-       (void)dev;
-       (void)rx_queue_id;
-#endif
-       if (ret)
-               WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
-       return ret;
-}
-
-/**
- * DPDK callback for rx queue interrupt disable.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param rx_queue_id
- *   RX queue number
- *
- * @return
- *   0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#ifdef HAVE_UPDATE_CQ_CI
-       struct priv *priv = mlx5_get_priv(dev);
-       struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-       struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
-       struct ibv_cq *cq = rxq_ctrl->cq;
-       struct ibv_cq *ev_cq;
-       void *ev_ctx;
-       int ret = 0;
-
-       ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
-       if (ret || ev_cq != cq)
-               ret = -1;
-       else
-               ibv_ack_cq_events(cq, 1);
-#else
-       int ret = -1;
-       (void)dev;
-       (void)rx_queue_id;
-#endif
-       if (ret)
-               WARN("unable to disable interrupt on rx queue %d",
-                    rx_queue_id);
-       return ret;
-}