net/mlx5: remove Tx implementation
[dpdk.git] / drivers / net / mlx5 / mlx5_rxtx.c
index 6dca1e7..f2d6918 100644
@@ -25,6 +25,7 @@
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 #include <rte_ether.h>
+#include <rte_cycles.h>
 
 #include "mlx5.h"
 #include "mlx5_utils.h"
@@ -50,6 +51,10 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
 static __rte_always_inline void
 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
 
+static int
+mlx5_queue_state_modify(struct rte_eth_dev *dev,
+                       struct mlx5_mp_arg_queue_state_modify *sm);
+
 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
        [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
@@ -282,140 +287,6 @@ mlx5_set_swp_types_table(void)
        }
 }
 
-/**
- * Return the size of tailroom of WQ.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param addr
- *   Pointer to tail of WQ.
- *
- * @return
- *   Size of tailroom.
- */
-static inline size_t
-tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
-{
-       size_t tailroom;
-       tailroom = (uintptr_t)(txq->wqes) +
-                  (1 << txq->wqe_n) * MLX5_WQE_SIZE -
-                  (uintptr_t)addr;
-       return tailroom;
-}
-
-/**
- * Copy data to tailroom of circular queue.
- *
- * @param dst
- *   Pointer to destination.
- * @param src
- *   Pointer to source.
- * @param n
- *   Number of bytes to copy.
- * @param base
- *   Pointer to head of queue.
- * @param tailroom
- *   Size of tailroom from dst.
- *
- * @return
- *   Pointer after copied data.
- */
-static inline void *
-mlx5_copy_to_wq(void *dst, const void *src, size_t n,
-               void *base, size_t tailroom)
-{
-       void *ret;
-
-       if (n > tailroom) {
-               rte_memcpy(dst, src, tailroom);
-               rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
-                          n - tailroom);
-               ret = (uint8_t *)base + n - tailroom;
-       } else {
-               rte_memcpy(dst, src, n);
-               ret = (n == tailroom) ? base : (uint8_t *)dst + n;
-       }
-       return ret;
-}
-
-/**
- * Inline TSO headers into WQE.
- *
- * @return
- *   0 on success, negative errno value on failure.
- */
-static int
-inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
-          uint32_t *length,
-          uintptr_t *addr,
-          uint16_t *pkt_inline_sz,
-          uint8_t **raw,
-          uint16_t *max_wqe,
-          uint16_t *tso_segsz,
-          uint16_t *tso_header_sz)
-{
-       uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
-                                   (1 << txq->wqe_n) * MLX5_WQE_SIZE);
-       unsigned int copy_b;
-       uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
-       const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
-                                PKT_TX_TUNNEL_MASK);
-       uint16_t n_wqe;
-
-       *tso_segsz = buf->tso_segsz;
-       *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
-       if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
-               txq->stats.oerrors++;
-               return -EINVAL;
-       }
-       if (tunneled)
-               *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
-       /* First seg must contain all TSO headers. */
-       if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
-                    *tso_header_sz > DATA_LEN(buf)) {
-               txq->stats.oerrors++;
-               return -EINVAL;
-       }
-       copy_b = *tso_header_sz - *pkt_inline_sz;
-       if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
-               return -EAGAIN;
-       n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-       if (unlikely(*max_wqe < n_wqe))
-               return -EINVAL;
-       *max_wqe -= n_wqe;
-       rte_memcpy((void *)*raw, (void *)*addr, copy_b);
-       *length -= copy_b;
-       *addr += copy_b;
-       copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
-       *pkt_inline_sz += copy_b;
-       *raw += copy_b;
-       return 0;
-}
-
-/**
- * DPDK callback to check the status of a tx descriptor.
- *
- * @param tx_queue
- *   The tx queue.
- * @param[in] offset
- *   The index of the descriptor in the ring.
- *
- * @return
- *   The status of the tx descriptor.
- */
-int
-mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
-{
-       struct mlx5_txq_data *txq = tx_queue;
-       uint16_t used;
-
-       mlx5_tx_complete(txq);
-       used = txq->elts_head - txq->elts_tail;
-       if (offset < used)
-               return RTE_ETH_TX_DESC_FULL;
-       return RTE_ETH_TX_DESC_DONE;
-}
-
 /**
  * Internal function to compute the number of used descriptors in an RX queue
  *
@@ -444,7 +315,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
                cq_ci = rxq->cq_ci;
        }
        cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
-       while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+       while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
                int8_t op_own;
                unsigned int n;
 
@@ -569,1265 +440,425 @@ mlx5_dump_debug_information(const char *fname, const char *hex_title,
 }
 
 /**
- * DPDK callback for TX.
+ * Move QP from error state to running state and initialize indexes.
  *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
+ * @param txq_ctrl
+ *   Pointer to TX queue control structure.
  *
  * @return
- *   Number of packets successfully transmitted (<= pkts_n).
+ *   0 on success, else -1.
  */
-uint16_t
-mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+static int
+tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
 {
-       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-       uint16_t elts_head = txq->elts_head;
-       const uint16_t elts_n = 1 << txq->elts_n;
-       const uint16_t elts_m = elts_n - 1;
-       unsigned int i = 0;
-       unsigned int j = 0;
-       unsigned int k = 0;
-       uint16_t max_elts;
-       uint16_t max_wqe;
-       unsigned int comp;
-       volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
-       unsigned int segs_n = 0;
-       const unsigned int max_inline = txq->max_inline;
-       uint64_t addr_64;
-
-       if (unlikely(!pkts_n))
-               return 0;
-       /* Prefetch first packet cacheline. */
-       rte_prefetch0(*pkts);
-       /* Start processing. */
-       mlx5_tx_complete(txq);
-       max_elts = (elts_n - (elts_head - txq->elts_tail));
-       max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-       if (unlikely(!max_wqe))
-               return 0;
-       do {
-               struct rte_mbuf *buf = *pkts; /* First_seg. */
-               uint8_t *raw;
-               volatile struct mlx5_wqe_v *wqe = NULL;
-               volatile rte_v128u32_t *dseg = NULL;
-               uint32_t length;
-               unsigned int ds = 0;
-               unsigned int sg = 0; /* counter of additional segs attached. */
-               uintptr_t addr;
-               uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
-               uint16_t tso_header_sz = 0;
-               uint16_t ehdr;
-               uint8_t cs_flags;
-               uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
-               uint32_t swp_offsets = 0;
-               uint8_t swp_types = 0;
-               rte_be32_t metadata;
-               uint16_t tso_segsz = 0;
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               uint32_t total_length = 0;
-#endif
-               int ret;
+       struct mlx5_mp_arg_queue_state_modify sm = {
+                       .is_wq = 0,
+                       .queue_id = txq_ctrl->txq.idx,
+       };
 
-               segs_n = buf->nb_segs;
-               /*
-                * Make sure there is enough room to store this packet and
-                * that one ring entry remains unused.
-                */
-               assert(segs_n);
-               if (max_elts < segs_n)
-                       break;
-               max_elts -= segs_n;
-               sg = --segs_n;
-               if (unlikely(--max_wqe == 0))
-                       break;
-               wqe = (volatile struct mlx5_wqe_v *)
-                       tx_mlx5_wqe(txq, txq->wqe_ci);
-               rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-               if (pkts_n - i > 1)
-                       rte_prefetch0(*(pkts + 1));
-               addr = rte_pktmbuf_mtod(buf, uintptr_t);
-               length = DATA_LEN(buf);
-               ehdr = (((uint8_t *)addr)[1] << 8) |
-                      ((uint8_t *)addr)[0];
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               total_length = length;
-#endif
-               if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
-                       txq->stats.oerrors++;
-                       break;
-               }
-               /* Update element. */
-               (*txq->elts)[elts_head & elts_m] = buf;
-               /* Prefetch next buffer data. */
-               if (pkts_n - i > 1)
-                       rte_prefetch0(
-                           rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-               cs_flags = txq_ol_cksum_to_cs(buf);
-               txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
-               raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
-               /* Copy metadata from mbuf if valid */
-               metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
-                                                            0;
-               /* Replace the Ethernet type by the VLAN if necessary. */
-               if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-                       uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
-                                                        buf->vlan_tci);
-                       unsigned int len = 2 * RTE_ETHER_ADDR_LEN - 2;
-
-                       addr += 2;
-                       length -= 2;
-                       /* Copy Destination and source mac address. */
-                       memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
-                       /* Copy VLAN. */
-                       memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
-                       /* Copy missing two bytes to end the DSeg. */
-                       memcpy((uint8_t *)raw + len + sizeof(vlan),
-                              ((uint8_t *)addr) + len, 2);
-                       addr += len + 2;
-                       length -= (len + 2);
-               } else {
-                       memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
-                              MLX5_WQE_DWORD_SIZE);
-                       length -= pkt_inline_sz;
-                       addr += pkt_inline_sz;
-               }
-               raw += MLX5_WQE_DWORD_SIZE;
-               if (tso) {
-                       ret = inline_tso(txq, buf, &length,
-                                        &addr, &pkt_inline_sz,
-                                        &raw, &max_wqe,
-                                        &tso_segsz, &tso_header_sz);
-                       if (ret == -EINVAL) {
-                               break;
-                       } else if (ret == -EAGAIN) {
-                               /* NOP WQE. */
-                               wqe->ctrl = (rte_v128u32_t){
-                                       rte_cpu_to_be_32(txq->wqe_ci << 8),
-                                       rte_cpu_to_be_32(txq->qp_num_8s | 1),
-                                       0,
-                                       0,
-                               };
-                               ds = 1;
-#ifdef MLX5_PMD_SOFT_COUNTERS
-                               total_length = 0;
-#endif
-                               k++;
-                               goto next_wqe;
-                       }
-               }
-               /* Inline if enough room. */
-               if (max_inline || tso) {
-                       uint32_t inl = 0;
-                       uintptr_t end = (uintptr_t)
-                               (((uintptr_t)txq->wqes) +
-                                (1 << txq->wqe_n) * MLX5_WQE_SIZE);
-                       unsigned int inline_room = max_inline *
-                                                  RTE_CACHE_LINE_SIZE -
-                                                  (pkt_inline_sz - 2) -
-                                                  !!tso * sizeof(inl);
-                       uintptr_t addr_end;
-                       unsigned int copy_b;
-
-pkt_inline:
-                       addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
-                                                  RTE_CACHE_LINE_SIZE);
-                       copy_b = (addr_end > addr) ?
-                                RTE_MIN((addr_end - addr), length) : 0;
-                       if (copy_b && ((end - (uintptr_t)raw) >
-                                      (copy_b + sizeof(inl)))) {
-                               /*
-                                * One Dseg remains in the current WQE.  To
-                                * keep the computation positive, it is
-                                * removed after the bytes to Dseg conversion.
-                                */
-                               uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-
-                               if (unlikely(max_wqe < n))
-                                       break;
-                               max_wqe -= n;
-                               if (tso) {
-                                       assert(inl == 0);
-                                       inl = rte_cpu_to_be_32(copy_b |
-                                                              MLX5_INLINE_SEG);
-                                       rte_memcpy((void *)raw,
-                                                  (void *)&inl, sizeof(inl));
-                                       raw += sizeof(inl);
-                                       pkt_inline_sz += sizeof(inl);
-                               }
-                               rte_memcpy((void *)raw, (void *)addr, copy_b);
-                               addr += copy_b;
-                               length -= copy_b;
-                               pkt_inline_sz += copy_b;
-                       }
-                       /*
-                        * 2 DWORDs consumed by the WQE header + ETH segment +
-                        * the size of the inline part of the packet.
-                        */
-                       ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
-                       if (length > 0) {
-                               if (ds % (MLX5_WQE_SIZE /
-                                         MLX5_WQE_DWORD_SIZE) == 0) {
-                                       if (unlikely(--max_wqe == 0))
-                                               break;
-                                       dseg = (volatile rte_v128u32_t *)
-                                              tx_mlx5_wqe(txq, txq->wqe_ci +
-                                                          ds / 4);
-                               } else {
-                                       dseg = (volatile rte_v128u32_t *)
-                                               ((uintptr_t)wqe +
-                                                (ds * MLX5_WQE_DWORD_SIZE));
-                               }
-                               goto use_dseg;
-                       } else if (!segs_n) {
-                               goto next_pkt;
-                       } else {
-                               /*
-                                * Further inline the next segment only for
-                                * non-TSO packets.
-                                */
-                               if (!tso) {
-                                       raw += copy_b;
-                                       inline_room -= copy_b;
-                               } else {
-                                       inline_room = 0;
-                               }
-                               /* Move to the next segment. */
-                               --segs_n;
-                               buf = buf->next;
-                               assert(buf);
-                               addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                               length = DATA_LEN(buf);
-#ifdef MLX5_PMD_SOFT_COUNTERS
-                               total_length += length;
-#endif
-                               (*txq->elts)[++elts_head & elts_m] = buf;
-                               goto pkt_inline;
-                       }
-               } else {
-                       /*
-                        * No inline has been done in the packet, only the
-                        * Ethernet Header as been stored.
-                        */
-                       dseg = (volatile rte_v128u32_t *)
-                               ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
-                       ds = 3;
-use_dseg:
-                       /* Add the remaining packet as a simple ds. */
-                       addr_64 = rte_cpu_to_be_64(addr);
-                       *dseg = (rte_v128u32_t){
-                               rte_cpu_to_be_32(length),
-                               mlx5_tx_mb2mr(txq, buf),
-                               addr_64,
-                               addr_64 >> 32,
-                       };
-                       ++ds;
-                       if (!segs_n)
-                               goto next_pkt;
-               }
-next_seg:
-               assert(buf);
-               assert(ds);
-               assert(wqe);
-               /*
-                * Spill on next WQE when the current one does not have
-                * enough room left. Size of WQE must a be a multiple
-                * of data segment size.
-                */
-               assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
-               if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
-                       if (unlikely(--max_wqe == 0))
-                               break;
-                       dseg = (volatile rte_v128u32_t *)
-                              tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
-                       rte_prefetch0(tx_mlx5_wqe(txq,
-                                                 txq->wqe_ci + ds / 4 + 1));
-               } else {
-                       ++dseg;
-               }
-               ++ds;
-               buf = buf->next;
-               assert(buf);
-               length = DATA_LEN(buf);
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               total_length += length;
-#endif
-               /* Store segment information. */
-               addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
-               *dseg = (rte_v128u32_t){
-                       rte_cpu_to_be_32(length),
-                       mlx5_tx_mb2mr(txq, buf),
-                       addr_64,
-                       addr_64 >> 32,
-               };
-               (*txq->elts)[++elts_head & elts_m] = buf;
-               if (--segs_n)
-                       goto next_seg;
-next_pkt:
-               if (ds > MLX5_DSEG_MAX) {
-                       txq->stats.oerrors++;
-                       break;
-               }
-               ++elts_head;
-               ++pkts;
-               ++i;
-               j += sg;
-               /* Initialize known and common part of the WQE structure. */
-               if (tso) {
-                       wqe->ctrl = (rte_v128u32_t){
-                               rte_cpu_to_be_32((txq->wqe_ci << 8) |
-                                                MLX5_OPCODE_TSO),
-                               rte_cpu_to_be_32(txq->qp_num_8s | ds),
-                               0,
-                               0,
-                       };
-                       wqe->eseg = (rte_v128u32_t){
-                               swp_offsets,
-                               cs_flags | (swp_types << 8) |
-                               (rte_cpu_to_be_16(tso_segsz) << 16),
-                               metadata,
-                               (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
-                       };
-               } else {
-                       wqe->ctrl = (rte_v128u32_t){
-                               rte_cpu_to_be_32((txq->wqe_ci << 8) |
-                                                MLX5_OPCODE_SEND),
-                               rte_cpu_to_be_32(txq->qp_num_8s | ds),
-                               0,
-                               0,
-                       };
-                       wqe->eseg = (rte_v128u32_t){
-                               swp_offsets,
-                               cs_flags | (swp_types << 8),
-                               metadata,
-                               (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
-                       };
-               }
-next_wqe:
-               txq->wqe_ci += (ds + 3) / 4;
-               /* Save the last successful WQE for completion request */
-               last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               /* Increment sent bytes counter. */
-               txq->stats.obytes += total_length;
-#endif
-       } while (i < pkts_n);
-       /* Take a shortcut if nothing must be sent. */
-       if (unlikely((i + k) == 0))
-               return 0;
-       txq->elts_head += (i + j);
-       /* Check whether completion threshold has been reached. */
-       comp = txq->elts_comp + i + j + k;
-       if (comp >= MLX5_TX_COMP_THRESH) {
-               /* A CQE slot must always be available. */
-               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
-               /* Request completion on last WQE. */
-               last_wqe->ctrl2 = rte_cpu_to_be_32(8);
-               /* Save elts_head in unused "immediate" field of WQE. */
-               last_wqe->ctrl3 = txq->elts_head;
-               txq->elts_comp = 0;
-       } else {
-               txq->elts_comp = comp;
-       }
-#ifdef MLX5_PMD_SOFT_COUNTERS
-       /* Increment sent packets counter. */
-       txq->stats.opackets += i;
-#endif
-       /* Ring QP doorbell. */
-       mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
-       return i;
+       if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
+               return -1;
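+       /* QP is operational again; reset SQ indexes and completion count. */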
+       txq_ctrl->txq.wqe_ci = 0;
+       txq_ctrl->txq.wqe_pi = 0;
+       txq_ctrl->txq.elts_comp = 0;
+       return 0;
 }
 
-/**
- * Open a MPW session.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param mpw
- *   Pointer to MPW session structure.
- * @param length
- *   Packet length.
- */
-static inline void
-mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
+/* Return 1 if the error CQE was already marked as seen, otherwise mark it and return 0. */
+static int
+check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
 {
-       uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
-       volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
-               (volatile struct mlx5_wqe_data_seg (*)[])
-               tx_mlx5_wqe(txq, idx + 1);
-
-       mpw->state = MLX5_MPW_STATE_OPENED;
-       mpw->pkts_n = 0;
-       mpw->len = length;
-       mpw->total_len = 0;
-       mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-       mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
-       mpw->wqe->eseg.inline_hdr_sz = 0;
-       mpw->wqe->eseg.rsvd0 = 0;
-       mpw->wqe->eseg.rsvd1 = 0;
-       mpw->wqe->eseg.flow_table_metadata = 0;
-       mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
-                                            (txq->wqe_ci << 8) |
-                                            MLX5_OPCODE_TSO);
-       mpw->wqe->ctrl[2] = 0;
-       mpw->wqe->ctrl[3] = 0;
-       mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
-               (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
-       mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
-               (((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
-       mpw->data.dseg[2] = &(*dseg)[0];
-       mpw->data.dseg[3] = &(*dseg)[1];
-       mpw->data.dseg[4] = &(*dseg)[2];
+       static const uint8_t magic[] = "seen";
+       int ret = 1;
+       unsigned int i;
+
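+       /* The marker is kept in the reserved bytes of the CQE itself, so an
+        * error CQE already handled by a previous call is recognized.
+        */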
+       for (i = 0; i < sizeof(magic); ++i)
+               if (!ret || err_cqe->rsvd1[i] != magic[i]) {
+                       ret = 0;
+                       err_cqe->rsvd1[i] = magic[i];
+               }
+       return ret;
 }
 
 /**
- * Close a MPW session.
+ * Handle error CQE.
  *
  * @param txq
  *   Pointer to TX queue structure.
- * @param mpw
- *   Pointer to MPW session structure.
- */
-static inline void
-mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
-{
-       unsigned int num = mpw->pkts_n;
-
-       /*
-        * Store size in multiple of 16 bytes. Control and Ethernet segments
-        * count as 2.
-        */
-       mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
-       mpw->state = MLX5_MPW_STATE_CLOSED;
-       if (num < 3)
-               ++txq->wqe_ci;
-       else
-               txq->wqe_ci += 2;
-       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
-       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-}
-
-/**
- * DPDK callback for TX with MPW support.
- *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
+ * @param err_cqe
+ *   Pointer to the error CQE.
  *
  * @return
- *   Number of packets successfully transmitted (<= pkts_n).
+ *   The last Tx buffer element to free.
  */
 uint16_t
-mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_error_cqe_handle(struct mlx5_txq_data *txq,
+                        volatile struct mlx5_err_cqe *err_cqe)
 {
-       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-       uint16_t elts_head = txq->elts_head;
-       const uint16_t elts_n = 1 << txq->elts_n;
-       const uint16_t elts_m = elts_n - 1;
-       unsigned int i = 0;
-       unsigned int j = 0;
-       uint16_t max_elts;
-       uint16_t max_wqe;
-       unsigned int comp;
-       struct mlx5_mpw mpw = {
-               .state = MLX5_MPW_STATE_CLOSED,
-       };
-
-       if (unlikely(!pkts_n))
-               return 0;
-       /* Prefetch first packet cacheline. */
-       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
-       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-       /* Start processing. */
-       mlx5_tx_complete(txq);
-       max_elts = (elts_n - (elts_head - txq->elts_tail));
-       max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-       if (unlikely(!max_wqe))
-               return 0;
-       do {
-               struct rte_mbuf *buf = *(pkts++);
-               uint32_t length;
-               unsigned int segs_n = buf->nb_segs;
-               uint32_t cs_flags;
-               rte_be32_t metadata;
-
-               /*
-                * Make sure there is enough room to store this packet and
-                * that one ring entry remains unused.
-                */
-               assert(segs_n);
-               if (max_elts < segs_n)
-                       break;
-               /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX) {
-                       txq->stats.oerrors++;
-                       break;
+       if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
+               const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
+               struct mlx5_txq_ctrl *txq_ctrl =
+                               container_of(txq, struct mlx5_txq_ctrl, txq);
+               uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
+               int seen = check_err_cqe_seen(err_cqe);
+
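+               /* Dump debug info only the first time this error CQE is seen
+                * and while the dump file quota is not exceeded.
+                */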
+               if (!seen && txq_ctrl->dump_file_n <
+                   txq_ctrl->priv->config.max_dump_files_num) {
+                       MKSTR(err_str, "Unexpected CQE error syndrome "
+                             "0x%02x CQN = %u SQN = %u wqe_counter = %u "
+                             "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
+                             txq_ctrl->cqn, txq->qp_num_8s >> 8,
+                             rte_be_to_cpu_16(err_cqe->wqe_counter),
+                             txq->wqe_ci, txq->cq_ci);
+                       MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
+                             PORT_ID(txq_ctrl->priv), txq->idx,
+                             txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
+                       mlx5_dump_debug_information(name, NULL, err_str, 0);
+                       mlx5_dump_debug_information(name, "MLX5 Error CQ:",
+                                                   (const void *)((uintptr_t)
+                                                   &(*txq->cqes)[0]),
+                                                   sizeof(*err_cqe) *
+                                                   (1 << txq->cqe_n));
+                       mlx5_dump_debug_information(name, "MLX5 Error SQ:",
+                                                   (const void *)((uintptr_t)
+                                                   txq->wqes),
+                                                   MLX5_WQE_SIZE *
+                                                   (1 << txq->wqe_n));
+                       txq_ctrl->dump_file_n++;
                }
-               max_elts -= segs_n;
-               --pkts_n;
-               cs_flags = txq_ol_cksum_to_cs(buf);
-               /* Copy metadata from mbuf if valid */
-               metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
-                                                            0;
-               /* Retrieve packet information. */
-               length = PKT_LEN(buf);
-               assert(length);
-               /* Start new session if packet differs. */
-               if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
-                   ((mpw.len != length) ||
-                    (segs_n != 1) ||
-                    (mpw.wqe->eseg.flow_table_metadata != metadata) ||
-                    (mpw.wqe->eseg.cs_flags != cs_flags)))
-                       mlx5_mpw_close(txq, &mpw);
-               if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+               if (!seen)
                        /*
-                        * Multi-Packet WQE consumes at most two WQE.
-                        * mlx5_mpw_new() expects to be able to use such
-                        * resources.
+                        * Count errors in WQE units.
+                        * Later this can be improved to count error packets,
+                        * for example, by parsing the SQ to find how many
+                        * packets should be counted for each WQE.
                         */
-                       if (unlikely(max_wqe < 2))
-                               break;
-                       max_wqe -= 2;
-                       mlx5_mpw_new(txq, &mpw, length);
-                       mpw.wqe->eseg.cs_flags = cs_flags;
-                       mpw.wqe->eseg.flow_table_metadata = metadata;
+                       txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
+                                               new_wqe_pi) & wqe_m;
+               if (tx_recover_qp(txq_ctrl) == 0) {
+                       txq->cq_ci++;
+                       /* Release all the remaining buffers. */
+                       return txq->elts_head;
                }
-               /* Multi-segment packets must be alone in their MPW. */
-               assert((segs_n == 1) || (mpw.pkts_n == 0));
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-               length = 0;
-#endif
-               do {
-                       volatile struct mlx5_wqe_data_seg *dseg;
-                       uintptr_t addr;
-
-                       assert(buf);
-                       (*txq->elts)[elts_head++ & elts_m] = buf;
-                       dseg = mpw.data.dseg[mpw.pkts_n];
-                       addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                       *dseg = (struct mlx5_wqe_data_seg){
-                               .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
-                               .lkey = mlx5_tx_mb2mr(txq, buf),
-                               .addr = rte_cpu_to_be_64(addr),
-                       };
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-                       length += DATA_LEN(buf);
-#endif
-                       buf = buf->next;
-                       ++mpw.pkts_n;
-                       ++j;
-               } while (--segs_n);
-               assert(length == mpw.len);
-               if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
-                       mlx5_mpw_close(txq, &mpw);
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               /* Increment sent bytes counter. */
-               txq->stats.obytes += length;
-#endif
-               ++i;
-       } while (pkts_n);
-       /* Take a shortcut if nothing must be sent. */
-       if (unlikely(i == 0))
-               return 0;
-       /* Check whether completion threshold has been reached. */
-       /* "j" includes both packets and segments. */
-       comp = txq->elts_comp + j;
-       if (comp >= MLX5_TX_COMP_THRESH) {
-               volatile struct mlx5_wqe *wqe = mpw.wqe;
-
-               /* A CQE slot must always be available. */
-               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
-               /* Request completion on last WQE. */
-               wqe->ctrl[2] = rte_cpu_to_be_32(8);
-               /* Save elts_head in unused "immediate" field of WQE. */
-               wqe->ctrl[3] = elts_head;
-               txq->elts_comp = 0;
+               /* Recovering failed - try again later on the same WQE. */
        } else {
-               txq->elts_comp = comp;
+               txq->cq_ci++;
        }
-#ifdef MLX5_PMD_SOFT_COUNTERS
-       /* Increment sent packets counter. */
-       txq->stats.opackets += i;
-#endif
-       /* Ring QP doorbell. */
-       if (mpw.state == MLX5_MPW_STATE_OPENED)
-               mlx5_mpw_close(txq, &mpw);
-       mlx5_tx_dbrec(txq, mpw.wqe);
-       txq->elts_head = elts_head;
-       return i;
+       /* Do not release buffers. */
+       return txq->elts_tail;
 }
 
 /**
- * Open a MPW inline session.
+ * Translate RX completion flags to packet type.
  *
- * @param txq
- *   Pointer to TX queue structure.
- * @param mpw
- *   Pointer to MPW session structure.
- * @param length
- *   Packet length.
- */
-static inline void
-mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
-                   uint32_t length)
-{
-       uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
-       struct mlx5_wqe_inl_small *inl;
-
-       mpw->state = MLX5_MPW_INL_STATE_OPENED;
-       mpw->pkts_n = 0;
-       mpw->len = length;
-       mpw->total_len = 0;
-       mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-       mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
-                                            (txq->wqe_ci << 8) |
-                                            MLX5_OPCODE_TSO);
-       mpw->wqe->ctrl[2] = 0;
-       mpw->wqe->ctrl[3] = 0;
-       mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
-       mpw->wqe->eseg.inline_hdr_sz = 0;
-       mpw->wqe->eseg.cs_flags = 0;
-       mpw->wqe->eseg.rsvd0 = 0;
-       mpw->wqe->eseg.rsvd1 = 0;
-       mpw->wqe->eseg.flow_table_metadata = 0;
-       inl = (struct mlx5_wqe_inl_small *)
-               (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
-       mpw->data.raw = (uint8_t *)&inl->raw;
-}
-
-/**
- * Close a MPW inline session.
+ * @param[in] rxq
+ *   Pointer to RX queue structure.
+ * @param[in] cqe
+ *   Pointer to CQE.
  *
- * @param txq
- *   Pointer to TX queue structure.
- * @param mpw
- *   Pointer to MPW session structure.
+ * @note: keep mlx5_dev_supported_ptypes_get() in sync with any change made here.
+ *
+ * @return
+ *   Packet type for struct rte_mbuf.
  */
-static inline void
-mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
+static inline uint32_t
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
 {
-       unsigned int size;
-       struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
-               (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
+       uint8_t idx;
+       uint8_t pinfo = cqe->pkt_info;
+       uint16_t ptype = cqe->hdr_type_etc;
 
-       size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
        /*
-        * Store size in multiple of 16 bytes. Control and Ethernet segments
-        * count as 2.
+        * The index to the array should have:
+        * bit[1:0] = l3_hdr_type
+        * bit[4:2] = l4_hdr_type
+        * bit[5] = ip_frag
+        * bit[6] = tunneled
+        * bit[7] = outer_l3_type
         */
-       mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
-                                            MLX5_WQE_DS(size));
-       mpw->state = MLX5_MPW_STATE_CLOSED;
-       inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
-       txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
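+       /* Bits 7:6 of the index come from pkt_info (tunnel and outer L3),
+        * bits 5:0 from hdr_type_etc (L3/L4 header types and IP fragment).
+        */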
+       idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+       return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
 }
 
 /**
- * DPDK callback for TX with MPW inline support.
+ * Initialize Rx WQ and indexes.
  *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
- *
- * @return
- *   Number of packets successfully transmitted (<= pkts_n).
+ * @param[in] rxq
+ *   Pointer to RX queue structure.
  */
-uint16_t
-mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
-                        uint16_t pkts_n)
+void
+mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 {
-       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-       uint16_t elts_head = txq->elts_head;
-       const uint16_t elts_n = 1 << txq->elts_n;
-       const uint16_t elts_m = elts_n - 1;
-       unsigned int i = 0;
-       unsigned int j = 0;
-       uint16_t max_elts;
-       uint16_t max_wqe;
-       unsigned int comp;
-       unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
-       struct mlx5_mpw mpw = {
-               .state = MLX5_MPW_STATE_CLOSED,
-       };
-       /*
-        * Compute the maximum number of WQE which can be consumed by inline
-        * code.
-        * - 2 DSEG for:
-        *   - 1 control segment,
-        *   - 1 Ethernet segment,
-        * - N Dseg from the inline request.
-        */
-       const unsigned int wqe_inl_n =
-               ((2 * MLX5_WQE_DWORD_SIZE +
-                 txq->max_inline * RTE_CACHE_LINE_SIZE) +
-                RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+       const unsigned int wqe_n = 1 << rxq->elts_n;
+       unsigned int i;
 
-       if (unlikely(!pkts_n))
-               return 0;
-       /* Prefetch first packet cacheline. */
-       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
-       rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-       /* Start processing. */
-       mlx5_tx_complete(txq);
-       max_elts = (elts_n - (elts_head - txq->elts_tail));
-       do {
-               struct rte_mbuf *buf = *(pkts++);
+       for (i = 0; (i != wqe_n); ++i) {
+               volatile struct mlx5_wqe_data_seg *scat;
                uintptr_t addr;
-               uint32_t length;
-               unsigned int segs_n = buf->nb_segs;
-               uint8_t cs_flags;
-               rte_be32_t metadata;
+               uint32_t byte_count;
 
-               /*
-                * Make sure there is enough room to store this packet and
-                * that one ring entry remains unused.
-                */
-               assert(segs_n);
-               if (max_elts < segs_n)
-                       break;
-               /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX) {
-                       txq->stats.oerrors++;
-                       break;
-               }
-               max_elts -= segs_n;
-               --pkts_n;
-               /*
-                * Compute max_wqe in case less WQE were consumed in previous
-                * iteration.
-                */
-               max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-               cs_flags = txq_ol_cksum_to_cs(buf);
-               /* Copy metadata from mbuf if valid */
-               metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
-                                                            0;
-               /* Retrieve packet information. */
-               length = PKT_LEN(buf);
-               /* Start new session if packet differs. */
-               if (mpw.state == MLX5_MPW_STATE_OPENED) {
-                       if ((mpw.len != length) ||
-                           (segs_n != 1) ||
-                           (mpw.wqe->eseg.flow_table_metadata != metadata) ||
-                           (mpw.wqe->eseg.cs_flags != cs_flags))
-                               mlx5_mpw_close(txq, &mpw);
-               } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
-                       if ((mpw.len != length) ||
-                           (segs_n != 1) ||
-                           (length > inline_room) ||
-                           (mpw.wqe->eseg.flow_table_metadata != metadata) ||
-                           (mpw.wqe->eseg.cs_flags != cs_flags)) {
-                               mlx5_mpw_inline_close(txq, &mpw);
-                               inline_room =
-                                       txq->max_inline * RTE_CACHE_LINE_SIZE;
-                       }
-               }
-               if (mpw.state == MLX5_MPW_STATE_CLOSED) {
-                       if ((segs_n != 1) ||
-                           (length > inline_room)) {
-                               /*
-                                * Multi-Packet WQE consumes at most two WQE.
-                                * mlx5_mpw_new() expects to be able to use
-                                * such resources.
-                                */
-                               if (unlikely(max_wqe < 2))
-                                       break;
-                               max_wqe -= 2;
-                               mlx5_mpw_new(txq, &mpw, length);
-                               mpw.wqe->eseg.cs_flags = cs_flags;
-                               mpw.wqe->eseg.flow_table_metadata = metadata;
-                       } else {
-                               if (unlikely(max_wqe < wqe_inl_n))
-                                       break;
-                               max_wqe -= wqe_inl_n;
-                               mlx5_mpw_inline_new(txq, &mpw, length);
-                               mpw.wqe->eseg.cs_flags = cs_flags;
-                               mpw.wqe->eseg.flow_table_metadata = metadata;
-                       }
-               }
-               /* Multi-segment packets must be alone in their MPW. */
-               assert((segs_n == 1) || (mpw.pkts_n == 0));
-               if (mpw.state == MLX5_MPW_STATE_OPENED) {
-                       assert(inline_room ==
-                              txq->max_inline * RTE_CACHE_LINE_SIZE);
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-                       length = 0;
-#endif
-                       do {
-                               volatile struct mlx5_wqe_data_seg *dseg;
-
-                               assert(buf);
-                               (*txq->elts)[elts_head++ & elts_m] = buf;
-                               dseg = mpw.data.dseg[mpw.pkts_n];
-                               addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                               *dseg = (struct mlx5_wqe_data_seg){
-                                       .byte_count =
-                                              rte_cpu_to_be_32(DATA_LEN(buf)),
-                                       .lkey = mlx5_tx_mb2mr(txq, buf),
-                                       .addr = rte_cpu_to_be_64(addr),
-                               };
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-                               length += DATA_LEN(buf);
-#endif
-                               buf = buf->next;
-                               ++mpw.pkts_n;
-                               ++j;
-                       } while (--segs_n);
-                       assert(length == mpw.len);
-                       if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
-                               mlx5_mpw_close(txq, &mpw);
+               if (mlx5_rxq_mprq_enabled(rxq)) {
+                       struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
+
+                       scat = &((volatile struct mlx5_wqe_mprq *)
+                               rxq->wqes)[i].dseg;
+                       addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
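+                       /* A single scatter entry spans the whole MPRQ
+                        * buffer: stride size times the number of strides.
+                        */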
+                       byte_count = (1 << rxq->strd_sz_n) *
+                                       (1 << rxq->strd_num_n);
                } else {
-                       unsigned int max;
+                       struct rte_mbuf *buf = (*rxq->elts)[i];
 
-                       assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
-                       assert(length <= inline_room);
-                       assert(length == DATA_LEN(buf));
+                       scat = &((volatile struct mlx5_wqe_data_seg *)
+                                       rxq->wqes)[i];
                        addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                       (*txq->elts)[elts_head++ & elts_m] = buf;
-                       /* Maximum number of bytes before wrapping. */
-                       max = ((((uintptr_t)(txq->wqes)) +
-                               (1 << txq->wqe_n) *
-                               MLX5_WQE_SIZE) -
-                              (uintptr_t)mpw.data.raw);
-                       if (length > max) {
-                               rte_memcpy((void *)(uintptr_t)mpw.data.raw,
-                                          (void *)addr,
-                                          max);
-                               mpw.data.raw = (volatile void *)txq->wqes;
-                               rte_memcpy((void *)(uintptr_t)mpw.data.raw,
-                                          (void *)(addr + max),
-                                          length - max);
-                               mpw.data.raw += length - max;
-                       } else {
-                               rte_memcpy((void *)(uintptr_t)mpw.data.raw,
-                                          (void *)addr,
-                                          length);
-
-                               if (length == max)
-                                       mpw.data.raw =
-                                               (volatile void *)txq->wqes;
-                               else
-                                       mpw.data.raw += length;
-                       }
-                       ++mpw.pkts_n;
-                       mpw.total_len += length;
-                       ++j;
-                       if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
-                               mlx5_mpw_inline_close(txq, &mpw);
-                               inline_room =
-                                       txq->max_inline * RTE_CACHE_LINE_SIZE;
-                       } else {
-                               inline_room -= length;
-                       }
+                       byte_count = DATA_LEN(buf);
                }
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               /* Increment sent bytes counter. */
-               txq->stats.obytes += length;
-#endif
-               ++i;
-       } while (pkts_n);
-       /* Take a shortcut if nothing must be sent. */
-       if (unlikely(i == 0))
-               return 0;
-       /* Check whether completion threshold has been reached. */
-       /* "j" includes both packets and segments. */
-       comp = txq->elts_comp + j;
-       if (comp >= MLX5_TX_COMP_THRESH) {
-               volatile struct mlx5_wqe *wqe = mpw.wqe;
-
-               /* A CQE slot must always be available. */
-               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
-               /* Request completion on last WQE. */
-               wqe->ctrl[2] = rte_cpu_to_be_32(8);
-               /* Save elts_head in unused "immediate" field of WQE. */
-               wqe->ctrl[3] = elts_head;
-               txq->elts_comp = 0;
-       } else {
-               txq->elts_comp = comp;
-       }
-#ifdef MLX5_PMD_SOFT_COUNTERS
-       /* Increment sent packets counter. */
-       txq->stats.opackets += i;
-#endif
-       /* Ring QP doorbell. */
-       if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
-               mlx5_mpw_inline_close(txq, &mpw);
-       else if (mpw.state == MLX5_MPW_STATE_OPENED)
-               mlx5_mpw_close(txq, &mpw);
-       mlx5_tx_dbrec(txq, mpw.wqe);
-       txq->elts_head = elts_head;
-       return i;
-}
-
-/**
- * Open an Enhanced MPW session.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param mpw
- *   Pointer to MPW session structure.
- * @param length
- *   Packet length.
- */
-static inline void
-mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
-{
-       uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
-
-       mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
-       mpw->pkts_n = 0;
-       mpw->total_len = sizeof(struct mlx5_wqe);
-       mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-       mpw->wqe->ctrl[0] =
-               rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
-                                (txq->wqe_ci << 8) |
-                                MLX5_OPCODE_ENHANCED_MPSW);
-       mpw->wqe->ctrl[2] = 0;
-       mpw->wqe->ctrl[3] = 0;
-       memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
-       if (unlikely(padding)) {
-               uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
-
-               /* Pad the first 2 DWORDs with zero-length inline header. */
-               *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
-               *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
-                       rte_cpu_to_be_32(MLX5_INLINE_SEG);
-               mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
-               /* Start from the next WQEBB. */
-               mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
-       } else {
-               mpw->data.raw = (volatile void *)(mpw->wqe + 1);
+               /* scat->addr must be able to store a pointer. */
+               assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+               *scat = (struct mlx5_wqe_data_seg){
+                       .addr = rte_cpu_to_be_64(addr),
+                       .byte_count = rte_cpu_to_be_32(byte_count),
+                       .lkey = mlx5_rx_addr2mr(rxq, addr),
+               };
        }
+       rxq->consumed_strd = 0;
+       rxq->decompressed = 0;
+       rxq->rq_pi = 0;
+       rxq->zip = (struct rxq_zip){
+               .ai = 0,
+       };
+       /* Update doorbell counter. */
+       rxq->rq_ci = wqe_n >> rxq->sges_n;
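+       /* Make the WQE ring writes visible to HW before the doorbell write. */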
+       rte_cio_wmb();
+       *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 }
 
 /**
- * Close an Enhanced MPW session.
+ * Modify a Verbs queue state.
+ * This must be called from the primary process.
  *
- * @param txq
- *   Pointer to TX queue structure.
- * @param mpw
- *   Pointer to MPW session structure.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param sm
+ *   State modify request parameters.
  *
  * @return
- *   Number of consumed WQEs.
+ *   0 in case of success, otherwise a non-zero value and rte_errno is set.
  */
-static inline uint16_t
-mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
+int
+mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
+                       const struct mlx5_mp_arg_queue_state_modify *sm)
 {
-       uint16_t ret;
-
-       /* Store size in multiple of 16 bytes. Control and Ethernet segments
-        * count as 2.
-        */
-       mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
-                                            MLX5_WQE_DS(mpw->total_len));
-       mpw->state = MLX5_MPW_STATE_CLOSED;
-       ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
-       txq->wqe_ci += ret;
-       return ret;
-}
+       int ret;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
-/**
- * TX with Enhanced MPW support.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
- *
- * @return
- *   Number of packets successfully transmitted (<= pkts_n).
- */
-static inline uint16_t
-txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
-              uint16_t pkts_n)
-{
-       uint16_t elts_head = txq->elts_head;
-       const uint16_t elts_n = 1 << txq->elts_n;
-       const uint16_t elts_m = elts_n - 1;
-       unsigned int i = 0;
-       unsigned int j = 0;
-       uint16_t max_elts;
-       uint16_t max_wqe;
-       unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
-       unsigned int mpw_room = 0;
-       unsigned int inl_pad = 0;
-       uint32_t inl_hdr;
-       uint64_t addr_64;
-       struct mlx5_mpw mpw = {
-               .state = MLX5_MPW_STATE_CLOSED,
-       };
+       if (sm->is_wq) {
+               struct ibv_wq_attr mod = {
+                       .attr_mask = IBV_WQ_ATTR_STATE,
+                       .wq_state = sm->state,
+               };
+               struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
+               struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
-       if (unlikely(!pkts_n))
-               return 0;
-       /* Start processing. */
-       mlx5_tx_complete(txq);
-       max_elts = (elts_n - (elts_head - txq->elts_tail));
-       max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-       if (unlikely(!max_wqe))
-               return 0;
-       do {
-               struct rte_mbuf *buf = *(pkts++);
-               uintptr_t addr;
-               unsigned int do_inline = 0; /* Whether inline is possible. */
-               uint32_t length;
-               uint8_t cs_flags;
-               rte_be32_t metadata;
-
-               /* Multi-segmented packet is handled in slow-path outside. */
-               assert(NB_SEGS(buf) == 1);
-               /* Make sure there is enough room to store this packet. */
-               if (max_elts - j == 0)
-                       break;
-               cs_flags = txq_ol_cksum_to_cs(buf);
-               /* Copy metadata from mbuf if valid */
-               metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
-                                                            0;
-               /* Retrieve packet information. */
-               length = PKT_LEN(buf);
-               /* Start new session if:
-                * - multi-segment packet
-                * - no space left even for a dseg
-                * - next packet can be inlined with a new WQE
-                * - cs_flag differs
-                */
-               if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
-                       if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
-                            mpw_room) ||
-                           (length <= txq->inline_max_packet_sz &&
-                            inl_pad + sizeof(inl_hdr) + length >
-                            mpw_room) ||
-                            (mpw.wqe->eseg.flow_table_metadata != metadata) ||
-                           (mpw.wqe->eseg.cs_flags != cs_flags))
-                               max_wqe -= mlx5_empw_close(txq, &mpw);
+               ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
+               if (ret) {
+                       DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
+                                       sm->state, strerror(errno));
+                       rte_errno = errno;
+                       return ret;
                }
-               if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
-                       /* In Enhanced MPW, inline as much as the budget is
-                        * allowed. The remaining space is to be filled with
-                        * dsegs. If the title WQEBB isn't padded, it will have
-                        * 2 dsegs there.
-                        */
-                       mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
-                                          (max_inline ? max_inline :
-                                           pkts_n * MLX5_WQE_DWORD_SIZE) +
-                                          MLX5_WQE_SIZE);
-                       if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
-                               break;
-                       /* Don't pad the title WQEBB to not waste WQ. */
-                       mlx5_empw_new(txq, &mpw, 0);
-                       mpw_room -= mpw.total_len;
-                       inl_pad = 0;
-                       do_inline = length <= txq->inline_max_packet_sz &&
-                                   sizeof(inl_hdr) + length <= mpw_room &&
-                                   !txq->mpw_hdr_dseg;
-                       mpw.wqe->eseg.cs_flags = cs_flags;
-                       mpw.wqe->eseg.flow_table_metadata = metadata;
-               } else {
-                       /* Evaluate whether the next packet can be inlined.
-                        * Inlininig is possible when:
-                        * - length is less than configured value
-                        * - length fits for remaining space
-                        * - not required to fill the title WQEBB with dsegs
-                        */
-                       do_inline =
-                               length <= txq->inline_max_packet_sz &&
-                               inl_pad + sizeof(inl_hdr) + length <=
-                                mpw_room &&
-                               (!txq->mpw_hdr_dseg ||
-                                mpw.total_len >= MLX5_WQE_SIZE);
+       } else {
+               struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
+               struct mlx5_txq_ctrl *txq_ctrl =
+                       container_of(txq, struct mlx5_txq_ctrl, txq);
+               struct ibv_qp_attr mod = {
+                       .qp_state = IBV_QPS_RESET,
+                       .port_num = (uint8_t)priv->ibv_port,
+               };
+               struct ibv_qp *qp = txq_ctrl->ibv->qp;
+
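+               /*
+                * Re-enabling the Tx QP requires walking it through the full
+                * Verbs state sequence: RESET -> INIT -> RTR -> RTS.
+                */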
+               ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+               if (ret) {
+                       DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s\n",
+                               strerror(errno));
+                       rte_errno = errno;
+                       return ret;
                }
-               if (max_inline && do_inline) {
-                       /* Inline packet into WQE. */
-                       unsigned int max;
-
-                       assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
-                       assert(length == DATA_LEN(buf));
-                       inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
-                       addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                       mpw.data.raw = (volatile void *)
-                               ((uintptr_t)mpw.data.raw + inl_pad);
-                       max = tx_mlx5_wq_tailroom(txq,
-                                       (void *)(uintptr_t)mpw.data.raw);
-                       /* Copy inline header. */
-                       mpw.data.raw = (volatile void *)
-                               mlx5_copy_to_wq(
-                                         (void *)(uintptr_t)mpw.data.raw,
-                                         &inl_hdr,
-                                         sizeof(inl_hdr),
-                                         (void *)(uintptr_t)txq->wqes,
-                                         max);
-                       max = tx_mlx5_wq_tailroom(txq,
-                                       (void *)(uintptr_t)mpw.data.raw);
-                       /* Copy packet data. */
-                       mpw.data.raw = (volatile void *)
-                               mlx5_copy_to_wq(
-                                         (void *)(uintptr_t)mpw.data.raw,
-                                         (void *)addr,
-                                         length,
-                                         (void *)(uintptr_t)txq->wqes,
-                                         max);
-                       ++mpw.pkts_n;
-                       mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
-                       /* No need to get completion as the entire packet is
-                        * copied to WQ. Free the buf right away.
-                        */
-                       rte_pktmbuf_free_seg(buf);
-                       mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
-                       /* Add pad in the next packet if any. */
-                       inl_pad = (((uintptr_t)mpw.data.raw +
-                                       (MLX5_WQE_DWORD_SIZE - 1)) &
-                                       ~(MLX5_WQE_DWORD_SIZE - 1)) -
-                                 (uintptr_t)mpw.data.raw;
-               } else {
-                       /* No inline. Load a dseg of packet pointer. */
-                       volatile rte_v128u32_t *dseg;
-
-                       assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
-                       assert((inl_pad + sizeof(*dseg)) <= mpw_room);
-                       assert(length == DATA_LEN(buf));
-                       if (!tx_mlx5_wq_tailroom(txq,
-                                       (void *)((uintptr_t)mpw.data.raw
-                                               + inl_pad)))
-                               dseg = (volatile void *)txq->wqes;
-                       else
-                               dseg = (volatile void *)
-                                       ((uintptr_t)mpw.data.raw +
-                                        inl_pad);
-                       (*txq->elts)[elts_head++ & elts_m] = buf;
-                       addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
-                                                                   uintptr_t));
-                       *dseg = (rte_v128u32_t) {
-                               rte_cpu_to_be_32(length),
-                               mlx5_tx_mb2mr(txq, buf),
-                               addr_64,
-                               addr_64 >> 32,
-                       };
-                       mpw.data.raw = (volatile void *)(dseg + 1);
-                       mpw.total_len += (inl_pad + sizeof(*dseg));
-                       ++j;
-                       ++mpw.pkts_n;
-                       mpw_room -= (inl_pad + sizeof(*dseg));
-                       inl_pad = 0;
+               mod.qp_state = IBV_QPS_INIT;
+               ret = mlx5_glue->modify_qp(qp, &mod,
+                                          (IBV_QP_STATE | IBV_QP_PORT));
+               if (ret) {
+                       DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
+                               strerror(errno));
+                       rte_errno = errno;
+                       return ret;
+               }
+               mod.qp_state = IBV_QPS_RTR;
+               ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+               if (ret) {
+                       DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
+                               strerror(errno));
+                       rte_errno = errno;
+                       return ret;
+               }
+               mod.qp_state = IBV_QPS_RTS;
+               ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
+               if (ret) {
+                       DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
+                               strerror(errno));
+                       rte_errno = errno;
+                       return ret;
                }
-#ifdef MLX5_PMD_SOFT_COUNTERS
-               /* Increment sent bytes counter. */
-               txq->stats.obytes += length;
-#endif
-               ++i;
-       } while (i < pkts_n);
-       /* Take a shortcut if nothing must be sent. */
-       if (unlikely(i == 0))
-               return 0;
-       /* Check whether completion threshold has been reached. */
-       if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
-                       (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
-                        (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
-               volatile struct mlx5_wqe *wqe = mpw.wqe;
-
-               /* A CQE slot must always be available. */
-               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
-               /* Request completion on last WQE. */
-               wqe->ctrl[2] = rte_cpu_to_be_32(8);
-               /* Save elts_head in unused "immediate" field of WQE. */
-               wqe->ctrl[3] = elts_head;
-               txq->elts_comp = 0;
-               txq->mpw_comp = txq->wqe_ci;
-       } else {
-               txq->elts_comp += j;
        }
-#ifdef MLX5_PMD_SOFT_COUNTERS
-       /* Increment sent packets counter. */
-       txq->stats.opackets += i;
-#endif
-       if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
-               mlx5_empw_close(txq, &mpw);
-       /* Ring QP doorbell. */
-       mlx5_tx_dbrec(txq, mpw.wqe);
-       txq->elts_head = elts_head;
-       return i;
+       return 0;
 }
 
 /**
- * DPDK callback for TX with Enhanced MPW support.
+ * Modify a Verbs queue state.
  *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param sm
+ *   State modify request parameters.
  *
  * @return
- *   Number of packets successfully transmitted (<= pkts_n).
+ *   0 in case of success, otherwise a non-zero value.
  */
-uint16_t
-mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+static int
+mlx5_queue_state_modify(struct rte_eth_dev *dev,
+                       struct mlx5_mp_arg_queue_state_modify *sm)
 {
-       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-       uint16_t nb_tx = 0;
-
-       while (pkts_n > nb_tx) {
-               uint16_t n;
-               uint16_t ret;
-
-               n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
-               if (n) {
-                       ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
-                       if (!ret)
-                               break;
-                       nb_tx += ret;
-               }
-               n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
-               if (n) {
-                       ret = txq_burst_empw(txq, &pkts[nb_tx], n);
-                       if (!ret)
-                               break;
-                       nb_tx += ret;
-               }
+       int ret = 0;
+
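+       /*
+        * Verbs objects are owned by the primary process; a secondary
+        * process forwards the request over the multi-process channel.
+        */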
+       switch (rte_eal_process_type()) {
+       case RTE_PROC_PRIMARY:
+               ret = mlx5_queue_state_modify_primary(dev, sm);
+               break;
+       case RTE_PROC_SECONDARY:
+               ret = mlx5_mp_req_queue_state_modify(dev, sm);
+               break;
+       default:
+               break;
        }
-       return nb_tx;
+       return ret;
 }
 
 /**
- * Translate RX completion flags to packet type.
+ * Handle a Rx error.
+ * The function moves the RQ to the RESET state when the first error CQE is
+ * seen, then lets the caller's polling loop drain the CQ. Once the CQ is
+ * empty, it moves the RQ back to the ready state and reinitializes it.
+ * Identifying the next CQE and counting the error remain the caller's
+ * responsibility.
  *
  * @param[in] rxq
  *   Pointer to RX queue structure.
- * @param[in] cqe
- *   Pointer to CQE.
- *
- * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
+ * @param[in] mbuf_prepare
+ *   Whether to prepare mbufs for the RQ.
  *
  * @return
- *   Packet type for struct rte_mbuf.
+ *   -1 in case of recovery error, otherwise the CQE status.
  */
-static inline uint32_t
-rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
+int
+mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t mbuf_prepare)
 {
-       uint8_t idx;
-       uint8_t pinfo = cqe->pkt_info;
-       uint16_t ptype = cqe->hdr_type_etc;
-
-       /*
-        * The index to the array should have:
-        * bit[1:0] = l3_hdr_type
-        * bit[4:2] = l4_hdr_type
-        * bit[5] = ip_frag
-        * bit[6] = tunneled
-        * bit[7] = outer_l3_type
-        */
-       idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
-       return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
+       const uint16_t cqe_n = 1 << rxq->cqe_n;
+       const uint16_t cqe_mask = cqe_n - 1;
+       const unsigned int wqe_n = 1 << rxq->elts_n;
+       struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+       union {
+               volatile struct mlx5_cqe *cqe;
+               volatile struct mlx5_err_cqe *err_cqe;
+       } u = {
+               .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
+       };
+       struct mlx5_mp_arg_queue_state_modify sm;
+       int ret;
+
+       switch (rxq->err_state) {
+       case MLX5_RXQ_ERR_STATE_NO_ERROR:
+               rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
+               /* Fall-through */
+       case MLX5_RXQ_ERR_STATE_NEED_RESET:
+               sm.is_wq = 1;
+               sm.queue_id = rxq->idx;
+               sm.state = IBV_WQS_RESET;
+               if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
+                       return -1;
+               if (rxq_ctrl->dump_file_n <
+                   rxq_ctrl->priv->config.max_dump_files_num) {
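+                       /*
+                        * Dump the error CQE, the CQ ring and the RQ ring for
+                        * offline analysis, bounded by max_dump_files_num.
+                        */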
+                       MKSTR(err_str, "Unexpected CQE error syndrome "
+                             "0x%02x CQN = %u RQN = %u wqe_counter = %u"
+                             " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
+                             rxq->cqn, rxq_ctrl->wqn,
+                             rte_be_to_cpu_16(u.err_cqe->wqe_counter),
+                             rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
+                       MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
+                             rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
+                       mlx5_dump_debug_information(name, NULL, err_str, 0);
+                       mlx5_dump_debug_information(name, "MLX5 Error CQ:",
+                                                   (const void *)((uintptr_t)
+                                                                   rxq->cqes),
+                                                   sizeof(*u.cqe) * cqe_n);
+                       mlx5_dump_debug_information(name, "MLX5 Error RQ:",
+                                                   (const void *)((uintptr_t)
+                                                                   rxq->wqes),
+                                                   16 * wqe_n);
+                       rxq_ctrl->dump_file_n++;
+               }
+               rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
+               /* Fall-through */
+       case MLX5_RXQ_ERR_STATE_NEED_READY:
+               ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
+               if (ret == MLX5_CQE_STATUS_HW_OWN) {
+                       rte_cio_wmb();
+                       *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+                       rte_cio_wmb();
+                       /*
+                        * The RQ consumer index must be zeroed while moving
+                        * from RESET state to RDY state.
+                        */
+                       *rxq->rq_db = rte_cpu_to_be_32(0);
+                       rte_cio_wmb();
+                       sm.is_wq = 1;
+                       sm.queue_id = rxq->idx;
+                       sm.state = IBV_WQS_RDY;
+                       if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
+                                                   &sm))
+                               return -1;
+                       if (mbuf_prepare) {
+                               const uint16_t q_mask = wqe_n - 1;
+                               uint16_t elt_idx;
+                               struct rte_mbuf **elt;
+                               int i;
+                               unsigned int n = wqe_n - (rxq->rq_ci -
+                                                         rxq->rq_pi);
+
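+                               /*
+                                * Refill every element the RQ still expects;
+                                * on allocation failure release what was just
+                                * allocated and abort the recovery.
+                                */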
+                               for (i = 0; i < (int)n; ++i) {
+                                       elt_idx = (rxq->rq_ci + i) & q_mask;
+                                       elt = &(*rxq->elts)[elt_idx];
+                                       *elt = rte_mbuf_raw_alloc(rxq->mp);
+                                       if (!*elt) {
+                                               for (i--; i >= 0; --i) {
+                                                       elt_idx = (rxq->rq_ci +
+                                                                  i) & q_mask;
+                                                       elt = &(*rxq->elts)
+                                                               [elt_idx];
+                                                       rte_pktmbuf_free_seg
+                                                               (*elt);
+                                               }
+                                               return -1;
+                                       }
+                               }
+                       }
+                       mlx5_rxq_initialize(rxq);
+                       rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+               }
+               return ret;
+       default:
+               return -1;
+       }
 }
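A hedged sketch (editorial, not part of the patch) of how a datapath that does
not allocate replacement mbufs per packet could use the mbuf_prepare argument;
the scalar burst routine below always passes 0 because it replenishes the ring
itself. The trigger condition and the surrounding loop are assumed:

	/* An error CQE was decoded earlier and flagged in rxq->err_state. */
	if (unlikely(rxq->err_state)) {
		int status = mlx5_rx_err_handle(rxq, 1);

		if (status == MLX5_CQE_STATUS_HW_OWN || status == -1)
			return 0; /* CQ drained or recovery failed */
	}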
 
 /**
@@ -1844,8 +875,7 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
  *   written.
  *
  * @return
- *   Packet size in bytes (0 if there is none), -1 in case of completion
- *   with error.
+ *   0 in case of empty CQE, otherwise the packet size in bytes.
  */
 static inline int
 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -1853,98 +883,118 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 {
        struct rxq_zip *zip = &rxq->zip;
        uint16_t cqe_n = cqe_cnt + 1;
-       int len = 0;
+       int len;
        uint16_t idx, end;
 
-       /* Process compressed data in the CQE and mini arrays. */
-       if (zip->ai) {
-               volatile struct mlx5_mini_cqe8 (*mc)[8] =
-                       (volatile struct mlx5_mini_cqe8 (*)[8])
-                       (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
-
-               len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
-               *mcqe = &(*mc)[zip->ai & 7];
-               if ((++zip->ai & 7) == 0) {
-                       /* Invalidate consumed CQEs */
-                       idx = zip->ca;
-                       end = zip->na;
-                       while (idx != end) {
-                               (*rxq->cqes)[idx & cqe_cnt].op_own =
-                                       MLX5_CQE_INVALIDATE;
-                               ++idx;
-                       }
-                       /*
-                        * Increment consumer index to skip the number of
-                        * CQEs consumed. Hardware leaves holes in the CQ
-                        * ring for software use.
-                        */
-                       zip->ca = zip->na;
-                       zip->na += 8;
-               }
-               if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
-                       /* Invalidate the rest */
-                       idx = zip->ca;
-                       end = zip->cq_ci;
-
-                       while (idx != end) {
-                               (*rxq->cqes)[idx & cqe_cnt].op_own =
-                                       MLX5_CQE_INVALIDATE;
-                               ++idx;
-                       }
-                       rxq->cq_ci = zip->cq_ci;
-                       zip->ai = 0;
-               }
-       /* No compressed data, get next CQE and verify if it is compressed. */
-       } else {
-               int ret;
-               int8_t op_own;
-
-               ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
-               if (unlikely(ret == 1))
-                       return 0;
-               ++rxq->cq_ci;
-               op_own = cqe->op_own;
-               rte_cio_rmb();
-               if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
+       do {
+               len = 0;
+               /* Process compressed data in the CQE and mini arrays. */
+               if (zip->ai) {
                        volatile struct mlx5_mini_cqe8 (*mc)[8] =
                                (volatile struct mlx5_mini_cqe8 (*)[8])
-                               (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
+                               (uintptr_t)(&(*rxq->cqes)[zip->ca &
                                                          cqe_cnt].pkt_info);
 
-                       /* Fix endianness. */
-                       zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
-                       /*
-                        * Current mini array position is the one returned by
-                        * check_cqe64().
-                        *
-                        * If completion comprises several mini arrays, as a
-                        * special case the second one is located 7 CQEs after
-                        * the initial CQE instead of 8 for subsequent ones.
-                        */
-                       zip->ca = rxq->cq_ci;
-                       zip->na = zip->ca + 7;
-                       /* Compute the next non compressed CQE. */
-                       --rxq->cq_ci;
-                       zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
-                       /* Get packet size to return. */
-                       len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
-                       *mcqe = &(*mc)[0];
-                       zip->ai = 1;
-                       /* Prefetch all the entries to be invalidated */
-                       idx = zip->ca;
-                       end = zip->cq_ci;
-                       while (idx != end) {
-                               rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
-                               ++idx;
+                       len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+                       *mcqe = &(*mc)[zip->ai & 7];
+                       if ((++zip->ai & 7) == 0) {
+                               /* Invalidate consumed CQEs */
+                               idx = zip->ca;
+                               end = zip->na;
+                               while (idx != end) {
+                                       (*rxq->cqes)[idx & cqe_cnt].op_own =
+                                               MLX5_CQE_INVALIDATE;
+                                       ++idx;
+                               }
+                               /*
+                                * Increment consumer index to skip the number
+                                * of CQEs consumed. Hardware leaves holes in
+                                * the CQ ring for software use.
+                                */
+                               zip->ca = zip->na;
+                               zip->na += 8;
+                       }
+                       if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
+                               /* Invalidate the rest */
+                               idx = zip->ca;
+                               end = zip->cq_ci;
+
+                               while (idx != end) {
+                                       (*rxq->cqes)[idx & cqe_cnt].op_own =
+                                               MLX5_CQE_INVALIDATE;
+                                       ++idx;
+                               }
+                               rxq->cq_ci = zip->cq_ci;
+                               zip->ai = 0;
+                       }
+               /*
+                * No compressed data, get next CQE and verify if it is
+                * compressed.
+                */
+               } else {
+                       int ret;
+                       int8_t op_own;
+
+                       ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
+                       if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+                               if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
+                                            rxq->err_state)) {
+                                       ret = mlx5_rx_err_handle(rxq, 0);
+                                       if (ret == MLX5_CQE_STATUS_HW_OWN ||
+                                           ret == -1)
+                                               return 0;
+                               } else {
+                                       return 0;
+                               }
+                       }
+                       ++rxq->cq_ci;
+                       op_own = cqe->op_own;
+                       if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
+                               volatile struct mlx5_mini_cqe8 (*mc)[8] =
+                                       (volatile struct mlx5_mini_cqe8 (*)[8])
+                                       (uintptr_t)(&(*rxq->cqes)
+                                               [rxq->cq_ci &
+                                                cqe_cnt].pkt_info);
+
+                               /* Fix endianness. */
+                               zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
+                               /*
+                                * Current mini array position is the one
+                                * returned by check_cqe64().
+                                *
+                                * If completion comprises several mini arrays,
+                                * as a special case the second one is located
+                                * 7 CQEs after the initial CQE instead of 8
+                                * for subsequent ones.
+                                */
+                               zip->ca = rxq->cq_ci;
+                               zip->na = zip->ca + 7;
+                               /* Compute the next non compressed CQE. */
+                               --rxq->cq_ci;
+                               zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
+                               /* Get packet size to return. */
+                               len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+                               *mcqe = &(*mc)[0];
+                               zip->ai = 1;
+                               /* Prefetch all entries to be invalidated */
+                               idx = zip->ca;
+                               end = zip->cq_ci;
+                               while (idx != end) {
+                                       rte_prefetch0(&(*rxq->cqes)[(idx) &
+                                                                   cqe_cnt]);
+                                       ++idx;
+                               }
+                       } else {
+                               len = rte_be_to_cpu_32(cqe->byte_cnt);
                        }
+               }
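+               /*
+                * While the queue is recovering from an error, drop the
+                * polled CQE and keep looping until recovery completes.
+                */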
+               if (unlikely(rxq->err_state)) {
+                       cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+                       ++rxq->stats.idropped;
                } else {
-                       len = rte_be_to_cpu_32(cqe->byte_cnt);
+                       return len;
                }
-               /* Error while receiving packet. */
-               if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
-                       return -1;
-       }
-       return len;
+       } while (1);
 }
 
 /**
@@ -2087,12 +1137,6 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                rte_mbuf_raw_free(rep);
                                break;
                        }
-                       if (unlikely(len == -1)) {
-                               /* RX error, packet is likely too large. */
-                               rte_mbuf_raw_free(rep);
-                               ++rxq->stats.idropped;
-                               goto skip;
-                       }
                        pkt = seg;
                        assert(len >= (rxq->crc_present << 2));
                        pkt->ol_flags = 0;
@@ -2135,7 +1179,6 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                pkt = NULL;
                --pkts_n;
                ++i;
-skip:
                /* Align consumer index to the next stride. */
                rq_ci >>= sges_n;
                ++rq_ci;
@@ -2268,11 +1311,6 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
                if (!ret)
                        break;
-               if (unlikely(ret == -1)) {
-                       /* RX error, packet is likely too large. */
-                       ++rxq->stats.idropped;
-                       continue;
-               }
                byte_cnt = ret;
                strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
                           MLX5_MPRQ_STRIDE_NUM_SHIFT;
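With the recovery handled inside mlx5_rx_poll_len(), neither burst routine has
to deal with a negative return value anymore. A minimal sketch of the
resulting caller pattern, reusing the names visible in the hunk above
(surrounding declarations assumed):

	ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
	if (!ret)
		break;          /* CQ empty or recovery in progress */
	byte_cnt = ret;         /* otherwise the packet size in bytes */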
@@ -2453,22 +1491,6 @@ removed_rx_burst(void *dpdk_txq __rte_unused,
  * (e.g.  mlx5_rxtx_vec_sse.c for x86).
  */
 
-__rte_weak uint16_t
-mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
-                     struct rte_mbuf **pkts __rte_unused,
-                     uint16_t pkts_n __rte_unused)
-{
-       return 0;
-}
-
-__rte_weak uint16_t
-mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
-                 struct rte_mbuf **pkts __rte_unused,
-                 uint16_t pkts_n __rte_unused)
-{
-       return 0;
-}
-
 __rte_weak uint16_t
 mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
                  struct rte_mbuf **pkts __rte_unused,
@@ -2478,25 +1500,50 @@ mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
 }
 
 __rte_weak int
-mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
 {
        return -ENOTSUP;
 }
 
 __rte_weak int
-mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
 {
        return -ENOTSUP;
 }
 
-__rte_weak int
-mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
+/**
+ * DPDK callback to check the status of a tx descriptor.
+ *
+ * @param tx_queue
+ *   The tx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the tx descriptor.
+ */
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-       return -ENOTSUP;
+       (void)tx_queue;
+       (void)offset;
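+       /* Tx datapath is removed; report every descriptor as full. */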
+       return RTE_ETH_TX_DESC_FULL;
 }
 
-__rte_weak int
-mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
+/**
+ * Select the Tx burst function to use.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   Pointer to selected Tx burst function.
+ */
+eth_tx_burst_t
+mlx5_select_tx_function(struct rte_eth_dev *dev)
 {
-       return -ENOTSUP;
+       (void)dev;
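+       /* Tx datapath is removed; select the placeholder burst function. */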
+       return removed_tx_burst;
 }