net/mlx5: rename RxQ verbs to general RxQ object
[dpdk.git] / drivers / net / mlx5 / mlx5_rxq.c
index d2b25e8..20a4695 100644 (file)
@@ -52,10 +52,95 @@ uint8_t rss_hash_default_key[] = {
 };
 
 /* Length of the default RSS hash key. */
-const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
+static_assert(MLX5_RSS_HASH_KEY_LEN ==
+             (unsigned int)sizeof(rss_hash_default_key),
+             "wrong RSS default key size.");
 
 /**
- * Allocate RX queue elements.
+ * Check whether Multi-Packet RQ can be enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   1 if supported, negative errno value if not.
+ */
+inline int
+mlx5_check_mprq_support(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->config.mprq.enabled &&
+           priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
+               return 1;
+       return -ENOTSUP;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ *
+ * @param rxq
+ *   Pointer to receive queue structure.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+{
+       return rxq->strd_num_n > 0;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the device.
+ * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_mprq_enabled(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint16_t i;
+       uint16_t n = 0;
+
+       if (mlx5_check_mprq_support(dev) < 0)
+               return 0;
+       /* All the configured queues should be enabled. */
+       for (i = 0; i < priv->rxqs_n; ++i) {
+               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+               if (!rxq)
+                       continue;
+               if (mlx5_rxq_mprq_enabled(rxq))
+                       ++n;
+       }
+       /* Multi-Packet RQ can't be partially configured. */
+       assert(n == 0 || n == priv->rxqs_n);
+       return n == priv->rxqs_n;
+}
+
+/**
+ * Check whether LRO is supported and enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 if disabled, 1 if enabled.
+ */
+inline int
+mlx5_lro_on(struct rte_eth_dev *dev)
+{
+       return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
+}
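
Taken together these predicates layer cleanly: mlx5_check_mprq_support() says whether MPRQ may be used at all, mlx5_rxq_mprq_enabled() whether a particular queue was actually configured with strides, and mlx5_mprq_enabled() whether every configured queue agrees. A minimal sketch of how a caller might combine the first two (hypothetical helper, not part of this patch):

    /* Hypothetical helper: does this queue use Multi-Packet RQ? */
    static int
    rxq_uses_mprq(struct rte_eth_dev *dev, struct mlx5_rxq_data *rxq)
    {
            /* Supported device-wide and actually configured on this queue. */
            return mlx5_check_mprq_support(dev) > 0 &&
                   mlx5_rxq_mprq_enabled(rxq);
    }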
+
+/**
+ * Allocate RX queue elements for Multi-Packet RQ.
  *
  * @param rxq_ctrl
  *   Pointer to RX queue structure.
@@ -63,8 +148,58 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-int
-rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+static int
+rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+       unsigned int wqe_n = 1 << rxq->elts_n;
+       unsigned int i;
+       int err;
+
+       /* Iterate on segments, plus one extra for the replacement buffer. */
+       for (i = 0; i <= wqe_n; ++i) {
+               struct mlx5_mprq_buf *buf;
+
+               if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
+                       DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
+                       rte_errno = ENOMEM;
+                       goto error;
+               }
+               if (i < wqe_n)
+                       (*rxq->mprq_bufs)[i] = buf;
+               else
+                       rxq->mprq_repl = buf;
+       }
+       DRV_LOG(DEBUG,
+               "port %u Rx queue %u allocated and configured %u segments",
+               rxq->port_id, rxq->idx, wqe_n);
+       return 0;
+error:
+       err = rte_errno; /* Save rte_errno before cleanup. */
+       wqe_n = i;
+       for (i = 0; (i != wqe_n); ++i) {
+               if ((*rxq->mprq_bufs)[i] != NULL)
+                       rte_mempool_put(rxq->mprq_mp,
+                                       (*rxq->mprq_bufs)[i]);
+               (*rxq->mprq_bufs)[i] = NULL;
+       }
+       DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+               rxq->port_id, rxq->idx);
+       rte_errno = err; /* Restore rte_errno. */
+       return -rte_errno;
+}
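
Note that the loop above intentionally runs wqe_n + 1 times: the first wqe_n buffers fill the ring and the extra one is parked in rxq->mprq_repl, so the data path can swap out a consumed buffer without hitting the mempool in the hot path. The same N-plus-one idiom in isolation (a sketch, names hypothetical):

    /* Sketch: allocate n ring buffers plus one spare for fast replenish. */
    static int
    alloc_ring_plus_spare(struct rte_mempool *mp, void **ring, void **spare,
                          unsigned int n)
    {
            unsigned int i;

            for (i = 0; i <= n; ++i) {
                    void *buf;

                    if (rte_mempool_get(mp, &buf) < 0)
                            return -ENOMEM; /* Caller unwinds ring[0..i). */
                    if (i < n)
                            ring[i] = buf;  /* Regular ring entry. */
                    else
                            *spare = buf;   /* Replacement kept aside. */
            }
            return 0;
    }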
+
+/**
+ * Allocate RX queue elements for Single-Packet RQ.
+ *
+ * @param rxq_ctrl
+ *   Pointer to RX queue structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
        const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
        unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
@@ -78,7 +213,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
                buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
                if (buf == NULL) {
                        DRV_LOG(ERR, "port %u empty mbuf pool",
-                               rxq_ctrl->priv->dev->data->port_id);
+                               PORT_ID(rxq_ctrl->priv));
                        rte_errno = ENOMEM;
                        goto error;
                }
@@ -122,7 +257,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
        DRV_LOG(DEBUG,
                "port %u Rx queue %u allocated and configured %u segments"
                " (max %u packets)",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
+               PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
                elts_n / (1 << rxq_ctrl->rxq.sges_n));
        return 0;
 error:
@@ -134,19 +269,63 @@ error:
                (*rxq_ctrl->rxq.elts)[i] = NULL;
        }
        DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+               PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
 }
 
 /**
- * Free RX queue elements.
+ * Allocate RX queue elements.
+ *
+ * @param rxq_ctrl
+ *   Pointer to RX queue structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+              rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
+}
+
+/**
+ * Free RX queue elements for Multi-Packet RQ.
  *
  * @param rxq_ctrl
  *   Pointer to RX queue structure.
  */
 static void
-rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+       uint16_t i;
+
+       DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
+               rxq->port_id, rxq->idx);
+       if (rxq->mprq_bufs == NULL)
+               return;
+       assert(mlx5_rxq_check_vec_support(rxq) < 0);
+       for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+               if ((*rxq->mprq_bufs)[i] != NULL)
+                       mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
+               (*rxq->mprq_bufs)[i] = NULL;
+       }
+       if (rxq->mprq_repl != NULL) {
+               mlx5_mprq_buf_free(rxq->mprq_repl);
+               rxq->mprq_repl = NULL;
+       }
+}
+
+/**
+ * Free RX queue elements for Single-Packet RQ.
+ *
+ * @param rxq_ctrl
+ *   Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
        struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
        const uint16_t q_n = (1 << rxq->elts_n);
@@ -155,7 +334,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
        uint16_t i;
 
        DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+               PORT_ID(rxq_ctrl->priv), rxq->idx);
        if (rxq->elts == NULL)
                return;
        /**
@@ -175,21 +354,18 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Clean up a RX queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
+ * Free RX queue elements.
  *
  * @param rxq_ctrl
  *   Pointer to RX queue structure.
  */
-void
-mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
+static void
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-       DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-               rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
-       if (rxq_ctrl->ibv)
-               mlx5_rxq_ibv_release(rxq_ctrl->ibv);
-       memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
+       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+               rxq_free_elts_mprq(rxq_ctrl);
+       else
+               rxq_free_elts_sprq(rxq_ctrl);
 }
 
 /**
@@ -204,14 +380,15 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 uint64_t
 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
                             DEV_RX_OFFLOAD_TIMESTAMP |
                             DEV_RX_OFFLOAD_JUMBO_FRAME);
 
        if (config->hw_fcs_strip)
-               offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+               offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+
        if (config->hw_csum)
                offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
                             DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -225,41 +402,47 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 /**
  * Returns the per-port supported offloads.
  *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
  * @return
  *   Supported Rx offloads.
  */
 uint64_t
-mlx5_get_rx_port_offloads(void)
+mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
 {
        uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
 
+       if (MLX5_LRO_SUPPORTED(dev))
+               offloads |= DEV_RX_OFFLOAD_TCP_LRO;
        return offloads;
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
+ * Verify if the queue can be released.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
+ * @param idx
+ *   RX queue index.
  *
  * @return
- *   1 if the configuration is valid, 0 otherwise.
+ *   1 if the queue can be released,
+ *   0 if the queue cannot be released (there are still references to it),
+ *   a negative errno value and rte_errno is set if the queue doesn't exist.
  */
 static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 {
-       uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-       uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-       uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_ctrl *rxq_ctrl;
 
-       if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-           offloads)
-               return 0;
-       if (((port_offloads ^ offloads) & port_supp_offloads))
-               return 0;
-       return 1;
+       if (!(*priv->rxqs)[idx]) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+       return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
 }
 
 /**
@@ -285,7 +468,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_rxconf *conf,
                    struct rte_mempool *mp)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -305,18 +488,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = EOVERFLOW;
                return -rte_errno;
        }
-       if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-               DRV_LOG(ERR,
-                       "port %u Rx queue offloads 0x%" PRIx64 " don't match"
-                       " port offloads 0x%" PRIx64 " or supported offloads 0x%"
-                       PRIx64,
-                       dev->data->port_id, conf->offloads,
-                       dev->data->dev_conf.rxmode.offloads,
-                       (mlx5_get_rx_port_offloads() |
-                        mlx5_get_rx_queue_offloads(dev)));
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       }
        if (!mlx5_rxq_releasable(dev, idx)) {
                DRV_LOG(ERR, "port %u unable to release queue index %u",
                        dev->data->port_id, idx);
@@ -348,17 +519,74 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 {
        struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
        struct mlx5_rxq_ctrl *rxq_ctrl;
-       struct priv *priv;
+       struct mlx5_priv *priv;
 
        if (rxq == NULL)
                return;
        rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
        priv = rxq_ctrl->priv;
-       if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
+       if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
                rte_panic("port %u Rx queue %u is still used by a flow and"
-                         " cannot be removed\n", priv->dev->data->port_id,
-                         rxq_ctrl->idx);
-       mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
+                         " cannot be removed\n",
+                         PORT_ID(priv), rxq->idx);
+       mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
+}
+
+/**
+ * Get an Rx queue Verbs/DevX object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   Queue index in DPDK Rx queue array
+ *
+ * @return
+ *   The Verbs/DevX object if it exists.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+       struct mlx5_rxq_ctrl *rxq_ctrl;
+
+       if (idx >= priv->rxqs_n)
+               return NULL;
+       if (!rxq_data)
+               return NULL;
+       rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+       if (rxq_ctrl->obj)
+               rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
+       return rxq_ctrl->obj;
+}
+
+/**
+ * Release an Rx verbs/DevX queue object.
+ *
+ * @param rxq_obj
+ *   Verbs/DevX Rx queue object.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
+{
+       assert(rxq_obj);
+       assert(rxq_obj->wq);
+       assert(rxq_obj->cq);
+       if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
+               rxq_free_elts(rxq_obj->rxq_ctrl);
+               claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+               claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+               if (rxq_obj->channel)
+                       claim_zero(mlx5_glue->destroy_comp_channel
+                                  (rxq_obj->channel));
+               LIST_REMOVE(rxq_obj, next);
+               rte_free(rxq_obj);
+               return 0;
+       }
+       return 1;
 }
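
mlx5_rxq_obj_get() and mlx5_rxq_obj_release() form a plain reference-count pair: get increments the count, release decrements it and tears the object down on the last reference. A hedged usage sketch (hypothetical caller, not part of this patch):

    /* Hypothetical caller: borrow the object, use it, drop the reference. */
    static void
    use_rxq_obj(struct rte_eth_dev *dev, uint16_t idx)
    {
            struct mlx5_rxq_obj *obj = mlx5_rxq_obj_get(dev, idx);

            if (obj == NULL)
                    return;
            /* ... use obj->cq, obj->wq, obj->channel ... */
            mlx5_rxq_obj_release(obj); /* Tears down on the last reference. */
    }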
 
 /**
@@ -373,14 +601,14 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 int
 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        unsigned int rxqs_n = priv->rxqs_n;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
        unsigned int count = 0;
-       struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+       struct rte_intr_handle *intr_handle = dev->intr_handle;
 
-       if (!priv->dev->data->dev_conf.intr_conf.rxq)
+       if (!dev->data->dev_conf.intr_conf.rxq)
                return 0;
        mlx5_rx_intr_vec_disable(dev);
        intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
@@ -394,14 +622,14 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
        }
        intr_handle->type = RTE_INTR_HANDLE_EXT;
        for (i = 0; i != n; ++i) {
-               /* This rxq ibv must not be released in this function. */
-               struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
+               /* This rxq obj must not be released in this function. */
+               struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
                int fd;
                int flags;
                int rc;
 
                /* Skip queues that cannot request interrupts. */
-               if (!rxq_ibv || !rxq_ibv->channel) {
+               if (!rxq_obj || !rxq_obj->channel) {
                        /* Use invalid intr_vec[] index to disable entry. */
                        intr_handle->intr_vec[i] =
                                RTE_INTR_VEC_RXTX_OFFSET +
@@ -418,7 +646,7 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
                        rte_errno = ENOMEM;
                        return -rte_errno;
                }
-               fd = rxq_ibv->channel->fd;
+               fd = rxq_obj->channel->fd;
                flags = fcntl(fd, F_GETFL);
                rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
                if (rc < 0) {
@@ -451,13 +679,13 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 void
 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
-       struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_intr_handle *intr_handle = dev->intr_handle;
        unsigned int i;
        unsigned int rxqs_n = priv->rxqs_n;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 
-       if (!priv->dev->data->dev_conf.intr_conf.rxq)
+       if (!dev->data->dev_conf.intr_conf.rxq)
                return;
        if (!intr_handle->intr_vec)
                goto free;
@@ -470,11 +698,12 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
                        continue;
                /**
                 * Need to access directly the queue to release the reference
-                * kept in priv_rx_intr_vec_enable().
+                * kept in mlx5_rx_intr_vec_enable().
                 */
                rxq_data = (*priv->rxqs)[i];
                rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-               mlx5_rxq_ibv_release(rxq_ctrl->ibv);
+               if (rxq_ctrl->obj)
+                       mlx5_rxq_obj_release(rxq_ctrl->obj);
        }
 free:
        rte_intr_free_epoll_fd(intr_handle);
@@ -505,7 +734,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
        doorbell = (uint64_t)doorbell_hi << 32;
        doorbell |=  rxq->cqn;
        rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
-       rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
+       mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
+                        cq_db_reg, rxq->uar_lock_cq);
 }
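
The switch from rte_write64() to mlx5_uar_write64() matters on 32-bit targets: the 64-bit doorbell cannot be stored atomically there, so the write has to be split into two 32-bit stores serialized by rxq->uar_lock_cq. A sketch of what such a helper typically looks like (assumed semantics, not the literal mlx5 implementation):

    /* Sketch: 64-bit UAR write that stays safe on 32-bit architectures. */
    static inline void
    uar_write64_sketch(uint64_t val, void *addr, rte_spinlock_t *lock)
    {
    #ifdef RTE_ARCH_64
            rte_write64(val, addr);                 /* One atomic store. */
            RTE_SET_USED(lock);
    #else
            rte_spinlock_lock(lock);                /* Serialize both halves. */
            rte_write32((uint32_t)val, addr);
            rte_write32((uint32_t)(val >> 32), (uint8_t *)addr + 4);
            rte_spinlock_unlock(lock);
    #endif
    }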
 
 /**
@@ -522,7 +752,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 int
 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data;
        struct mlx5_rxq_ctrl *rxq_ctrl;
 
@@ -533,15 +763,15 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        }
        rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        if (rxq_ctrl->irq) {
-               struct mlx5_rxq_ibv *rxq_ibv;
+               struct mlx5_rxq_obj *rxq_obj;
 
-               rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
-               if (!rxq_ibv) {
+               rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+               if (!rxq_obj) {
                        rte_errno = EINVAL;
                        return -rte_errno;
                }
                mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
-               mlx5_rxq_ibv_release(rxq_ibv);
+               mlx5_rxq_obj_release(rxq_obj);
        }
        return 0;
 }
@@ -560,10 +790,10 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data;
        struct mlx5_rxq_ctrl *rxq_ctrl;
-       struct mlx5_rxq_ibv *rxq_ibv = NULL;
+       struct mlx5_rxq_obj *rxq_obj = NULL;
        struct ibv_cq *ev_cq;
        void *ev_ctx;
        int ret;
@@ -576,23 +806,24 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        if (!rxq_ctrl->irq)
                return 0;
-       rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
-       if (!rxq_ibv) {
+       rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+       if (!rxq_obj) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
-       if (ret || ev_cq != rxq_ibv->cq) {
+       ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
+       if (ret || ev_cq != rxq_obj->cq) {
                rte_errno = EINVAL;
                goto exit;
        }
        rxq_data->cq_arm_sn++;
-       mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+       mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
+       mlx5_rxq_obj_release(rxq_obj);
        return 0;
 exit:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       if (rxq_ibv)
-               mlx5_rxq_ibv_release(rxq_ibv);
+       if (rxq_obj)
+               mlx5_rxq_obj_release(rxq_obj);
        DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
                dev->data->port_id, rx_queue_id);
        rte_errno = ret; /* Restore rte_errno. */
@@ -600,7 +831,7 @@ exit:
 }
 
 /**
- * Create the Rx queue Verbs object.
+ * Create the Rx queue Verbs/DevX object.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -608,12 +839,12 @@ exit:
  *   Queue index in DPDK Rx queue array
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
+struct mlx5_rxq_obj *
+mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -623,17 +854,22 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                        struct ibv_cq_init_attr_ex ibv;
                        struct mlx5dv_cq_init_attr mlx5;
                } cq;
-               struct ibv_wq_init_attr wq;
-               struct ibv_cq_ex cq_attr;
+               struct {
+                       struct ibv_wq_init_attr ibv;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+                       struct mlx5dv_wq_init_attr mlx5;
+#endif
+               } wq;
        } attr;
-       unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
-       struct mlx5_rxq_ibv *tmpl;
+       unsigned int cqe_n;
+       unsigned int wqe_n = 1 << rxq_data->elts_n;
+       struct mlx5_rxq_obj *tmpl = NULL;
        struct mlx5dv_cq cq_info;
        struct mlx5dv_rwq rwq;
-       unsigned int i;
        int ret = 0;
        struct mlx5dv_obj obj;
        struct mlx5_dev_config *config = &priv->config;
+       const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
 
        assert(rxq_data);
        assert(!rxq_ctrl->ibv);
@@ -644,23 +880,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
        if (!tmpl) {
                DRV_LOG(ERR,
                        "port %u Rx queue %u cannot allocate verbs resources",
-                       dev->data->port_id, rxq_ctrl->idx);
+                       dev->data->port_id, rxq_data->idx);
                rte_errno = ENOMEM;
                goto error;
        }
        tmpl->rxq_ctrl = rxq_ctrl;
-       /* Use the entire RX mempool as the memory region. */
-       tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
-       if (!tmpl->mr) {
-               tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
-               if (!tmpl->mr) {
-                       DRV_LOG(ERR, "port %u: memeroy region creation failure",
-                               dev->data->port_id);
-                       goto error;
-               }
-       }
        if (rxq_ctrl->irq) {
-               tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
+               tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
                if (!tmpl->channel) {
                        DRV_LOG(ERR, "port %u: comp channel creation failure",
                                dev->data->port_id);
@@ -668,6 +894,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                        goto error;
                }
        }
+       if (mprq_en)
+               cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+       else
+               cqe_n = wqe_n - 1;
        attr.cq.ibv = (struct ibv_cq_init_attr_ex){
                .cqe = cqe_n,
                .channel = tmpl->channel,
@@ -679,7 +909,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
        if (config->cqe_comp && !rxq_data->hw_timestamp) {
                attr.cq.mlx5.comp_mask |=
                        MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+               attr.cq.mlx5.cqe_comp_res_format =
+                       mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+                                 MLX5DV_CQE_RES_FORMAT_HASH;
+#else
                attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
                /*
                 * For vectorized Rx, it must not be doubled in order to
                 * make cq_ci and rq_ci aligned.
@@ -692,8 +928,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                        " timestamp",
                        dev->data->port_id);
        }
+#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
+       if (config->cqe_pad) {
+               attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
+               attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+       }
+#endif
        tmpl->cq = mlx5_glue->cq_ex_to_cq
-               (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
+               (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
                                         &attr.cq.mlx5));
        if (tmpl->cq == NULL) {
                DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
@@ -701,18 +943,18 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                rte_errno = ENOMEM;
                goto error;
        }
-       DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
-               dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
-       DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
-               dev->data->port_id, priv->device_attr.orig_attr.max_sge);
-       attr.wq = (struct ibv_wq_init_attr){
+       DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
+               dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+       DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
+               dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+       attr.wq.ibv = (struct ibv_wq_init_attr){
                .wq_context = NULL, /* Could be useful in the future. */
                .wq_type = IBV_WQT_RQ,
                /* Max number of outstanding WRs. */
-               .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
+               .max_wr = wqe_n >> rxq_data->sges_n,
                /* Max number of scatter/gather elements in a WR. */
                .max_sge = 1 << rxq_data->sges_n,
-               .pd = priv->pd,
+               .pd = priv->sh->pd,
                .cq = tmpl->cq,
                .comp_mask =
                        IBV_WQ_FLAGS_CVLAN_STRIPPING |
@@ -723,16 +965,38 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
        };
        /* By default, FCS (CRC) is stripped by hardware. */
        if (rxq_data->crc_present) {
-               attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
-               attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+               attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+               attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
        }
-#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
        if (config->hw_padding) {
-               attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
-               attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
+               attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+               attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
+               attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
+               attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+#endif
+       }
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+       attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
+               .comp_mask = 0,
+       };
+       if (mprq_en) {
+               struct mlx5dv_striding_rq_init_attr *mprq_attr =
+                       &attr.wq.mlx5.striding_rq_attrs;
+
+               attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+               *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
+                       .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
+                       .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+                       .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
+               };
        }
+       tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
+                                          &attr.wq.mlx5);
+#else
+       tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
 #endif
-       tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
        if (tmpl->wq == NULL) {
                DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
                        dev->data->port_id, idx);
@@ -743,16 +1007,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         * Make sure number of WRs*SGEs match expectations since a queue
         * cannot allocate more than "desc" buffers.
         */
-       if (((int)attr.wq.max_wr !=
-            ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
-           ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
+       if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+           attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
                DRV_LOG(ERR,
                        "port %u Rx queue %u requested %u*%u but got %u*%u"
                        " WRs*SGEs",
                        dev->data->port_id, idx,
-                       ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
-                       (1 << rxq_data->sges_n),
-                       attr.wq.max_wr, attr.wq.max_sge);
+                       wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
+                       attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
                rte_errno = EINVAL;
                goto error;
        }
@@ -787,167 +1049,237 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        /* Fill the rings. */
-       rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
-               (uintptr_t)rwq.buf;
-       for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
-               struct rte_mbuf *buf = (*rxq_data->elts)[i];
-               volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
-
-               /* scat->addr must be able to store a pointer. */
-               assert(sizeof(scat->addr) >= sizeof(uintptr_t));
-               *scat = (struct mlx5_wqe_data_seg){
-                       .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
-                                                                 uintptr_t)),
-                       .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
-                       .lkey = tmpl->mr->lkey,
-               };
-       }
+       rxq_data->wqes = rwq.buf;
        rxq_data->rq_db = rwq.dbrec;
        rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
-       rxq_data->cq_ci = 0;
-       rxq_data->rq_ci = 0;
-       rxq_data->rq_pi = 0;
-       rxq_data->zip = (struct rxq_zip){
-               .ai = 0,
-       };
        rxq_data->cq_db = cq_info.dbrec;
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
        rxq_data->cq_uar = cq_info.cq_uar;
        rxq_data->cqn = cq_info.cqn;
        rxq_data->cq_arm_sn = 0;
-       /* Update doorbell counter. */
-       rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
-       rte_wmb();
-       *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
+       mlx5_rxq_initialize(rxq_data);
+       rxq_data->cq_ci = 0;
        DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
                idx, (void *)&tmpl);
        rte_atomic32_inc(&tmpl->refcnt);
-       DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-               dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
-       LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+       LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
        return tmpl;
 error:
-       ret = rte_errno; /* Save rte_errno before cleanup. */
-       if (tmpl->wq)
-               claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
-       if (tmpl->cq)
-               claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
-       if (tmpl->channel)
-               claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
-       if (tmpl->mr)
-               mlx5_mr_release(tmpl->mr);
+       if (tmpl) {
+               ret = rte_errno; /* Save rte_errno before cleanup. */
+               if (tmpl->wq)
+                       claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+               if (tmpl->cq)
+                       claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
+               if (tmpl->channel)
+                       claim_zero(mlx5_glue->destroy_comp_channel
+                                                       (tmpl->channel));
+               rte_free(tmpl);
+               rte_errno = ret; /* Restore rte_errno. */
+       }
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
-       rte_errno = ret; /* Restore rte_errno. */
        return NULL;
 }
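
One sizing detail in the function above deserves a worked example: with MPRQ every stride can complete as its own CQE, so the CQ must cover wqe_n * strides entries rather than wqe_n. Assuming a 512-entry RQ and strd_num_n = 6, i.e. 64 strides per WQE (illustrative numbers, not defaults):

    /* CQ depth example (illustrative numbers):
     *   wqe_n = 1 << elts_n = 512
     *   MPRQ:  cqe_n = 512 * (1 << 6) - 1 = 32767
     *   SPRQ:  cqe_n = 512 - 1            = 511
     */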
 
 /**
- * Get an Rx queue Verbs object.
+ * Verify the Rx queue objects list is empty
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array
  *
  * @return
- *   The Verbs object if it exists.
+ *   The number of objects not released.
  */
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
+int
+mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-       struct mlx5_rxq_ctrl *rxq_ctrl;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       int ret = 0;
+       struct mlx5_rxq_obj *rxq_obj;
 
-       if (idx >= priv->rxqs_n)
-               return NULL;
-       if (!rxq_data)
-               return NULL;
-       rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-       if (rxq_ctrl->ibv) {
-               mlx5_mr_get(dev, rxq_data->mp);
-               rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
-               DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-                       dev->data->port_id, rxq_ctrl->idx,
-                       rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
-       }
-       return rxq_ctrl->ibv;
+       LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+               DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
+                       dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
+               ++ret;
+       }
+       return ret;
 }
 
 /**
- * Release an Rx verbs queue object.
- *
- * @param rxq_ibv
- *   Verbs Rx queue object.
- *
- * @return
- *   1 while a reference on it exists, 0 when freed.
+ * Callback function to initialize mbufs for Multi-Packet RQ.
  */
-int
-mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
+static inline void
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+                   void *_m, unsigned int i __rte_unused)
 {
-       int ret;
+       struct mlx5_mprq_buf *buf = _m;
 
-       assert(rxq_ibv);
-       assert(rxq_ibv->wq);
-       assert(rxq_ibv->cq);
-       assert(rxq_ibv->mr);
-       ret = mlx5_mr_release(rxq_ibv->mr);
-       if (!ret)
-               rxq_ibv->mr = NULL;
-       DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-               rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
-               rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
-       if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
-               rxq_free_elts(rxq_ibv->rxq_ctrl);
-               claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
-               claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
-               if (rxq_ibv->channel)
-                       claim_zero(mlx5_glue->destroy_comp_channel
-                                  (rxq_ibv->channel));
-               LIST_REMOVE(rxq_ibv, next);
-               rte_free(rxq_ibv);
-               return 0;
-       }
-       return 1;
+       memset(_m, 0, sizeof(*buf));
+       buf->mp = mp;
+       rte_atomic16_set(&buf->refcnt, 1);
 }
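
mlx5_mprq_buf_init() is a standard rte_mempool per-object init hook: rte_mempool_create() invokes it once for each element when the pool is populated, which is how every mlx5_mprq_buf gets its back-pointer and an initial refcount of 1 (the actual call site is in mlx5_mprq_alloc_mp() below). A self-contained sketch of the hook mechanism (hypothetical pool parameters):

    /* Sketch: a per-object init hook runs once per element at creation. */
    static void
    obj_init(struct rte_mempool *mp, void *arg __rte_unused,
             void *obj, unsigned int idx __rte_unused)
    {
            memset(obj, 0, mp->elt_size); /* Known state for each element. */
    }

    void
    create_pool_example(void)
    {
            struct rte_mempool *mp;

            mp = rte_mempool_create("example", 1024, 128, 32, 0,
                                    NULL, NULL, obj_init, NULL,
                                    SOCKET_ID_ANY, 0);
            (void)mp;
    }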
 
 /**
- * Verify the Verbs Rx queue list is empty
+ * Free mempool of Multi-Packet RQ.
  *
  * @param dev
  *   Pointer to Ethernet device.
  *
  * @return
- *   The number of object not released.
+ *   0 on success, negative errno value on failure.
  */
 int
-mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_mprq_free_mp(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
-       int ret = 0;
-       struct mlx5_rxq_ibv *rxq_ibv;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_mempool *mp = priv->mprq_mp;
+       unsigned int i;
 
-       LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
-               DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
-                       dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
-               ++ret;
+       if (mp == NULL)
+               return 0;
+       DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
+               dev->data->port_id, mp->name);
+       /*
+        * If a buffer in the pool has been externally attached to an mbuf and
+        * is still in use by the application, destroying the Rx queue can
+        * spoil the packet. This is unlikely, but it can happen if the
+        * application dynamically creates and destroys queues while holding
+        * Rx packets.
+        *
+        * TODO: This is unavoidable for now because the mempool for
+        * Multi-Packet RQ isn't provided by the application but managed by
+        * the PMD.
+        */
+       if (!rte_mempool_full(mp)) {
+               DRV_LOG(ERR,
+                       "port %u mempool for Multi-Packet RQ is still in use",
+                       dev->data->port_id);
+               rte_errno = EBUSY;
+               return -rte_errno;
        }
-       return ret;
+       rte_mempool_free(mp);
+       /* Unset mempool for each Rx queue. */
+       for (i = 0; i != priv->rxqs_n; ++i) {
+               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+               if (rxq == NULL)
+                       continue;
+               rxq->mprq_mp = NULL;
+       }
+       priv->mprq_mp = NULL;
+       return 0;
 }
 
 /**
- * Return true if a single reference exists on the object.
+ * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
+ * mempool. If already allocated, reuse it if there are enough elements.
+ * Otherwise, resize it.
  *
- * @param rxq_ibv
- *   Verbs Rx queue object.
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
  */
 int
-mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 {
-       assert(rxq_ibv);
-       return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_mempool *mp = priv->mprq_mp;
+       char name[RTE_MEMPOOL_NAMESIZE];
+       unsigned int desc = 0;
+       unsigned int buf_len;
+       unsigned int obj_num;
+       unsigned int obj_size;
+       unsigned int strd_num_n = 0;
+       unsigned int strd_sz_n = 0;
+       unsigned int i;
+
+       if (!mlx5_mprq_enabled(dev))
+               return 0;
+       /* Count the total number of descriptors configured. */
+       for (i = 0; i != priv->rxqs_n; ++i) {
+               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+               if (rxq == NULL)
+                       continue;
+               desc += 1 << rxq->elts_n;
+               /* Get the max number of strides. */
+               if (strd_num_n < rxq->strd_num_n)
+                       strd_num_n = rxq->strd_num_n;
+               /* Get the max size of a stride. */
+               if (strd_sz_n < rxq->strd_sz_n)
+                       strd_sz_n = rxq->strd_sz_n;
+       }
+       assert(strd_num_n && strd_sz_n);
+       buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
+       obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+       /*
+        * Received packets can be either memcpy'd or externally referenced.
+        * When a packet is attached to an mbuf as an external buffer, it isn't
+        * possible to predict how the buffers will be queued by the
+        * application, so there is no way to pre-allocate the exact number of
+        * buffers in advance; instead, speculatively prepare enough of them.
+        *
+        * In the data path, if this mempool is depleted, the PMD will memcpy
+        * received packets into buffers provided by the application (rxq->mp)
+        * until this mempool becomes available again.
+        */
+       desc *= 4;
+       obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+       /*
+        * rte_mempool_create_empty() has a sanity check that refuses a cache
+        * size that is too large compared to the number of elements.
+        * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant
+        * 2 is used here instead.
+        */
+       obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
+       /* Check whether a mempool is already allocated and can be reused. */
+       if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
+               DRV_LOG(DEBUG, "port %u mempool %s is being reused",
+                       dev->data->port_id, mp->name);
+               /* Reuse. */
+               goto exit;
+       } else if (mp != NULL) {
+               DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
+                       dev->data->port_id, mp->name);
+               /*
+                * If freeing fails, the mempool may still be in use and there
+                * is no choice but to keep using the existing one. On buffer
+                * underrun, packets will be memcpy'd instead of attached as
+                * external buffers.
+                */
+               if (mlx5_mprq_free_mp(dev)) {
+                       if (mp->elt_size >= obj_size)
+                               goto exit;
+                       else
+                               return -rte_errno;
+               }
+       }
+       snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
+       mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
+                               0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+                               dev->device->numa_node, 0);
+       if (mp == NULL) {
+               DRV_LOG(ERR,
+                       "port %u failed to allocate a mempool for"
+                       " Multi-Packet RQ, count=%u, size=%u",
+                       dev->data->port_id, obj_num, obj_size);
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       priv->mprq_mp = mp;
+exit:
+       /* Set mempool for each Rx queue. */
+       for (i = 0; i != priv->rxqs_n; ++i) {
+               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+               if (rxq == NULL)
+                       continue;
+               rxq->mprq_mp = mp;
+       }
+       DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
+               dev->data->port_id);
+       return 0;
 }
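
To make the sizing logic above concrete, a worked example assuming 4 Rx queues of 512 descriptors each with strd_num_n = 6 and strd_sz_n = 11 (illustrative values, not defaults from this patch):

    /* Worked mempool sizing (illustrative values):
     *   buf_len  = (1 << 6) * (1 << 11)          = 131072 B per buffer
     *   obj_size = 131072 + sizeof(struct mlx5_mprq_buf)
     *   desc     = 4 * 512 = 2048, then desc *= 4 = 8192 (speculative slack)
     *   obj_num  = 8192 + MLX5_MPRQ_MP_CACHE_SZ * 4 (per-queue cache room)
     *   obj_num  = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2)
     */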
 
 /**
@@ -956,7 +1288,7 @@ mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
  * @param dev
  *   Pointer to Ethernet device.
  * @param idx
- *   TX queue index.
+ *   RX queue index.
  * @param desc
  *   Number of descriptors to configure in queue.
  * @param socket
@@ -970,16 +1302,20 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
             struct rte_mempool *mp)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
        unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+       unsigned int mprq_stride_size;
        struct mlx5_dev_config *config = &priv->config;
        /*
         * Always allocate extra slots, even if eventually
         * the vector Rx will not be used.
         */
-       const uint16_t desc_n =
+       uint16_t desc_n =
                desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+       uint64_t offloads = conf->offloads |
+                          dev->data->dev_conf.rxmode.offloads;
+       const int mprq_en = mlx5_check_mprq_support(dev) > 0;
 
        tmpl = rte_calloc_socket("RXQ", 1,
                                 sizeof(*tmpl) +
@@ -989,15 +1325,51 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = ENOMEM;
                return NULL;
        }
+       if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
+                              MLX5_MR_BTREE_CACHE_N, socket)) {
+               /* rte_errno is already set. */
+               goto error;
+       }
        tmpl->socket = socket;
-       if (priv->dev->data->dev_conf.intr_conf.rxq)
+       if (dev->data->dev_conf.intr_conf.rxq)
                tmpl->irq = 1;
-       /* Enable scattered packets support for this queue if necessary. */
+       /*
+        * This Rx queue can be configured as a Multi-Packet RQ if all of the
+        * following conditions are met:
+        *  - MPRQ is enabled.
+        *  - The number of descs is more than the number of strides.
+        *  - max_rx_pkt_len plus overhead is less than the max size of a
+        *    stride.
+        *  Otherwise, enable Rx scatter if necessary.
+        */
        assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-           (mb_len - RTE_PKTMBUF_HEADROOM)) {
+       mprq_stride_size =
+               dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               sizeof(struct rte_mbuf_ext_shared_info) +
+               RTE_PKTMBUF_HEADROOM;
+       if (mprq_en &&
+           desc > (1U << config->mprq.stride_num_n) &&
+           mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
+               /* TODO: Rx scatter isn't supported yet. */
                tmpl->rxq.sges_n = 0;
-       } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+               /* Trim the number of descs needed. */
+               desc >>= config->mprq.stride_num_n;
+               tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
+               tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
+                                             config->mprq.min_stride_size_n);
+               tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
+               tmpl->rxq.mprq_max_memcpy_len =
+                       RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
+                               config->mprq.max_memcpy_len);
+               DRV_LOG(DEBUG,
+                       "port %u Rx queue %u: Multi-Packet RQ is enabled"
+                       " strd_num_n = %u, strd_sz_n = %u",
+                       dev->data->port_id, idx,
+                       tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+       } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+                  (mb_len - RTE_PKTMBUF_HEADROOM)) {
+               tmpl->rxq.sges_n = 0;
+       } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
                unsigned int size =
                        RTE_PKTMBUF_HEADROOM +
                        dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1031,6 +1403,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        dev->data->dev_conf.rxmode.max_rx_pkt_len,
                        mb_len - RTE_PKTMBUF_HEADROOM);
        }
+       if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+               DRV_LOG(WARNING,
+                       "port %u MPRQ is requested but cannot be enabled"
+                       " (requested: desc = %u, stride_sz = %u,"
+                       " supported: min_stride_num = %u, max_stride_sz = %u).",
+                       dev->data->port_id, desc, mprq_stride_size,
+                       (1 << config->mprq.stride_num_n),
+                       (1 << config->mprq.max_stride_size_n));
        DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
                dev->data->port_id, 1 << tmpl->rxq.sges_n);
        if (desc % (1 << tmpl->rxq.sges_n)) {
@@ -1044,24 +1424,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                goto error;
        }
        /* Toggle RX checksum offload if hardware supports it. */
-       tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-       tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
-                               priv->config.tunnel_en);
-       tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+       tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+       tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
        /* Configure VLAN stripping. */
-       tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+       tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
        /* By default, FCS (CRC) is stripped by hardware. */
-       if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
-               tmpl->rxq.crc_present = 0;
-       } else if (config->hw_fcs_strip) {
-               tmpl->rxq.crc_present = 1;
-       } else {
-               DRV_LOG(WARNING,
-                       "port %u CRC stripping has been disabled but will"
-                       " still be performed by hardware, make sure MLNX_OFED"
-                       " and firmware are up to date",
-                       dev->data->port_id);
-               tmpl->rxq.crc_present = 0;
+       tmpl->rxq.crc_present = 0;
+       if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+               if (config->hw_fcs_strip) {
+                       /*
+                        * RQs used for LRO-enabled TIRs should not be
+                        * configured to scatter the FCS.
+                        */
+                       if (mlx5_lro_on(dev))
+                               DRV_LOG(WARNING,
+                                       "port %u CRC stripping has been "
+                                       "disabled but will still be performed "
+                                       "by hardware, because LRO is enabled",
+                                       dev->data->port_id);
+                       else
+                               tmpl->rxq.crc_present = 1;
+               } else {
+                       DRV_LOG(WARNING,
+                               "port %u CRC stripping has been disabled but will"
+                               " still be performed by hardware, make sure MLNX_OFED"
+                               " and firmware are up to date",
+                               dev->data->port_id);
+               }
        }
        DRV_LOG(DEBUG,
                "port %u CRC stripping is %s, %u bytes will be subtracted from"
@@ -1075,14 +1464,16 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        tmpl->rxq.port_id = dev->data->port_id;
        tmpl->priv = priv;
        tmpl->rxq.mp = mp;
-       tmpl->rxq.stats.idx = idx;
        tmpl->rxq.elts_n = log2above(desc);
+       tmpl->rxq.rq_repl_thresh =
+               MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
        tmpl->rxq.elts =
                (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
-       tmpl->idx = idx;
+#ifndef RTE_ARCH_64
+       tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+#endif
+       tmpl->rxq.idx = idx;
        rte_atomic32_inc(&tmpl->refcnt);
-       DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
-               idx, rte_atomic32_read(&tmpl->refcnt));
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
@@ -1096,7 +1487,7 @@ error:
  * @param dev
  *   Pointer to Ethernet device.
  * @param idx
- *   TX queue index.
+ *   RX queue index.
  *
  * @return
  *   A pointer to the queue if it exists, NULL otherwise.
@@ -1104,18 +1495,15 @@ error:
 struct mlx5_rxq_ctrl *
 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
 
        if ((*priv->rxqs)[idx]) {
                rxq_ctrl = container_of((*priv->rxqs)[idx],
                                        struct mlx5_rxq_ctrl,
                                        rxq);
-               mlx5_rxq_ibv_get(dev, idx);
+               mlx5_rxq_obj_get(dev, idx);
                rte_atomic32_inc(&rxq_ctrl->refcnt);
-               DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
-                       dev->data->port_id, rxq_ctrl->idx,
-                       rte_atomic32_read(&rxq_ctrl->refcnt));
        }
        return rxq_ctrl;
 }
@@ -1126,7 +1514,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
  * @param dev
  *   Pointer to Ethernet device.
  * @param idx
- *   TX queue index.
+ *   RX queue index.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
@@ -1134,18 +1522,17 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *rxq_ctrl;
 
        if (!(*priv->rxqs)[idx])
                return 0;
        rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
        assert(rxq_ctrl->priv);
-       if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
-               rxq_ctrl->ibv = NULL;
-       DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
-               rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
+       if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
+               rxq_ctrl->obj = NULL;
        if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+               mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
                LIST_REMOVE(rxq_ctrl, next);
                rte_free(rxq_ctrl);
                (*priv->rxqs)[idx] = NULL;
@@ -1154,32 +1541,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
        return 1;
 }
 
-/**
- * Verify if the queue can be released.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   TX queue index.
- *
- * @return
- *   1 if the queue can be released, negative errno otherwise and rte_errno is
- *   set.
- */
-int
-mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
-{
-       struct priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
-
-       if (!(*priv->rxqs)[idx]) {
-               rte_errno = EINVAL;
-               return -rte_errno;
-       }
-       rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-       return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
-}
-
 /**
  * Verify the Rx Queue list is empty
  *
@@ -1192,13 +1553,13 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_rxq_verify(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *rxq_ctrl;
        int ret = 0;
 
        LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
                DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
-                       dev->data->port_id, rxq_ctrl->idx);
+                       dev->data->port_id, rxq_ctrl->rxq.idx);
                ++ret;
        }
        return ret;
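
Since mlx5_rxq_verify() returns the number of Rx queues still referenced, a close path can use it as a leak check. A hedged sketch of such a caller (hypothetical, not part of this patch):

	int ret = mlx5_rxq_verify(dev);

	if (ret)
		DRV_LOG(WARNING, "port %u %d Rx queue(s) still referenced",
			dev->data->port_id, ret);
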
@@ -1217,11 +1578,11 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
  * @return
  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
  */
-struct mlx5_ind_table_ibv *
+static struct mlx5_ind_table_ibv *
 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
                       uint32_t queues_n)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ind_table_ibv *ind_tbl;
        const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
                log2above(queues_n) :
@@ -1241,7 +1602,7 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
 
                if (!rxq)
                        goto error;
-               wq[i] = rxq->ibv->wq;
+               wq[i] = rxq->obj->wq;
                ind_tbl->queues[i] = queues[i];
        }
        ind_tbl->queues_n = queues_n;
@@ -1249,7 +1610,7 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
        for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
                wq[i] = wq[j];
        ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
-               (priv->ctx,
+               (priv->sh->ctx,
                 &(struct ibv_rwq_ind_table_init_attr){
                        .log_ind_tbl_size = wq_n,
                        .ind_tbl = wq,
@@ -1261,14 +1622,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
        }
        rte_atomic32_inc(&ind_tbl->refcnt);
        LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-       DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-               dev->data->port_id, (void *)ind_tbl,
-               rte_atomic32_read(&ind_tbl->refcnt));
        return ind_tbl;
 error:
        rte_free(ind_tbl);
-       DRV_LOG(DEBUG, "port %u cannot create indirection table",
-               dev->data->port_id);
+       DEBUG("port %u cannot create indirection table", dev->data->port_id);
        return NULL;
 }
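
The indirection table size is always a power of two: for queues_n = 6, wq_n = log2above(6) = 3, so the table holds 8 entries and the wrap loop above fills slots 6 and 7 with WQs 0 and 1 again. A standalone, runnable sketch of that padding, using integers in place of ibv_wq pointers and hardcoding the log2above() result:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int queues_n = 6;
		unsigned int wq_n = 3; /* log2above(6) */
		unsigned int wq[1 << 3];
		unsigned int i, j;

		for (i = 0; i != queues_n; ++i)
			wq[i] = i; /* stand-in for rxq->obj->wq */
		/* Same wrap as above: pad the tail from the head. */
		for (j = 0; i != (1u << wq_n); ++i, ++j)
			wq[i] = wq[j];
		for (i = 0; i != (1u << wq_n); ++i)
			printf("slot %u -> wq %u\n", i, wq[i]);
		return 0;
	}
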
 
@@ -1285,11 +1642,11 @@ error:
  * @return
  *   An indirection table if found.
  */
-struct mlx5_ind_table_ibv *
+static struct mlx5_ind_table_ibv *
 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
                       uint32_t queues_n)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ind_table_ibv *ind_tbl;
 
        LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
@@ -1303,9 +1660,6 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
                unsigned int i;
 
                rte_atomic32_inc(&ind_tbl->refcnt);
-               DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-                       dev->data->port_id, (void *)ind_tbl,
-                       rte_atomic32_read(&ind_tbl->refcnt));
                for (i = 0; i != ind_tbl->queues_n; ++i)
                        mlx5_rxq_get(dev, ind_tbl->queues[i]);
        }
@@ -1323,15 +1677,12 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
-int
+static int
 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
                           struct mlx5_ind_table_ibv *ind_tbl)
 {
        unsigned int i;
 
-       DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-               ((struct priv *)dev->data->dev_private)->port,
-               (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
        if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
                claim_zero(mlx5_glue->destroy_rwq_ind_table
                           (ind_tbl->ind_table));
@@ -1357,7 +1708,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 int
 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ind_table_ibv *ind_tbl;
        int ret = 0;
 
@@ -1386,6 +1737,8 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
+ * @param tunnel
+ *   Tunnel type.
  *
  * @return
  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1394,12 +1747,16 @@ struct mlx5_hrxq *
 mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
-             const uint16_t *queues, uint32_t queues_n)
+             const uint16_t *queues, uint32_t queues_n,
+             int tunnel __rte_unused)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
        struct mlx5_ind_table_ibv *ind_tbl;
        struct ibv_qp *qp;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       struct mlx5dv_qp_init_attr qp_init_attr;
+#endif
        int err;
 
        queues_n = hash_fields ? queues_n : 1;
@@ -1410,12 +1767,43 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                rte_errno = ENOMEM;
                return NULL;
        }
-       if (!rss_key_len) {
-               rss_key_len = rss_hash_default_key_len;
-               rss_key = rss_hash_default_key;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+       if (tunnel) {
+               qp_init_attr.comp_mask =
+                               MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+               qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+       }
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       if (dev->data->dev_conf.lpbk_mode) {
+               /* Allow packets from NIC to loop back w/o source MAC check. */
+               qp_init_attr.comp_mask |=
+                               MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+               qp_init_attr.create_flags |=
+                               MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
        }
+#endif
+       qp = mlx5_glue->dv_create_qp
+               (priv->sh->ctx,
+                &(struct ibv_qp_init_attr_ex){
+                       .qp_type = IBV_QPT_RAW_PACKET,
+                       .comp_mask =
+                               IBV_QP_INIT_ATTR_PD |
+                               IBV_QP_INIT_ATTR_IND_TABLE |
+                               IBV_QP_INIT_ATTR_RX_HASH,
+                       .rx_hash_conf = (struct ibv_rx_hash_conf){
+                               .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+                               .rx_hash_key_len = rss_key_len,
+                               .rx_hash_key = (void *)(uintptr_t)rss_key,
+                               .rx_hash_fields_mask = hash_fields,
+                       },
+                       .rwq_ind_tbl = ind_tbl->ind_table,
+                       .pd = priv->sh->pd,
+                },
+                &qp_init_attr);
+#else
        qp = mlx5_glue->create_qp_ex
-               (priv->ctx,
+               (priv->sh->ctx,
                 &(struct ibv_qp_init_attr_ex){
                        .qp_type = IBV_QPT_RAW_PACKET,
                        .comp_mask =
@@ -1429,8 +1817,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                                .rx_hash_fields_mask = hash_fields,
                        },
                        .rwq_ind_tbl = ind_tbl->ind_table,
-                       .pd = priv->pd,
+                       .pd = priv->sh->pd,
                 });
+#endif
        if (!qp) {
                rte_errno = errno;
                goto error;
@@ -1443,11 +1832,15 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        hrxq->rss_key_len = rss_key_len;
        hrxq->hash_fields = hash_fields;
        memcpy(hrxq->rss_key, rss_key, rss_key_len);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
+       if (!hrxq->action) {
+               rte_errno = errno;
+               goto error;
+       }
+#endif
        rte_atomic32_inc(&hrxq->refcnt);
        LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
-       DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-               dev->data->port_id, (void *)hrxq,
-               rte_atomic32_read(&hrxq->refcnt));
        return hrxq;
 error:
        err = rte_errno; /* Save rte_errno before cleanup. */
@@ -1480,7 +1873,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
              uint64_t hash_fields,
              const uint16_t *queues, uint32_t queues_n)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
 
        queues_n = hash_fields ? queues_n : 1;
@@ -1501,9 +1894,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
                        continue;
                }
                rte_atomic32_inc(&hrxq->refcnt);
-               DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-                       dev->data->port_id, (void *)hrxq,
-                       rte_atomic32_read(&hrxq->refcnt));
                return hrxq;
        }
        return NULL;
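
Callers are expected to probe the cache before creating a new hash Rx queue. A hedged get-or-create sketch for a TCP/IPv4 RSS hash, assuming the driver's internal headers are available; the queue list and hash fields are purely illustrative:

	static const uint16_t queues[] = { 0, 1 };
	const uint64_t fields = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
				IBV_RX_HASH_SRC_PORT_TCP |
				IBV_RX_HASH_DST_PORT_TCP;
	struct mlx5_hrxq *hrxq;

	hrxq = mlx5_hrxq_get(dev, rss_hash_default_key, MLX5_RSS_HASH_KEY_LEN,
			     fields, queues, RTE_DIM(queues));
	if (!hrxq)
		hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
				     MLX5_RSS_HASH_KEY_LEN, fields, queues,
				     RTE_DIM(queues), 0 /* no tunnel */);
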
@@ -1523,10 +1913,10 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 int
 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
-       DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-               ((struct priv *)dev->data->dev_private)->port,
-               (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
        if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+               mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
                claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
                mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
                LIST_REMOVE(hrxq, next);
@@ -1549,7 +1939,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 int
 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
        int ret = 0;
 
@@ -1561,3 +1951,246 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
        }
        return ret;
 }
+
+/**
+ * Create a drop Rx queue Verbs/DevX object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct ibv_context *ctx = priv->sh->ctx;
+       struct ibv_cq *cq;
+       struct ibv_wq *wq = NULL;
+       struct mlx5_rxq_obj *rxq;
+
+       if (priv->drop_queue.rxq)
+               return priv->drop_queue.rxq;
+       cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
+       if (!cq) {
+               DEBUG("port %u cannot allocate CQ for drop queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       wq = mlx5_glue->create_wq(ctx,
+                &(struct ibv_wq_init_attr){
+                       .wq_type = IBV_WQT_RQ,
+                       .max_wr = 1,
+                       .max_sge = 1,
+                       .pd = priv->sh->pd,
+                       .cq = cq,
+                });
+       if (!wq) {
+               DEBUG("port %u cannot allocate WQ for drop queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+       if (!rxq) {
+               DEBUG("port %u cannot allocate drop Rx queue memory",
+                     dev->data->port_id);
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       rxq->cq = cq;
+       rxq->wq = wq;
+       priv->drop_queue.rxq = rxq;
+       return rxq;
+error:
+       if (wq)
+               claim_zero(mlx5_glue->destroy_wq(wq));
+       if (cq)
+               claim_zero(mlx5_glue->destroy_cq(cq));
+       return NULL;
+}
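
mlx5_rxq_obj_drop_new() is a lazy singleton: the first call builds a minimal one-descriptor CQ/WQ pair and caches it in priv->drop_queue.rxq, and every later call returns the cached object. The same shape in miniature, with hypothetical names and calloc() standing in for the Verbs allocations:

	#include <stdlib.h>

	struct resource { int dummy; };
	static struct resource *cached;

	struct resource *
	resource_get(void)
	{
		if (cached)
			return cached; /* later calls: reuse */
		cached = calloc(1, sizeof(*cached)); /* first call: create */
		return cached; /* NULL on allocation failure */
	}
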
+
+/**
+ * Release a drop Rx queue Verbs/DevX object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+
+       if (rxq->wq)
+               claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+       if (rxq->cq)
+               claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+       rte_free(rxq);
+       priv->drop_queue.rxq = NULL;
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ind_table_ibv *ind_tbl;
+       struct mlx5_rxq_obj *rxq;
+       struct mlx5_ind_table_ibv tmpl;
+
+       rxq = mlx5_rxq_obj_drop_new(dev);
+       if (!rxq)
+               return NULL;
+       tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+               (priv->sh->ctx,
+                &(struct ibv_rwq_ind_table_init_attr){
+                       .log_ind_tbl_size = 0,
+                       .ind_tbl = &rxq->wq,
+                       .comp_mask = 0,
+                });
+       if (!tmpl.ind_table) {
+               DEBUG("port %u cannot allocate indirection table for drop"
+                     " queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+       if (!ind_tbl) {
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       ind_tbl->ind_table = tmpl.ind_table;
+       return ind_tbl;
+error:
+       mlx5_rxq_obj_drop_release(dev);
+       return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+
+       claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+       mlx5_rxq_obj_drop_release(dev);
+       rte_free(ind_tbl);
+       priv->drop_queue.hrxq->ind_table = NULL;
+}
+
+/**
+ * Create a drop hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ind_table_ibv *ind_tbl;
+       struct ibv_qp *qp;
+       struct mlx5_hrxq *hrxq;
+
+       if (priv->drop_queue.hrxq) {
+               rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+               return priv->drop_queue.hrxq;
+       }
+       ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+       if (!ind_tbl)
+               return NULL;
+       qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
+                &(struct ibv_qp_init_attr_ex){
+                       .qp_type = IBV_QPT_RAW_PACKET,
+                       .comp_mask =
+                               IBV_QP_INIT_ATTR_PD |
+                               IBV_QP_INIT_ATTR_IND_TABLE |
+                               IBV_QP_INIT_ATTR_RX_HASH,
+                       .rx_hash_conf = (struct ibv_rx_hash_conf){
+                               .rx_hash_function =
+                                       IBV_RX_HASH_FUNC_TOEPLITZ,
+                               .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
+                               .rx_hash_key = rss_hash_default_key,
+                               .rx_hash_fields_mask = 0,
+                       },
+                       .rwq_ind_tbl = ind_tbl->ind_table,
+                       .pd = priv->sh->pd
+                });
+       if (!qp) {
+               DEBUG("port %u cannot allocate QP for drop queue",
+                     dev->data->port_id);
+               rte_errno = errno;
+               goto error;
+       }
+       hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+       if (!hrxq) {
+               DRV_LOG(WARNING,
+                       "port %u cannot allocate memory for drop queue",
+                       dev->data->port_id);
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       hrxq->ind_table = ind_tbl;
+       hrxq->qp = qp;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
+       if (!hrxq->action) {
+               rte_errno = errno;
+               goto error;
+       }
+#endif
+       priv->drop_queue.hrxq = hrxq;
+       rte_atomic32_set(&hrxq->refcnt, 1);
+       return hrxq;
+error:
+       if (ind_tbl)
+               mlx5_ind_table_ibv_drop_release(dev);
+       return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+       if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+               mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+               claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+               mlx5_ind_table_ibv_drop_release(dev);
+               rte_free(hrxq);
+               priv->drop_queue.hrxq = NULL;
+       }
+}
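
To close, a hedged usage sketch for the drop pair above: a hypothetical flow-engine caller takes the shared drop hash Rx queue, points a flow at it, and releases its reference when the flow is destroyed; the underlying objects go away only on the last release:

	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);

	if (drop != NULL) {
		/* ... attach drop->qp (or drop->action with DV) to a flow ... */
		mlx5_hrxq_drop_release(dev);
	}
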