net/mlx5: improve Verbs flow priority discovery
[dpdk.git] / drivers / net / mlx5 / mlx5_rxq.c
index da7a8b3..2940c95 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_debug.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_malloc.h>
+#include <mlx5_common_mr.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_tx.h"
+#include "mlx5_rx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
 
@@ -48,77 +50,6 @@ static_assert(MLX5_RSS_HASH_KEY_LEN ==
              (unsigned int)sizeof(rss_hash_default_key),
              "wrong RSS default key size.");
 
-/**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       if (priv->config.mprq.enabled &&
-           priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
-               return 1;
-       return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
- *
- *  @param rxq
- *     Pointer to receive queue structure.
- *
- * @return
- *   0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
-       return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t i;
-       uint16_t n = 0;
-       uint16_t n_ibv = 0;
-
-       if (mlx5_check_mprq_support(dev) < 0)
-               return 0;
-       /* All the configured queues should be enabled. */
-       for (i = 0; i < priv->rxqs_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-                       (rxq, struct mlx5_rxq_ctrl, rxq);
-
-               if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
-                       continue;
-               n_ibv++;
-               if (mlx5_rxq_mprq_enabled(rxq))
-                       ++n;
-       }
-       /* Multi-Packet RQ can't be partially configured. */
-       MLX5_ASSERT(n == 0 || n == n_ibv);
-       return n == n_ibv;
-}
-
 /**
  * Calculate the number of CQEs in CQ for the Rx queue.
  *
@@ -346,7 +277,9 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
                (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
                (1 << rxq->elts_n);
        const uint16_t q_mask = q_n - 1;
-       uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+       uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+               rxq->elts_ci : rxq->rq_ci;
+       uint16_t used = q_n - (elts_ci - rxq->rq_pi);
        uint16_t i;
 
        DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
@@ -359,8 +292,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
         */
        if (mlx5_rxq_check_vec_support(rxq) > 0) {
                for (i = 0; i < used; ++i)
-                       (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
-               rxq->rq_pi = rxq->rq_ci;
+                       (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
+               rxq->rq_pi = elts_ci;
        }
        for (i = 0; i != q_n; ++i) {
                if ((*rxq->elts)[i] != NULL)
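
The replenishment fix above relies on unsigned wrap-around: elts_ci, rq_ci and rq_pi are free-running 16-bit counters, so their difference stays correct across the 65535 boundary as long as fewer than 65536 entries are outstanding. A minimal standalone sketch of that invariant (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t newer = 10;            /* index that wrapped past 65535 */
        uint16_t older = 65530;         /* index sampled before the wrap */
        uint16_t dist = newer - older;  /* 10 - 65530 wraps to 16 */

        assert(dist == 16);
        return 0;
}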
@@ -402,14 +335,13 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-                            RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
                             DEV_RX_OFFLOAD_TIMESTAMP |
-                            DEV_RX_OFFLOAD_JUMBO_FRAME |
                             DEV_RX_OFFLOAD_RSS_HASH);
 
+       if (!config->mprq.enabled)
+               offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
        if (config->hw_fcs_strip)
                offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
-
        if (config->hw_csum)
                offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
                             DEV_RX_OFFLOAD_UDP_CKSUM |
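
Buffer split is now advertised only when MPRQ is off, so applications should verify the capability at runtime instead of assuming it. A sketch of that check against the standard ethdev info API (port_id is assumed to be a valid port):

#include <rte_ethdev.h>

static int
rx_supports_buffer_split(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                return 0;
        return !!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT);
}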
@@ -862,25 +794,22 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 /**
 * DPDK callback to release an Rx queue.
  *
- * @param dpdk_rxq
- *   Generic RX queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
 void
-mlx5_rx_queue_release(void *dpdk_rxq)
+mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
-       struct mlx5_priv *priv;
+       struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
 
        if (rxq == NULL)
                return;
-       rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
-       priv = rxq_ctrl->priv;
-       if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
+       if (!mlx5_rxq_releasable(dev, qid))
                rte_panic("port %u Rx queue %u is still used by a flow and"
-                         " cannot be removed\n",
-                         PORT_ID(priv), rxq->idx);
-       mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
+                         " cannot be removed\n", dev->data->port_id, qid);
+       mlx5_rxq_release(dev, qid);
 }
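
This follows the DPDK 21.11 ethdev change that passes the device and queue index to the queue-release callback instead of an opaque queue pointer, which is what makes the container_of()/priv back-tracking in the deleted lines unnecessary. A schematic callback in the new shape (struct and helper names are hypothetical, not mlx5 code):

static void
my_pmd_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct my_rxq *rxq = dev->data->rx_queues[qid];

        if (rxq == NULL)
                return;
        my_rxq_free(rxq);               /* hypothetical per-queue teardown */
        dev->data->rx_queues[qid] = NULL;
}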
 
 /**
@@ -902,9 +831,6 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
        unsigned int count = 0;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
 
-       /* Representor shares dev->intr_handle with PF. */
-       if (priv->representor)
-               return 0;
        if (!dev->data->dev_conf.intr_conf.rxq)
                return 0;
        mlx5_rx_intr_vec_disable(dev);
@@ -985,9 +911,6 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
        unsigned int rxqs_n = priv->rxqs_n;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 
-       /* Representor shares dev->intr_handle with PF. */
-       if (priv->representor)
-               return;
        if (!dev->data->dev_conf.intr_conf.rxq)
                return;
        if (!intr_handle->intr_vec)
@@ -1239,6 +1162,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
        unsigned int strd_sz_n = 0;
        unsigned int i;
        unsigned int n_ibv = 0;
+       int ret;
 
        if (!mlx5_mprq_enabled(dev))
                return 0;
@@ -1308,7 +1232,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
        snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
        mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
                                0, NULL, NULL, mlx5_mprq_buf_init,
-                               (void *)(uintptr_t)(1 << strd_num_n),
+                               (void *)((uintptr_t)1 << strd_num_n),
                                dev->device->numa_node, 0);
        if (mp == NULL) {
                DRV_LOG(ERR,
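
The cast change on the mempool init argument is subtle: in the old form the shift 1 << strd_num_n is evaluated in plain int before widening, which overflows once the stride count reaches 31; the new form widens first. A two-function sketch of the difference:

#include <stdint.h>

uintptr_t
shift_after_widen(unsigned int n)
{
        return (uintptr_t)1 << n;       /* defined for n below the bit width of uintptr_t */
}

uintptr_t
shift_before_widen(unsigned int n)
{
        return (uintptr_t)(1 << n);     /* int shift happens first: undefined for n >= 31 */
}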
@@ -1318,6 +1242,16 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
                rte_errno = ENOMEM;
                return -rte_errno;
        }
+       ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
+                                      priv->sh->cdev->pd, mp, &priv->mp_id);
+       if (ret < 0 && rte_errno != EEXIST) {
+               ret = rte_errno;
+               DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
+                       dev->data->port_id);
+               rte_mempool_free(mp);
+               rte_errno = ret;
+               return -rte_errno;
+       }
        priv->mprq_mp = mp;
 exit:
        /* Set mempool for each Rx queue. */
@@ -1404,10 +1338,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        uint64_t offloads = conf->offloads |
                           dev->data->dev_conf.rxmode.offloads;
        unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-       unsigned int max_rx_pkt_len = lro_on_queue ?
+       unsigned int max_rx_pktlen = lro_on_queue ?
                        dev->data->dev_conf.rxmode.max_lro_pkt_size :
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len;
-       unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+                       dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
+                               RTE_ETHER_CRC_LEN;
+       unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
                                                        RTE_PKTMBUF_HEADROOM;
        unsigned int max_lro_size = 0;
        unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
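
With max_rx_pkt_len removed from rte_eth_rxmode in DPDK 21.11, the maximum frame length is reconstructed from the configured MTU plus L2 overhead, as the replacement lines show. The same arithmetic as a standalone helper:

#include <rte_ether.h>

static inline unsigned int
frame_len_from_mtu(unsigned int mtu)
{
        /* e.g. 1500 + 14 + 4 = 1518 for a standard Ethernet frame */
        return mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
}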
@@ -1446,7 +1381,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         * needed to handle max size packets, replace zero length
         * with the buffer length from the pool.
         */
-       tail_len = max_rx_pkt_len;
+       tail_len = max_rx_pktlen;
        do {
                struct mlx5_eth_rxseg *hw_seg =
                                        &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
@@ -1484,7 +1419,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                "port %u too many SGEs (%u) needed to handle"
                                " requested maximum packet size %u, the maximum"
                                " supported are %u", dev->data->port_id,
-                               tmpl->rxq.rxseg_n, max_rx_pkt_len,
+                               tmpl->rxq.rxseg_n, max_rx_pktlen,
                                MLX5_MAX_RXQ_NSEG);
                        rte_errno = ENOTSUP;
                        goto error;
@@ -1509,14 +1444,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
                        " configured and no enough mbuf space(%u) to contain "
                        "the maximum RX packet length(%u) with head-room(%u)",
-                       dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+                       dev->data->port_id, idx, mb_len, max_rx_pktlen,
                        RTE_PKTMBUF_HEADROOM);
                rte_errno = ENOSPC;
                goto error;
        }
        tmpl->type = MLX5_RXQ_TYPE_STANDARD;
-       if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
-                              MLX5_MR_BTREE_CACHE_N, socket)) {
+       if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+                             &priv->sh->cdev->mr_scache.dev_gen, socket)) {
                /* rte_errno is already set. */
                goto error;
        }
@@ -1528,7 +1463,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         * following conditions are met:
         *  - MPRQ is enabled.
         *  - The number of descs is more than the number of strides.
-        *  - max_rx_pkt_len plus overhead is less than the max size
+        *  - max_rx_pktlen plus overhead is less than the max size
         *    of a stride or mprq_stride_size is specified by a user.
         *    Need to make sure that there are enough strides to encap
         *    the maximum packet size in case mprq_stride_size is set.
@@ -1552,7 +1487,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                !!(offloads & DEV_RX_OFFLOAD_SCATTER);
                tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
                                config->mprq.max_memcpy_len);
-               max_lro_size = RTE_MIN(max_rx_pkt_len,
+               max_lro_size = RTE_MIN(max_rx_pktlen,
                                       (1u << tmpl->rxq.strd_num_n) *
                                       (1u << tmpl->rxq.strd_sz_n));
                DRV_LOG(DEBUG,
@@ -1561,9 +1496,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        dev->data->port_id, idx,
                        tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
        } else if (tmpl->rxq.rxseg_n == 1) {
-               MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
+               MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
                tmpl->rxq.sges_n = 0;
-               max_lro_size = max_rx_pkt_len;
+               max_lro_size = max_rx_pktlen;
        } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
                unsigned int sges_n;
 
@@ -1585,13 +1520,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                "port %u too many SGEs (%u) needed to handle"
                                " requested maximum packet size %u, the maximum"
                                " supported are %u", dev->data->port_id,
-                               1 << sges_n, max_rx_pkt_len,
+                               1 << sges_n, max_rx_pktlen,
                                1u << MLX5_MAX_LOG_RQ_SEGS);
                        rte_errno = ENOTSUP;
                        goto error;
                }
                tmpl->rxq.sges_n = sges_n;
-               max_lro_size = max_rx_pkt_len;
+               max_lro_size = max_rx_pktlen;
        }
        if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
                DRV_LOG(WARNING,
@@ -1689,6 +1624,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
+       mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
        mlx5_free(tmpl);
        return NULL;
 }
@@ -1779,7 +1715,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *rxq_ctrl;
 
-       if (!(*priv->rxqs)[idx])
+       if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
                return 0;
        rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
        if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
@@ -1924,20 +1860,18 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ind_table_obj *ind_tbl;
 
+       rte_rwlock_read_lock(&priv->ind_tbls_lock);
        LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
                if ((ind_tbl->queues_n == queues_n) &&
                    (memcmp(ind_tbl->queues, queues,
                            ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
-                    == 0))
+                    == 0)) {
+                       __atomic_fetch_add(&ind_tbl->refcnt, 1,
+                                          __ATOMIC_RELAXED);
                        break;
+               }
        }
-       if (ind_tbl) {
-               unsigned int i;
-
-               __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
-               for (i = 0; i != ind_tbl->queues_n; ++i)
-                       mlx5_rxq_get(dev, ind_tbl->queues[i]);
-       }
+       rte_rwlock_read_unlock(&priv->ind_tbls_lock);
        return ind_tbl;
 }
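
The lookup now bumps the reference while still holding the read lock, so a concurrent release cannot free the table between the match and the refcount increment. A generic, self-contained sketch of the pattern (types and names hypothetical):

#include <stdint.h>
#include <sys/queue.h>
#include <rte_rwlock.h>

struct node {
        LIST_ENTRY(node) next;
        int key;
        uint32_t refcnt;
};
LIST_HEAD(node_list, node);

static struct node *
node_get(struct node_list *head, rte_rwlock_t *lock, int key)
{
        struct node *n;

        rte_rwlock_read_lock(lock);
        LIST_FOREACH(n, head, next) {
                if (n->key == key) {
                        /* reference taken under the lock */
                        __atomic_fetch_add(&n->refcnt, 1, __ATOMIC_RELAXED);
                        break;
                }
        }
        rte_rwlock_read_unlock(lock);
        return n;       /* NULL when the list was exhausted */
}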
 
@@ -1960,19 +1894,20 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
                           bool standalone)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       unsigned int i;
+       unsigned int i, ret;
 
-       if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
-               priv->obj_ops.ind_table_destroy(ind_tbl);
+       rte_rwlock_write_lock(&priv->ind_tbls_lock);
+       ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+       if (!ret && !standalone)
+               LIST_REMOVE(ind_tbl, next);
+       rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+       if (ret)
+               return 1;
+       priv->obj_ops.ind_table_destroy(ind_tbl);
        for (i = 0; i != ind_tbl->queues_n; ++i)
                claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
-       if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
-               if (!standalone)
-                       LIST_REMOVE(ind_tbl, next);
-               mlx5_free(ind_tbl);
-               return 0;
-       }
-       return 1;
+       mlx5_free(ind_tbl);
+       return 0;
 }
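
The rewritten release applies the same discipline in reverse: the decrement and unlink happen under the write lock, while the destroy runs outside it to keep the critical section short. A companion sketch to node_get() above:

#include <stdlib.h>

static int
node_put(struct node_list *head, rte_rwlock_t *lock, struct node *n)
{
        uint32_t left;

        (void)head;     /* linkage is already reachable through n */
        rte_rwlock_write_lock(lock);
        left = __atomic_sub_fetch(&n->refcnt, 1, __ATOMIC_RELAXED);
        if (left == 0)
                LIST_REMOVE(n, next);
        rte_rwlock_write_unlock(lock);
        if (left != 0)
                return 1;       /* still referenced */
        free(n);                /* destroy outside the lock */
        return 0;
}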
 
 /**
@@ -1991,12 +1926,14 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
        struct mlx5_ind_table_obj *ind_tbl;
        int ret = 0;
 
+       rte_rwlock_read_lock(&priv->ind_tbls_lock);
        LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
                DRV_LOG(DEBUG,
                        "port %u indirection table obj %p still referenced",
                        dev->data->port_id, (void *)ind_tbl);
                ++ret;
        }
+       rte_rwlock_read_unlock(&priv->ind_tbls_lock);
        return ret;
 }
 
@@ -2040,7 +1977,8 @@ error:
        for (j = 0; j < i; j++)
                mlx5_rxq_release(dev, ind_tbl->queues[j]);
        rte_errno = err;
-       DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+       DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
+               dev->data->port_id);
        return ret;
 }
 
@@ -2081,8 +2019,11 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
                mlx5_free(ind_tbl);
                return NULL;
        }
-       if (!standalone)
+       if (!standalone) {
+               rte_rwlock_write_lock(&priv->ind_tbls_lock);
                LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+               rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+       }
        return ind_tbl;
 }
 
@@ -2124,8 +2065,9 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
                 * reference unsupported. Intended for standalone indirection
                 * tables only.
                 */
-               DEBUG("Port %u cannot modify indirection table (refcnt> 1).",
-                     dev->data->port_id);
+               DRV_LOG(DEBUG,
+                       "Port %u cannot modify indirection table (refcnt > 1).",
+                       dev->data->port_id);
                rte_errno = EINVAL;
                return -rte_errno;
        }
@@ -2147,45 +2089,27 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 error:
        err = rte_errno;
        for (j = 0; j < i; j++)
-               mlx5_rxq_release(dev, ind_tbl->queues[j]);
+               mlx5_rxq_release(dev, queues[j]);
        rte_errno = err;
-       DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+       DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
+               dev->data->port_id);
        return ret;
 }
 
-/**
- * Match an Rx Hash queue.
- *
- * @param list
- *   Cache list pointer.
- * @param entry
- *   Hash queue entry pointer.
- * @param cb_ctx
- *   Context of the callback function.
- *
- * @return
- *   0 if match, none zero if not match.
- */
 int
-mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
-                  struct mlx5_cache_entry *entry,
+mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
                   void *cb_ctx)
 {
-       struct rte_eth_dev *dev = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_rss_desc *rss_desc = ctx->data;
        struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
-       struct mlx5_ind_table_obj *ind_tbl;
 
-       if (hrxq->rss_key_len != rss_desc->key_len ||
+       return (hrxq->rss_key_len != rss_desc->key_len ||
            memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
-           hrxq->hash_fields != rss_desc->hash_fields)
-               return 1;
-       ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
-                                        rss_desc->queue_num);
-       if (ind_tbl)
-               mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
-       return ind_tbl != hrxq->ind_table;
+           hrxq->hash_fields != rss_desc->hash_fields ||
+           hrxq->ind_table->queues_n != rss_desc->queue_num ||
+           memcmp(hrxq->ind_table->queues, rss_desc->queue,
+           rss_desc->queue_num * sizeof(rss_desc->queue[0])));
 }
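
The match callback no longer materializes an indirection table just to compare against it; it checks the queue list directly, keeping the memcmp()-style contract of returning 0 on match and nonzero otherwise. The queue comparison, reduced to a standalone form:

#include <stdint.h>
#include <string.h>

static int
queues_differ(const uint16_t *a, uint32_t a_n,
              const uint16_t *b, uint32_t b_n)
{
        /* 0 when count and contents both agree, nonzero otherwise */
        return a_n != b_n || memcmp(a, b, a_n * sizeof(a[0])) != 0;
}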
 
 /**
@@ -2306,15 +2230,14 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
  *   Index to Hash Rx queue to release.
  *
  * @param list
- *   Cache list pointer.
+ *   mlx5 list pointer.
  * @param entry
  *   Hash queue entry pointer.
  */
 void
-mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
-                   struct mlx5_cache_entry *entry)
+mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
 {
-       struct rte_eth_dev *dev = list->ctx;
+       struct rte_eth_dev *dev = tool_ctx;
        struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
 
        __mlx5_hrxq_remove(dev, hrxq);
@@ -2365,25 +2288,10 @@ error:
        return NULL;
 }
 
-/**
- * Create an Rx Hash queue.
- *
- * @param list
- *   Cache list pointer.
- * @param entry
- *   Hash queue entry pointer.
- * @param cb_ctx
- *   Context of the callback function.
- *
- * @return
- *   queue entry on success, NULL otherwise.
- */
-struct mlx5_cache_entry *
-mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
-                   struct mlx5_cache_entry *entry __rte_unused,
-                   void *cb_ctx)
+struct mlx5_list_entry *
+mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
 {
-       struct rte_eth_dev *dev = list->ctx;
+       struct rte_eth_dev *dev = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_rss_desc *rss_desc = ctx->data;
        struct mlx5_hrxq *hrxq;
@@ -2392,6 +2300,33 @@ mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
        return hrxq ? &hrxq->entry : NULL;
 }
 
+struct mlx5_list_entry *
+mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+                   void *cb_ctx __rte_unused)
+{
+       struct rte_eth_dev *dev = tool_ctx;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hrxq *hrxq;
+       uint32_t hrxq_idx = 0;
+
+       hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
+       if (!hrxq)
+               return NULL;
+       memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
+       hrxq->idx = hrxq_idx;
+       return &hrxq->entry;
+}
+
+void
+mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+       struct rte_eth_dev *dev = tool_ctx;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
+}
+
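
The clone callbacks back mlx5_list's per-lcore caches: a hit on another core's cache duplicates the entry locally rather than taking a lock. The single memcpy() works because the RSS key occupies trailing storage after the fixed-size fields; a simplified layout sketch (not the exact driver definition):

#include <stdint.h>

struct list_entry_sketch {              /* stand-in for mlx5_list_entry */
        void *next;
        uint32_t lcore_idx;
};

struct hrxq_sketch {
        struct list_entry_sketch entry; /* list linkage, embedded first */
        uint32_t idx;                   /* index into the hrxq ipool */
        uint32_t rss_key_len;
        uint64_t hash_fields;
        uint8_t rss_key[];              /* MLX5_RSS_HASH_KEY_LEN bytes follow */
};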
 /**
  * Get an Rx Hash queue.
  *
@@ -2408,7 +2343,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
-       struct mlx5_cache_entry *entry;
+       struct mlx5_list_entry *entry;
        struct mlx5_flow_cb_ctx ctx = {
                .data = rss_desc,
        };
@@ -2416,12 +2351,14 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
        if (rss_desc->shared_rss) {
                hrxq = __mlx5_hrxq_create(dev, rss_desc);
        } else {
-               entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+               entry = mlx5_list_register(priv->hrxqs, &ctx);
                if (!entry)
                        return 0;
                hrxq = container_of(entry, typeof(*hrxq), entry);
        }
-       return hrxq->idx;
+       if (hrxq)
+               return hrxq->idx;
+       return 0;
 }
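
mlx5_list_register() returns the embedded list entry, and container_of() recovers the enclosing hrxq from it. For reference, the usual definition of that idiom with a minimal use (generic names, not the driver's):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct entry {
        void *next;
};

struct obj {
        int idx;
        struct entry entry;     /* embedded; need not be the first member */
};

static struct obj *
obj_from_entry(struct entry *e)
{
        return container_of(e, struct obj, entry);
}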
 
 /**
@@ -2444,7 +2381,7 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
        if (!hrxq)
                return 0;
        if (!hrxq->standalone)
-               return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+               return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
        __mlx5_hrxq_remove(dev, hrxq);
        return 0;
 }
@@ -2532,7 +2469,7 @@ mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       return mlx5_cache_list_get_entry_num(&priv->hrxqs);
+       return mlx5_list_get_entry_num(priv->hrxqs);
 }
 
 /**