+ return NULL;
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param list
+ * Cache list pointer.
+ * @param entry
+ * Hash queue entry pointer.
+ * @param cb_ctx
+ * Context of the callback function.
+ *
+ * @return
+ * Queue entry on success, NULL otherwise.
+ */
+struct mlx5_cache_entry *
+mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ /* Cache-list creation callback: the list carries the port
+  * (ethdev) as its context and cb_ctx wraps the RSS descriptor.
+  */
+ struct rte_eth_dev *dev = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+ struct mlx5_hrxq *hrxq;
+
+ /* Allocate the hash Rx queue; return its embedded cache entry,
+  * or NULL so the cache list reports the failure to the caller.
+  */
+ hrxq = __mlx5_hrxq_create(dev, rss_desc);
+ return hrxq ? &hrxq->entry : NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_desc
+ * RSS configuration for the Rx hash queue.
+ *
+ * @return
+ *   A hash Rx queue index on success, 0 otherwise.
+ */
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = rss_desc,
+ };
+
+ if (rss_desc->shared_rss) {
+ /* Shared RSS queues bypass the cache list: each one is
+  * created standalone and never looked up for reuse.
+  */
+ hrxq = __mlx5_hrxq_create(dev, rss_desc);
+ } else {
+ /* Look up a matching cached hrxq or have the list create
+  * one via mlx5_hrxq_create_cb (reference counted).
+  */
+ entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+ if (!entry)
+ return 0;
+ hrxq = container_of(entry, typeof(*hrxq), entry);
+ }
+ if (hrxq)
+ return hrxq->idx;
+ /* 0 is the failure value; valid hrxq ipool indices are non-zero. */
+ return 0;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ if (!hrxq)
+ return 0;
+ if (!hrxq->standalone)
+ return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+ __mlx5_hrxq_remove(dev, hrxq);