--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ ... @@ struct mlx5_rxq_ctrl
        struct mlx5_rxq_data rxq; /* Data path structure. */
        LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
        uint32_t refcnt; /* Reference counter. */
+       LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
        struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
+       struct mlx5_dev_ctx_shared *sh; /* Shared context. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
        enum mlx5_rxq_type type; /* Rxq type. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       uint32_t share_group; /* Group ID of shared RXQ. */
+       uint16_t share_qid; /* Shared RxQ ID in group. */
        unsigned int irq:1; /* Whether IRQ is enabled. */
        uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
        uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
        uint32_t hairpin_status; /* Hairpin binding status. */
 };
 
+/* RX queue private data. */
+struct mlx5_rxq_priv {
+       uint16_t idx; /* Queue index. */
+       struct mlx5_rxq_ctrl *ctrl; /* Shared Rx queue control structure. */
+       LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
+       struct mlx5_priv *priv; /* Back pointer to private data. */
+};
+
 /* mlx5_rxq.c */
 
 extern uint8_t rss_hash_default_key[];
 int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
-struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
+                                  struct mlx5_rxq_priv *rxq,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
                                   const struct rte_eth_rxseg_split *rx_seg,
                                   uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
-       (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+       (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
 struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
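The split above divides queue state between two structures: mlx5_rxq_ctrl keeps the data-path and potentially shared state, while each owning port holds a small mlx5_rxq_priv that is linked into the ctrl's owners list and is now what the setup paths pass around. A minimal sketch of walking that list, assuming the definitions above (the helper itself is illustrative, not part of the patch):

/* Illustrative only: visit every per-port owner of a (possibly
 * shared) Rx queue control structure through the new owners list.
 */
static void
rxq_ctrl_log_owners(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        struct mlx5_rxq_priv *owner;

        LIST_FOREACH(owner, &rxq_ctrl->owners, owner_entry)
                DRV_LOG(DEBUG, "port %u Rx queue %u owns this ctrl",
                        owner->priv->dev_data->port_id, owner->idx);
}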
 
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ ... @@ mlx5_rx_queue_setup
 int
 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_rxconf *conf,
                    struct rte_mempool *mp)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_priv *rxq;
        struct mlx5_rxq_ctrl *rxq_ctrl;
        struct rte_eth_rxseg_split *rx_seg =
                                (struct rte_eth_rxseg_split *)conf->rx_seg;
        struct rte_eth_rxseg_split rx_single = {.mp = mp};
        uint16_t n_seg = conf->rx_nseg;
        int res;
@@ ... @@ mlx5_rx_queue_setup
        res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
        if (res)
                return res;
-       rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
+       rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+                         SOCKET_ID_ANY);
+       if (!rxq) {
+               DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
+                       dev->data->port_id, idx);
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       rxq->priv = priv;
+       rxq->idx = idx;
+       (*priv->rxq_privs)[idx] = rxq;
+       rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);
        if (!rxq_ctrl) {
-               DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+               DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
                        dev->data->port_id, idx);
+               mlx5_free(rxq);
+               (*priv->rxq_privs)[idx] = NULL;
                rte_errno = ENOMEM;
                return -rte_errno;
        }
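Ordering matters in the hunk above: the private data is allocated, initialized, and published in (*priv->rxq_privs)[idx] before mlx5_rxq_new() runs, because the constructor reads rxq->idx and links rxq into the new ctrl's owners list; on failure both the allocation and the slot are rolled back. A hypothetical helper capturing that bootstrap, under the same assumptions (the name rxq_priv_prepare is illustrative, not in the patch):

/* Hypothetical helper (not in the patch): allocate and publish the
 * per-port Rx queue private data so the ctrl constructor can link it.
 */
static struct mlx5_rxq_priv *
rxq_priv_prepare(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_priv *rxq;

        rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
                          SOCKET_ID_ANY);
        if (rxq == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }
        rxq->priv = priv;
        rxq->idx = idx;
        (*priv->rxq_privs)[idx] = rxq;
        return rxq;
}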
@@ ... @@ mlx5_rx_hairpin_queue_setup
 int
 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
                             uint16_t desc,
                             const struct rte_eth_hairpin_conf *hairpin_conf)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_priv *rxq;
        struct mlx5_rxq_ctrl *rxq_ctrl;
        int res;
 
@@ ... @@ mlx5_rx_hairpin_queue_setup
                        return -rte_errno;
                }
        }
-       rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+       rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+                         SOCKET_ID_ANY);
+       if (!rxq) {
+               DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
+                       dev->data->port_id, idx);
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       rxq->priv = priv;
+       rxq->idx = idx;
+       (*priv->rxq_privs)[idx] = rxq;
+       rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
        if (!rxq_ctrl) {
-               DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+               DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
                        dev->data->port_id, idx);
+               mlx5_free(rxq);
+               (*priv->rxq_privs)[idx] = NULL;
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+       DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
                dev->data->port_id, idx);
        (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
        return 0;
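As with the regular path, the hairpin path allocates and publishes the private data before calling the constructor and unwinds both on failure; the two hunks are deliberately symmetric, and a helper like the hypothetical rxq_priv_prepare() sketched above would collapse the duplicated bootstrap into a single call in each, keeping the failure cleanup in one place.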
@@ ... @@ mlx5_rxq_new
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param idx
- *   RX queue index.
+ * @param rxq
+ *   RX queue private data.
  * @param desc
  *   Number of descriptors to configure in queue.
  * @param socket
  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
  */
 struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+            uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
             const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
+       uint16_t idx = rxq->idx;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
        unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
@@ ... @@ mlx5_rxq_new
                rte_errno = ENOMEM;
                return NULL;
        }
+       LIST_INIT(&tmpl->owners);
+       rxq->ctrl = tmpl;
+       LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
        MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
        /*
         * Build the array of actual buffer offsets and lengths.
@@ ... @@ mlx5_rxq_new
        tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
                (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
        tmpl->rxq.port_id = dev->data->port_id;
+       tmpl->sh = priv->sh;
        tmpl->priv = priv;
        tmpl->rxq.mp = rx_seg[0].mp;
        tmpl->rxq.elts_n = log2above(desc);
@@ ... @@ mlx5_rxq_hairpin_new
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param idx
- *   RX queue index.
+ * @param rxq
+ *   RX queue private data.
  * @param desc
  *   Number of descriptors to configure in queue.
  * @param hairpin_conf
  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
  */
 struct mlx5_rxq_ctrl *
-mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+                    uint16_t desc,
                     const struct rte_eth_hairpin_conf *hairpin_conf)
 {
+       uint16_t idx = rxq->idx;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
 
@@ ... @@ mlx5_rxq_hairpin_new
                rte_errno = ENOMEM;
                return NULL;
        }
+       LIST_INIT(&tmpl->owners);
+       rxq->ctrl = tmpl;
+       LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
        tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
        tmpl->socket = SOCKET_ID_ANY;
        tmpl->rxq.rss_hash = 0;
        tmpl->rxq.port_id = dev->data->port_id;
+       tmpl->sh = priv->sh;
        tmpl->priv = priv;
        tmpl->rxq.mp = NULL;
        tmpl->rxq.elts_n = log2above(desc);
@@ ... @@ mlx5_rxq_release
 int
 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *rxq_ctrl;
+       struct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];
 
        if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
                return 0;
@@ ... @@ mlx5_rxq_release
        if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+               LIST_REMOVE(rxq, owner_entry);
                LIST_REMOVE(rxq_ctrl, next);
                mlx5_free(rxq_ctrl);
                (*priv->rxqs)[idx] = NULL;
+               mlx5_free(rxq);
+               (*priv->rxq_privs)[idx] = NULL;
        }
        return 0;
 }
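The release path mirrors the setup order: the owner entry is unlinked before rxq_ctrl is freed, and the mlx5_rxq_priv and its rxq_privs slot are cleared only afterwards, so no stale owner pointer remains reachable through the shared list. A minimal usage sketch at port teardown (illustrative only, assuming all queues were set up through the paths above):

/* Illustrative only: drop every Rx queue of a port at teardown. */
static void
rxq_release_all(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i != dev->data->nb_rx_queues; ++i)
                (void)mlx5_rxq_release(dev, i);
}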