+struct mlx5_external_rxq *
+mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_external_rxq *ext_rxq = mlx5_ext_rxq_get(dev, idx);
+
+	/* Bump the reference counter with a relaxed atomic increment. */
+	__atomic_fetch_add(&ext_rxq->refcnt, 1, __ATOMIC_RELAXED);
+	return ext_rxq;
+}
+
+/**
+ * Decrease an external Rx queue reference count.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   External RX queue index.
+ *
+ * @return
+ *   Updated reference count.
+ */
+uint32_t
+mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_external_rxq *ext_rxq = mlx5_ext_rxq_get(dev, idx);
+
+	/* fetch_sub returns the previous value, adjust to the new one. */
+	return __atomic_fetch_sub(&ext_rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+}
+
+/**
+ * Get an external Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   External Rx queue index, asserted to be in the external range.
+ *
+ * @return
+ *   A pointer to the queue descriptor.
+ */
+struct mlx5_external_rxq *
+mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t offset = idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN;
+
+	MLX5_ASSERT(mlx5_is_external_rxq(dev, idx));
+	return priv->ext_rxqs + offset;
+}
+
+/**
+ * Dereference a list of Rx queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queues
+ *   List of Rx queues to deref.
+ * @param queues_n
+ *   Number of queues in the array.
+ */
+static void
+mlx5_rxqs_deref(struct rte_eth_dev *dev, uint16_t *queues,
+		const uint32_t queues_n)
+{
+	uint32_t qi;
+
+	for (qi = 0; qi != queues_n; ++qi) {
+		uint16_t queue = queues[qi];
+
+		/* External and regular queues keep separate refcounts. */
+		if (mlx5_is_external_rxq(dev, queue))
+			claim_nonzero(mlx5_ext_rxq_deref(dev, queue));
+		else
+			claim_nonzero(mlx5_rxq_deref(dev, queue));
+	}
+}
+
+/**
+ * Increase reference count for list of Rx queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queues
+ *   List of Rx queues to ref.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_rxqs_ref(struct rte_eth_dev *dev, uint16_t *queues,
+	      const uint32_t queues_n)
+{
+	uint32_t i;
+
+	for (i = 0; i < queues_n; i++) {
+		void *rxq;
+		uint16_t queue = queues[i];
+
+		if (mlx5_is_external_rxq(dev, queue))
+			rxq = mlx5_ext_rxq_ref(dev, queue);
+		else
+			rxq = mlx5_rxq_ref(dev, queue);
+		if (rxq == NULL)
+			goto error;
+	}
+	return 0;
+error:
+	/* Roll back the references already taken on the first i queues. */
+	mlx5_rxqs_deref(dev, queues, i);
+	rte_errno = EINVAL;
+	return -rte_errno;
+}
+
+/**
+ * Release a Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_priv *rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+	uint32_t refcnt;
+
+	/* Nothing to release if the private queue array was never set up. */
+	if (priv->rxq_privs == NULL)
+		return 0;
+	rxq = mlx5_rxq_get(dev, idx);
+	if (rxq == NULL || rxq->refcnt == 0)
+		return 0;
+	/* Cache the ctrl pointer: rxq itself may be freed further below. */
+	rxq_ctrl = rxq->ctrl;
+	refcnt = mlx5_rxq_deref(dev, idx);
+	if (refcnt > 1) {
+		/* Other references remain, keep the queue fully alive. */
+		return 1;
+	} else if (refcnt == 1) { /* RxQ stopped. */
+		priv->obj_ops.rxq_obj_release(rxq);
+		/* Drop the queue object only if the queue never started. */
+		if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
+			LIST_REMOVE(rxq_ctrl->obj, next);
+			mlx5_free(rxq_ctrl->obj);
+			rxq_ctrl->obj = NULL;
+		}
+		if (!rxq_ctrl->is_hairpin) {
+			if (!rxq_ctrl->started)
+				rxq_free_elts(rxq_ctrl);
+			dev->data->rx_queue_state[idx] =
+				RTE_ETH_QUEUE_STATE_STOPPED;
+		}
+	} else { /* Refcnt zero, closing device. */
+		LIST_REMOVE(rxq, owner_entry);
+		/* Free the (possibly shared) ctrl with its last owner only. */
+		if (LIST_EMPTY(&rxq_ctrl->owners)) {
+			if (!rxq_ctrl->is_hairpin)
+				mlx5_mr_btree_free
+					(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+			if (rxq_ctrl->rxq.shared)
+				LIST_REMOVE(rxq_ctrl, share_entry);
+			LIST_REMOVE(rxq_ctrl, next);
+			mlx5_free(rxq_ctrl);
+		}
+		dev->data->rx_queues[idx] = NULL;
+		mlx5_free(rxq);
+		(*priv->rxq_privs)[idx] = NULL;
+	}
+	return 0;
+}
+
+/**
+ * Verify the Rx Queue list is empty
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The number of object not released.
+ */
+int
+mlx5_rxq_verify(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+	int leftovers = 0;
+
+	/* Any ctrl still linked in the list is a leaked reference. */
+	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
+			dev->data->port_id, rxq_ctrl->rxq.idx);
+		leftovers++;
+	}
+	return leftovers;
+}
+
+/**
+ * Verify the external Rx Queue list is empty.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The number of object not released.
+ */
+int
+mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_external_rxq *ext_rxq;
+	uint32_t idx;
+	int leftovers = 0;
+
+	if (priv->ext_rxqs == NULL)
+		return 0;
+
+	/* Walk the whole external index range up to UINT16_MAX. */
+	for (idx = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; idx <= UINT16_MAX; ++idx) {
+		ext_rxq = mlx5_ext_rxq_get(dev, idx);
+		if (ext_rxq->refcnt >= 2) {
+			DRV_LOG(DEBUG,
+				"Port %u external RxQ %u still referenced.",
+				dev->data->port_id, idx);
+			++leftovers;
+		}
+	}
+	return leftovers;
+}
+
+/**
+ * Check whether RxQ type is Hairpin.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   True if Rx queue type is Hairpin, otherwise False.
+ */
+bool
+mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+
+	/* External queues are never hairpin. */
+	if (mlx5_is_external_rxq(dev, idx))
+		return false;
+	rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+	if (idx >= priv->rxqs_n || rxq_ctrl == NULL)
+		return false;
+	return rxq_ctrl->is_hairpin;
+}
+
+/**
+ * Get a Rx hairpin queue configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   Pointer to the configuration if a hairpin RX queue, otherwise NULL.
+ */
+const struct rte_eth_hairpin_conf *
+mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+	if (!mlx5_rxq_is_hairpin(dev, idx))
+		return NULL;
+	return &rxq->hairpin_conf;
+}
+
+/**
+ * Match queues listed in arguments to queues contained in indirection table
+ * object.
+ *
+ * @param ind_tbl
+ * Pointer to indirection table to match.
+ * @param queues
+ *   Queues to match to queues in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ *   1 if all queues in the indirection table match, 0 otherwise.
+ */
+static int
+mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+ const uint16_t *queues, uint32_t queues_n)