+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queues
+ * Queues to include in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ * @param standalone
+ * True if this indirection table is for a standalone queue.
+ *
+ * @return
+ * The Verbs/DevX object initialized on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n, bool standalone)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_obj *ind_tbl;
+ int ret;
+
+ ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
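+ /* The queue array is laid out in the same allocation, right after the structure. */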
+ ind_tbl->queues_n = queues_n;
+ ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+ memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+ if (ret < 0) {
+ mlx5_free(ind_tbl);
+ return NULL;
+ }
+ if (!standalone)
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ return ind_tbl;
+}
+
+/**
+ * Modify an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ * Indirection table to modify.
+ * @param queues
+ * Replacement queues for the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ * @param standalone
+ * True if this indirection table is for a standalone queue.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ uint16_t *queues, const uint32_t queues_n,
+ bool standalone)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i, j;
+ int ret = 0, err;
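+ /* Log2 of the table size: queues_n if it is a power of two, the device maximum otherwise. */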
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ MLX5_ASSERT(standalone);
+ RTE_SET_USED(standalone);
+ if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
+ /*
+ * Modification of indirection tables having more than one
+ * reference is unsupported. Intended for standalone
+ * indirection tables only.
+ */
+ DRV_LOG(DEBUG,
+ "Port %u cannot modify indirection table (refcnt> 1).",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
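+ /* Take a reference on each new queue before modifying the device object. */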
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+ if (ret)
+ goto error;
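+ /* Drop the references held on the old queues and adopt the new array. */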
+ for (j = 0; j < ind_tbl->queues_n; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ ind_tbl->queues_n = queues_n;
+ ind_tbl->queues = queues;
+ return 0;
+error:
+ err = rte_errno;
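+ /* Release only the references taken on the new queues above. */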
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, queues[j]);
+ rte_errno = err;
+ DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
+ dev->data->port_id);
+ return ret;
+}
+
+/**
+ * Match an Rx Hash queue.
+ *
+ * @param list
+ * Cache list pointer.
+ * @param entry
+ * Hash queue entry pointer.
+ * @param cb_ctx
+ * Context of the callback function.
+ *
+ * @return
+ * 0 if match, nonzero otherwise.
+ */
+int
+mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry,
+ void *cb_ctx)
+{
+ struct rte_eth_dev *dev = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+ struct mlx5_ind_table_obj *ind_tbl;
+
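+ /* Compare the cheap RSS fields first; the indirection table lookup is costlier. */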
+ if (hrxq->rss_key_len != rss_desc->key_len ||
+ memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+ hrxq->hash_fields != rss_desc->hash_fields)
+ return 1;
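+ /* The get below takes a reference which must be released; only the pointer comparison is needed. */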
+ ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
+ rss_desc->queue_num);
+ if (ind_tbl)
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ return ind_tbl != hrxq->ind_table;
+}
+
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq
+ * Index to Hash Rx queue to modify.