* Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
+ * @param standalone
+ * Indirection table for a standalone queue (not linked into the device list).
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_obj *ind_tbl)
+ struct mlx5_ind_table_obj *ind_tbl,
+ bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
- LIST_REMOVE(ind_tbl, next);
+ if (!standalone)
+ LIST_REMOVE(ind_tbl, next);
mlx5_free(ind_tbl);
return 0;
}
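
For context on these two hunks: a standalone indirection table is never inserted into priv->ind_tbls on creation, so the release path must skip LIST_REMOVE() for it; both sides test the same flag. A minimal, self-contained sketch of that create/release pairing, with hypothetical obj/registry names standing in for the mlx5 types:

/*
 * Sketch only, not mlx5 code: a refcounted object is linked into a
 * registry list unless it is standalone, so release must skip the
 * list removal for standalone objects.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct obj {
        LIST_ENTRY(obj) next;   /* registry linkage, unused if standalone */
        unsigned int refcnt;
        bool standalone;
};

static LIST_HEAD(, obj) registry = LIST_HEAD_INITIALIZER(registry);

static struct obj *
obj_new(bool standalone)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o == NULL)
                return NULL;
        o->standalone = standalone;
        __atomic_fetch_add(&o->refcnt, 1, __ATOMIC_RELAXED);
        if (!standalone)        /* standalone objects stay off the list */
                LIST_INSERT_HEAD(&registry, o, next);
        return o;
}

/* Returns 1 while a reference remains, 0 once the object is freed. */
static int
obj_release(struct obj *o)
{
        if (__atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;
        if (!o->standalone)     /* never listed, so nothing to remove */
                LIST_REMOVE(o, next);
        free(o);
        return 0;
}

int
main(void)
{
        struct obj *listed = obj_new(false);
        struct obj *alone = obj_new(true);

        printf("%d %d\n", obj_release(listed), obj_release(alone)); /* 0 0 */
        return 0;
}
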
* Queues to include in the indirection table.
* @param queues_n
* Number of queues in the array.
+ * @param standalone
+ * Indirection table for a standalone queue (not linked into the device list).
*
* @return
* The initialized Verbs/DevX object, NULL otherwise and rte_errno is set.
*/
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n)
+ uint32_t queues_n, bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
if (ret < 0)
goto error;
__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
- LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ if (!standalone)
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
return ind_tbl;
error:
ret = rte_errno;
hrxq, next) {
struct mlx5_ind_table_obj *ind_tbl;
- if (hrxq->shared)
- continue;
if (hrxq->rss_key_len != rss_key_len)
continue;
if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_obj_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl,
+ hrxq->standalone);
continue;
}
__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
} else {
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ hrxq->standalone);
}
if (!ind_tbl) {
rte_errno = ENOMEM;
goto error;
}
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
error:
err = rte_errno;
if (ind_tbl != hrxq->ind_table)
- mlx5_ind_table_obj_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
rte_errno = err;
return -rte_errno;
}
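
The modify path above follows a swap-with-rollback discipline: acquire the replacement indirection table first, swap it in and release the old one only once nothing can fail, and on error release only the table that is not attached to the queue, saving and restoring the error code around the cleanup. A self-contained sketch of that discipline under hypothetical names (plain errno stands in for rte_errno, and the refcounting is single-threaded where the driver uses __atomic builtins):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct table { unsigned int refcnt; };
struct hrxq { struct table *ind_table; };

static struct table *
table_acquire(void)             /* stands in for get()-then-new() */
{
        struct table *t = calloc(1, sizeof(*t));

        if (t != NULL)
                t->refcnt = 1;
        return t;
}

static void
table_release(struct table *t)
{
        if (--t->refcnt == 0)
                free(t);
}

static int
hrxq_modify(struct hrxq *h, bool fail_after_acquire)
{
        struct table *tbl = table_acquire();
        int err;

        if (tbl == NULL) {
                errno = ENOMEM;
                return -errno;
        }
        if (fail_after_acquire) {       /* e.g. the hardware update failed */
                errno = EINVAL;
                goto error;
        }
        if (tbl != h->ind_table) {      /* swap in the new table ... */
                table_release(h->ind_table); /* ... and drop the old one */
                h->ind_table = tbl;
        }
        return 0;
error:
        err = errno;                    /* save the error code across cleanup */
        if (tbl != h->ind_table)        /* release only the unattached table */
                table_release(tbl);
        errno = err;                    /* restore it for the caller */
        return -errno;
}

int
main(void)
{
        struct hrxq h = { .ind_table = table_acquire() };

        /* Prints 0, then a negative errno from the simulated failure. */
        printf("%d %d\n", hrxq_modify(&h, false), hrxq_modify(&h, true));
        table_release(h.ind_table);
        return 0;
}
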
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table);
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
- hrxq_idx, hrxq, next);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone);
+ if (!hrxq->standalone)
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ &priv->hrxqs, hrxq_idx, hrxq, next);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return 0;
}
- claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
+ claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone));
return 1;
}
* Number of queues.
* @param tunnel
* Tunnel type.
- * @param shared
- * If true new object of Rx Hash queue will be used in shared action.
+ * @param standalone
+ * Whether the new Rx Hash queue object is for a standalone shared action.
*
* @return
* The index of the initialized DevX object, 0 otherwise and rte_errno is set.
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- int tunnel, bool shared)
+ int tunnel, bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = NULL;
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ standalone);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
- hrxq->shared = !!shared;
+ hrxq->standalone = !!standalone;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
goto error;
}
__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
- ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
- hrxq, next);
+ if (!hrxq->standalone)
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+ hrxq_idx, hrxq, next);
return hrxq_idx;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_obj_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
rte_errno = ret; /* Restore rte_errno. */
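
Finally, a hedged caller-side sketch of how a shared RSS action might obtain its standalone Rx hash queue through the signature above. The wrapper name is hypothetical, tunnel = 0 is an assumption for plain (non-tunneled) traffic, and the driver-internal headers are assumed to be in scope:

/* Hypothetical helper inside the mlx5 driver, not an existing function.
 * Passing standalone = true keeps the new hash Rx queue out of
 * priv->hrxqs, so only its creator can reach and release it. */
static uint32_t
shared_rss_hrxq_create(struct rte_eth_dev *dev,
                       const uint8_t *rss_key, uint32_t rss_key_len,
                       uint64_t hash_fields,
                       const uint16_t *queues, uint32_t queues_n)
{
        /* tunnel = 0 assumed: plain traffic; standalone = true. */
        return mlx5_hrxq_new(dev, rss_key, rss_key_len, hash_fields,
                             queues, queues_n, 0, true);
}
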