uint32_t key_len; /**< RSS hash key len. */
uint32_t tunnel; /**< Queue in tunnel. */
uint32_t shared_rss; /**< Shared RSS index. */
+ struct mlx5_ind_table_obj *ind_tbl;
+ /**< Indirection table for shared RSS hash RX queues. */
union {
uint16_t *queue; /**< Destination queues. */
const uint16_t *const_q; /**< Const pointer convert. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
};
uint32_t queues_n; /**< Number of queues in the list. */
- uint16_t queues[]; /**< Queue list. */
+ uint16_t *queues; /**< Queue list. */
};
/* Hash Rx queue. */
void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
struct mlx5_ind_table_obj *ind_tbl);
+ int (*ind_table_modify)(struct rte_eth_dev *dev,
+ const unsigned int log_n,
+ const uint16_t *queues, const uint32_t queues_n,
+ struct mlx5_ind_table_obj *ind_tbl);
void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
int tunnel __rte_unused);
}
/**
- * Create RQT using DevX API as a filed of indirection table.
+ * Prepare RQT attribute structure for DevX RQT API.
*
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   Queue indices to place in the RQT.
 * @param queues_n
 *   Number of queues in the array.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ *   The initialized RQT attribute structure on success, NULL otherwise and rte_errno is set.
*/
-static int
-mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
- struct mlx5_ind_table_obj *ind_tbl)
+static struct mlx5_devx_rqt_attr *
+mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
+ const unsigned int log_n,
+ const uint16_t *queues,
+ const uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_devx_rqt_attr *rqt_attr = NULL;
const unsigned int rqt_n = 1 << log_n;
unsigned int i, j;
- MLX5_ASSERT(ind_tbl);
rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
if (!rqt_attr) {
DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
dev->data->port_id);
rte_errno = ENOMEM;
- return -rte_errno;
+ return NULL;
}
rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
rqt_attr->rqt_actual_size = rqt_n;
- for (i = 0; i != ind_tbl->queues_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
MLX5_ASSERT(i > 0);
for (j = 0; i != rqt_n; ++j, ++i)
rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
+ return rqt_attr;
+}
+
+/**
+ * Create RQT using DevX API as a field of indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param log_n
+ * Log of number of queues in the array.
+ * @param ind_tbl
+ * DevX indirection table object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+ MLX5_ASSERT(ind_tbl);
+ rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+ ind_tbl->queues,
+ ind_tbl->queues_n);
+ if (!rqt_attr)
+ return -rte_errno;
ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
return 0;
}
+/**
+ * Modify RQT using DevX API as a field of indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param queues
+ *   Queue indices to place in the RQT.
+ * @param queues_n
+ *   Number of queues in the array.
+ * @param ind_tbl
+ *   DevX indirection table object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
+ const uint16_t *queues, const uint32_t queues_n,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ int ret = 0;
+ struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+ MLX5_ASSERT(ind_tbl);
+ rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+ queues,
+ queues_n);
+ if (!rqt_attr)
+ return -rte_errno;
+ ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
+ mlx5_free(rqt_attr);
+ if (ret)
+ DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
+ dev->data->port_id);
+ return ret;
+}
+
/**
* Destroy the DevX RQT object.
*
.rxq_obj_modify = mlx5_devx_modify_rq,
.rxq_obj_release = mlx5_rxq_devx_obj_release,
.ind_table_new = mlx5_devx_ind_table_new,
+ .ind_table_modify = mlx5_devx_ind_table_modify,
.ind_table_destroy = mlx5_devx_ind_table_destroy,
.hrxq_new = mlx5_devx_hrxq_new,
.hrxq_destroy = mlx5_devx_tir_destroy,
uint32_t refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t *queue; /**< Queue indices to use. */
+ struct mlx5_ind_table_obj *ind_tbl;
+ /**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
uint32_t hrxq_tunnel[MLX5_RSS_HASH_FIELDS_LEN];
/**< Hash RX queue indexes for tunneled RSS */
+ rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};
struct rte_flow_shared_action {
*/
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
- uint32_t action_idx,
- struct mlx5_shared_action_rss *action,
- struct rte_flow_error *error)
+ uint32_t action_idx,
+ struct mlx5_shared_action_rss *action,
+ struct rte_flow_error *error)
{
struct mlx5_flow_rss_desc rss_desc = { 0 };
size_t i;
int err;
+ if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot setup indirection table");
+ }
memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = action->origin.queue;
rss_desc.queue_num = action->origin.queue_num;
/* Set non-zero value to indicate a shared RSS. */
rss_desc.shared_rss = action_idx;
+ rss_desc.ind_tbl = action->ind_tbl;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
error_hrxq_new:
err = rte_errno;
__flow_dv_action_rss_hrxqs_release(dev, action);
+ if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
+ action->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
}
"rss action number out of range");
goto error_rss_init;
}
- shared_action->queue = queue;
+ shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*shared_action->ind_tbl),
+ 0, SOCKET_ID_ANY);
+ if (!shared_action->ind_tbl) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ goto error_rss_init;
+ }
+ memcpy(queue, rss->queue, queue_size);
+ shared_action->ind_tbl->queues = queue;
+ shared_action->ind_tbl->queues_n = rss->queue_num;
origin = &shared_action->origin;
origin->func = rss->func;
origin->level = rss->level;
memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
origin->key = &shared_action->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
- memcpy(shared_action->queue, rss->queue, queue_size);
- origin->queue = shared_action->queue;
+ origin->queue = queue;
origin->queue_num = rss->queue_num;
if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
goto error_rss_init;
+ rte_spinlock_init(&shared_action->action_rss_sl);
__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
rte_spinlock_unlock(&priv->shared_act_sl);
return idx;
error_rss_init:
- if (shared_action)
+ if (shared_action) {
+ if (shared_action->ind_tbl)
+ mlx5_free(shared_action->ind_tbl);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
+ }
if (queue)
mlx5_free(queue);
return 0;
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
uint32_t old_refcnt = 1;
int remaining;
+ uint16_t *queue = NULL;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss hrxq has references");
+ queue = shared_rss->ind_tbl->queues;
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+ if (remaining)
+ return rte_flow_error_set(error, ETOOMANYREFS,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss indirection table has"
+ " references");
if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
0, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED))
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss has references");
- rte_free(shared_rss->queue);
+ mlx5_free(queue);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
- size_t i;
- int ret;
+ int ret = 0;
void *queue = NULL;
- const uint8_t *rss_key;
- uint32_t rss_key_len;
+ uint16_t *queue_old = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
if (!shared_rss)
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot allocate resource memory");
- if (action_conf->key) {
- rss_key = action_conf->key;
- rss_key_len = action_conf->key_len;
+ memcpy(queue, action_conf->queue, queue_size);
+ MLX5_ASSERT(shared_rss->ind_tbl);
+ rte_spinlock_lock(&shared_rss->action_rss_sl);
+ queue_old = shared_rss->ind_tbl->queues;
+ ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
+ queue, action_conf->queue_num, true);
+ if (ret) {
+ mlx5_free(queue);
+ ret = rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot update indirection table");
} else {
- rss_key = rss_hash_default_key;
- rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ mlx5_free(queue_old);
+ shared_rss->origin.queue = queue;
+ shared_rss->origin.queue_num = action_conf->queue_num;
}
- for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
- uint32_t hrxq_idx;
- uint64_t hash_fields = mlx5_rss_hash_fields[i];
- int tunnel;
-
- for (tunnel = 0; tunnel < 2; tunnel++) {
- hrxq_idx = __flow_dv_action_rss_hrxq_lookup
- (dev, idx, hash_fields, tunnel);
- MLX5_ASSERT(hrxq_idx);
- ret = mlx5_hrxq_modify
- (dev, hrxq_idx,
- rss_key, rss_key_len,
- hash_fields,
- action_conf->queue, action_conf->queue_num);
- if (ret) {
- mlx5_free(queue);
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "cannot update hash queue");
- }
- }
- }
- mlx5_free(shared_rss->queue);
- shared_rss->queue = queue;
- memcpy(shared_rss->queue, action_conf->queue, queue_size);
- shared_rss->origin.queue = shared_rss->queue;
- shared_rss->origin.queue_num = action_conf->queue_num;
- return 0;
+ rte_spinlock_unlock(&shared_rss->action_rss_sl);
+ return ret;
}
/**
return ret;
}
+/**
+ * Set up the fields of an indirection table structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ *   Indirection table to set up.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t queues_n = ind_tbl->queues_n;
+ uint16_t *queues = ind_tbl->queues;
+ unsigned int i, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
+ ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
+ if (ret)
+ goto error;
+ __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ return 0;
+error:
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ rte_errno = err;
+ DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+ return ret;
+}
+
/**
* Create an indirection table.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
- const unsigned int n = rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
- unsigned int i, j;
int ret;
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
return NULL;
}
ind_tbl->queues_n = queues_n;
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
- if (!rxq)
- goto error;
- ind_tbl->queues[i] = queues[i];
+ ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+ memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+ if (ret < 0) {
+ mlx5_free(ind_tbl);
+ return NULL;
}
- ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
- if (ret < 0)
- goto error;
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
if (!standalone)
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
return ind_tbl;
+}
+
+/**
+ * Modify an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ *   Indirection table to modify.
+ * @param queues
+ * Queues replacement for the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ * @param standalone
+ * Indirection table for Standalone queue.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ uint16_t *queues, const uint32_t queues_n,
+ bool standalone)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ MLX5_ASSERT(standalone);
+ RTE_SET_USED(standalone);
+ if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
+ /*
+ * Modification of indirection tables having more than one
+ * reference is unsupported. Intended for standalone
+ * indirection tables only.
+ */
+ DEBUG("Port %u cannot modify indirection table (refcnt> 1).",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+ if (ret)
+ goto error;
+ for (j = 0; j < ind_tbl->queues_n; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ ind_tbl->queues_n = queues_n;
+ ind_tbl->queues = queues;
+ return 0;
error:
- ret = rte_errno;
+ err = rte_errno;
for (j = 0; j < i; j++)
mlx5_rxq_release(dev, ind_tbl->queues[j]);
- rte_errno = ret;
- mlx5_free(ind_tbl);
- DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
- return NULL;
+ rte_errno = err;
+ DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+ return ret;
}
/**
queues, queues_n)) {
ind_tbl = hrxq->ind_table;
} else {
+ if (hrxq->standalone) {
+ /*
+ * Replacement of indirection table unsupported for
+ * standalone hrxq objects (used by shared RSS).
+ */
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
goto error;
}
if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
hrxq->standalone);
hrxq->ind_table = ind_tbl;
return 0;
error:
err = rte_errno;
- if (ind_tbl != hrxq->ind_table)
+ if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ }
rte_errno = err;
return -rte_errno;
}
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table, hrxq->standalone);
+ if (!hrxq->standalone) {
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone);
+ }
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
uint32_t queues_n = rss_desc->queue_num;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
- struct mlx5_ind_table_obj *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
int ret;
queues_n = rss_desc->hash_fields ? queues_n : 1;
- ind_tbl = standalone ? NULL :
- mlx5_ind_table_obj_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
standalone);
goto error;
return hrxq;
error:
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+ if (!rss_desc->ind_tbl)
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
bool standalone);
+int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl);
+int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ uint16_t *queues, const uint32_t queues_n,
+ bool standalone);
struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,