*/
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *counter)
+ struct mlx5_flow_counter_ext *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct mlx5_priv *priv = dev->data->dev_private;
* Counter identifier.
*
* @return
- * A pointer to the counter, NULL otherwise and rte_errno is set.
+ * Index to the counter, 0 otherwise and rte_errno is set.
*/
-static struct mlx5_flow_counter *
+static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
struct mlx5_flow_counter_pool *pool = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
struct mlx5_flow_counter *cnt = NULL;
uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
uint32_t pool_idx;
for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
pool = cont->pools[pool_idx];
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = &pool->counters_raw[i];
- if (cnt->shared && cnt->id == id) {
- cnt->ref_cnt++;
- return (struct mlx5_flow_counter *)
- (uintptr_t)
- MLX5_MAKE_CNT_IDX(pool_idx, i);
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+ if (cnt_ext->shared && cnt_ext->id == id) {
+ cnt_ext->ref_cnt++;
+ return MLX5_MAKE_CNT_IDX(pool_idx, i);
}
}
}
(n_valid + MLX5_CNT_CONTAINER_RESIZE);
pools = rte_zmalloc(__func__, size, 0);
if (!pools)
- return NULL;
+ return 0;
if (n_valid) {
memcpy(pools, cont->pools,
sizeof(struct mlx5_flow_counter_pool *) *
cont->n += MLX5_CNT_CONTAINER_RESIZE;
}
/* Allocate memory for new pool*/
- size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
+ size = sizeof(*pool) + sizeof(*cnt_ext) *
+ MLX5_COUNTERS_PER_POOL;
pool = rte_calloc(__func__, 1, size, 0);
if (!pool)
- return NULL;
+ return 0;
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = &pool->counters_raw[i];
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
rte_atomic16_add(&cont->n_valid, 1);
TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
}
- cnt->id = id;
- cnt->shared = shared;
- cnt->ref_cnt = 1;
+ i = cnt - pool->counters_raw;
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+ cnt_ext->id = id;
+ cnt_ext->shared = shared;
+ cnt_ext->ref_cnt = 1;
cnt->hits = 0;
cnt->bytes = 0;
/* Create counter with Verbs. */
- ret = flow_verbs_counter_create(dev, cnt);
+ ret = flow_verbs_counter_create(dev, cnt_ext);
if (!ret) {
TAILQ_REMOVE(&pool->counters, cnt, next);
- return (struct mlx5_flow_counter *)(uintptr_t)
- MLX5_MAKE_CNT_IDX(pool_idx, (cnt - pool->counters_raw));
+ return MLX5_MAKE_CNT_IDX(pool_idx, i);
}
/* Some error occurred in Verbs library. */
rte_errno = -ret;
- return NULL;
+ return 0;
}
/**
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] counter
- * Pointer to the counter handler.
+ * Index to the counter handler.
*/
static void
-flow_verbs_counter_release(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *counter)
+flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
struct mlx5_flow_counter_pool *pool;
struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_ext *cnt_ext;
- cnt = flow_verbs_counter_get_by_idx(dev, (uintptr_t)(void *)counter,
+ cnt = flow_verbs_counter_get_by_idx(dev, counter,
&pool);
- if (--counter->ref_cnt == 0) {
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ if (--cnt_ext->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
- claim_zero(mlx5_glue->destroy_counter_set(cnt->cs));
- cnt->cs = NULL;
+ claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs));
+ cnt_ext->cs = NULL;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- claim_zero(mlx5_glue->destroy_counters(cnt->cs));
- cnt->cs = NULL;
+ claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs));
+ cnt_ext->cs = NULL;
#endif
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
}
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- if (flow->counter && flow->counter->cs) {
+ if (flow->counter) {
+ struct mlx5_flow_counter_pool *pool;
struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
- (dev, (uintptr_t)(void *)
- flow->counter, NULL);
+ (dev, flow->counter, &pool);
+ struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT
+ (pool, cnt);
struct rte_flow_query_count *qc = data;
uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct ibv_query_counter_set_attr query_cs_attr = {
- .cs = cnt->cs,
+ .cs = cnt_ext->cs,
.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
};
struct ibv_counter_set_data query_out = {
&query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
int err = mlx5_glue->query_counters
- (cnt->cs, counters,
+ (cnt_ext->cs, counters,
RTE_DIM(counters),
IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
{
const struct rte_flow_action_count *count = action->conf;
struct rte_flow *flow = dev_flow->flow;
- struct mlx5_flow_counter *cnt = NULL;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ struct mlx5_flow_counter_pool *pool;
+ struct mlx5_flow_counter *cnt = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext;
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
struct ibv_flow_spec_counter_action counter = {
.type = IBV_FLOW_SPEC_ACTION_COUNT,
"cannot get counter"
" context.");
}
- cnt = flow_verbs_counter_get_by_idx(dev, (uintptr_t)(void *)
- flow->counter, NULL);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
- counter.counter_set_handle = cnt->cs->handle;
+ cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ counter.counter_set_handle = cnt_ext->cs->handle;
flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- counter.counters = cnt->cs;
+ cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ counter.counters = cnt_ext->cs;
flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
return 0;
struct rte_flow_error *error)
{
size_t size = 0;
+ uint32_t handle_idx = 0;
struct mlx5_flow *dev_flow;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
"not free temporary device flow");
return NULL;
}
- dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
/* No multi-thread supporting. */
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
/* Memcpy is used, only size needs to be cleared to 0. */
dev_flow->verbs.size = 0;
dev_flow->verbs.attr.num_of_specs = 0;
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
+ uint32_t handle_idx;
if (!flow)
return;
- LIST_FOREACH(handle, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, handle, next) {
if (handle->ib_flow) {
claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
handle->ib_flow = NULL;
}
+		/* hrxq is a union; touch it only when the matching action flag is set. */
if (handle->hrxq) {
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
mlx5_hrxq_drop_release(dev);
- else
+ handle->hrxq = 0;
+ } else if (handle->act_flags &
+ (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)) {
mlx5_hrxq_release(dev, handle->hrxq);
- handle->hrxq = NULL;
+ handle->hrxq = 0;
+ }
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
if (!flow)
return;
flow_verbs_remove(dev, flow);
- while (!LIST_EMPTY(&flow->dev_handles)) {
- handle = LIST_FIRST(&flow->dev_handles);
- LIST_REMOVE(handle, next);
- rte_free(handle);
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
+ if (!handle)
+ return;
+ flow->dev_handles = handle->next.next;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
}
if (flow->counter) {
flow_verbs_counter_release(dev, flow->counter);
- flow->counter = NULL;
+ flow->counter = 0;
}
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
struct mlx5_flow *dev_flow;
+ struct mlx5_hrxq *hrxq;
+ uint32_t dev_handles;
int err;
int idx;
- for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+ for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
handle = dev_flow->handle;
if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
- handle->hrxq = mlx5_hrxq_drop_new(dev);
- if (!handle->hrxq) {
+ hrxq = mlx5_hrxq_drop_new(dev);
+ if (!hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
goto error;
}
} else {
- struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
MLX5_ASSERT(flow->rss.queue);
- hrxq = mlx5_hrxq_get(dev, flow->rss.key,
+ hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num);
- if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->rss.key,
+ if (!hrxq_idx)
+ hrxq_idx = mlx5_hrxq_new(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
!!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- handle->hrxq = hrxq;
+ handle->hrxq = hrxq_idx;
}
- handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+ MLX5_ASSERT(hrxq);
+ handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
&dev_flow->verbs.attr);
if (!handle->ib_flow) {
rte_flow_error_set(error, errno,
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(handle, &flow->dev_handles, next) {
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ dev_handles, handle, next) {
+		/* hrxq is a union; touch it only when the matching action flag is set. */
if (handle->hrxq) {
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
mlx5_hrxq_drop_release(dev);
- else
+ handle->hrxq = 0;
+ } else if (handle->act_flags &
+ (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)) {
mlx5_hrxq_release(dev, handle->hrxq);
- handle->hrxq = NULL;
+ handle->hrxq = 0;
+ }
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);