diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9032d5028e..b8d03d4df5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -73,13 +73,6 @@ union flow_dv_attr {
 	uint32_t attr;
 };
 
-static struct mlx5_flow_counter_pool *
-flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, bool fallback,
-			int id);
-static struct mlx5_pools_container *
-flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
-		    uint32_t batch);
-
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -3827,130 +3820,6 @@ flow_dv_modify_hdr_resource_register
 	return 0;
 }
 
-/**
- * Get or create a flow counter.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] shared
- *   Indicate if this counter is shared with other flows.
- * @param[in] id
- *   Counter identifier.
- *
- * @return
- *   Index to flow counter on success, 0 otherwise and rte_errno is set.
- */
-static uint32_t
-flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
-			       uint32_t id)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
-	struct mlx5_flow_counter_pool *pool;
-	struct mlx5_flow_counter *cnt = NULL;
-	struct mlx5_devx_obj *dcs = NULL;
-	uint32_t offset;
-
-	if (!priv->config.devx) {
-		rte_errno = ENOTSUP;
-		return 0;
-	}
-	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
-	if (!dcs)
-		return 0;
-	pool = flow_dv_find_pool_by_id(cont, true, dcs->id);
-	if (!pool) {
-		cont = flow_dv_pool_create(dev, dcs, 0);
-		if (!cont) {
-			mlx5_devx_cmd_destroy(dcs);
-			rte_errno = ENOMEM;
-			return 0;
-		}
-		pool = TAILQ_FIRST(&cont->pool_list);
-	}
-	offset = dcs->id % MLX5_COUNTERS_PER_POOL;
-	cnt = &pool->counters_raw[offset];
-	struct mlx5_flow_counter tmpl = {
-		.shared = shared,
-		.ref_cnt = 1,
-		.id = id,
-		.dcs = dcs,
-	};
-	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
-	if (!tmpl.action) {
-		claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
-		rte_errno = errno;
-		return 0;
-	}
-	*cnt = tmpl;
-	return MLX5_MAKE_CNT_IDX(pool->index, offset);
-}
-
-/**
- * Release a flow counter.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] counter
- *   Index to the counter handler.
- */
-static void
-flow_dv_counter_release_fallback(struct rte_eth_dev *dev __rte_unused,
-				 struct mlx5_flow_counter *counter)
-{
-	if (!counter)
-		return;
-	if (--counter->ref_cnt == 0) {
-		claim_zero(mlx5_glue->destroy_flow_action(counter->action));
-		claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
-		counter->action = NULL;
-		counter->dcs = NULL;
-	}
-}
-
-/**
- * Query a devx flow counter.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] cnt
- *   Pointer to the flow counter.
- * @param[out] pkts
- *   The statistics value of packets.
- * @param[out] bytes
- *   The statistics value of bytes.
- *
- * @return
- *   0 on success, otherwise a negative errno value and rte_errno is set.
- */
-static inline int
-_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
-			      struct mlx5_flow_counter *cnt, uint64_t *pkts,
-			      uint64_t *bytes)
-{
-	return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
-						0, NULL, NULL, 0);
-}
-
-/**
- * Get a pool by a counter.
- *
- * @param[in] cnt
- *   Pointer to the counter.
- *
- * @return
- *   The counter pool.
- */
-static struct mlx5_flow_counter_pool *
-flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
-{
-	if (!cnt->batch) {
-		cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
-		return (struct mlx5_flow_counter_pool *)cnt - 1;
-	}
-	return cnt->pool;
-}
-
 /**
  * Get DV flow counter by index.
  *
@@ -3993,8 +3862,6 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
  *
  * @param[in] cont
  *   Pointer to the counter container.
- * @param[in] fallback
- *   Fallback mode.
 * @param[in] id
 *   The counter devx ID.
 *
@@ -4002,16 +3869,15 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
 *   The counter pool pointer if exists, NULL otherwise,
 */
 static struct mlx5_flow_counter_pool *
-flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, bool fallback,
-			int id)
+flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
 {
 	uint32_t i;
 	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
 
 	for (i = 0; i < n_valid; i++) {
 		struct mlx5_flow_counter_pool *pool = cont->pools[i];
-		int base = ((fallback ? pool->dcs_id : pool->min_dcs->id) /
-			   MLX5_COUNTERS_PER_POOL) * MLX5_COUNTERS_PER_POOL;
+		int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+			   MLX5_COUNTERS_PER_POOL;
 
 		if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
 			/*
@@ -4114,12 +3980,14 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
 			MLX5_CNT_CONTAINER(priv->sh, batch, 0);
 	struct mlx5_pools_container *new_cont =
 			MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
-	struct mlx5_counter_stats_mem_mng *mem_mng;
+	struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
 	uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
 	int i;
 
-	if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
+	/* Fallback mode has no background thread. Skip the check. */
+	if (!priv->counter_fallback &&
+	    cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
 		/* The last resize still hasn't detected by the host thread. */
 		rte_errno = EAGAIN;
 		return NULL;
@@ -4132,16 +4000,29 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
 	if (cont->n)
 		memcpy(new_cont->pools, cont->pools, cont->n *
 		       sizeof(struct mlx5_flow_counter_pool *));
-	mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
-		MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
-	if (!mem_mng) {
-		rte_free(new_cont->pools);
-		return NULL;
+	/*
+	 * Fallback mode query the counter directly, no background query
+	 * resources are needed.
+	 */
+	if (!priv->counter_fallback) {
+		mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
+			MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
+		if (!mem_mng) {
+			rte_free(new_cont->pools);
+			return NULL;
+		}
+		for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
+			LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
+					 mem_mng->raws +
					 MLX5_CNT_CONTAINER_RESIZE +
+					 i, next);
+	} else {
+		/*
+		 * Release the old container pools directly as no background
+		 * thread helps that.
+		 */
+		rte_free(cont->pools);
 	}
-	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
-		LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
-				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
-				 i, next);
 	new_cont->n = resize;
 	rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
 	TAILQ_INIT(&new_cont->pool_list);
@@ -4159,7 +4040,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] cnt
- *   Pointer to the flow counter.
+ *   Index to the flow counter.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
@@ -4169,17 +4050,23 @@ flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
 *   0 on success, otherwise a negative errno value and rte_errno is set.
 */
 static inline int
-_flow_dv_query_count(struct rte_eth_dev *dev,
-		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
+_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
 		     uint64_t *bytes)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_counter_pool *pool =
-			flow_dv_counter_pool_get(cnt);
-	int offset = cnt - &pool->counters_raw[0];
+	struct mlx5_flow_counter_pool *pool = NULL;
+	struct mlx5_flow_counter *cnt;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
+	int offset;
 
-	if (priv->counter_fallback)
-		return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+	MLX5_ASSERT(pool);
+	if (counter < MLX5_CNT_BATCH_OFFSET) {
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+		if (priv->counter_fallback)
+			return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
+					0, pkts, bytes, 0, NULL, NULL, 0);
+	}
 
 	rte_spinlock_lock(&pool->sl);
 	/*
@@ -4187,10 +4074,11 @@ _flow_dv_query_count(struct rte_eth_dev *dev,
	 * current allocated in parallel to the host reading.
	 * In this case the new counter values must be reported as 0.
	 */
-	if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
+	if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
 		*pkts = 0;
 		*bytes = 0;
 	} else {
+		offset = cnt - &pool->counters_raw[0];
 		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
 		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
 	}
@@ -4229,18 +4117,18 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 		if (!cont)
 			return NULL;
 	}
-	size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
-			sizeof(struct mlx5_flow_counter);
+	size = sizeof(*pool);
+	if (!batch)
+		size += MLX5_COUNTERS_PER_POOL *
+			sizeof(struct mlx5_flow_counter_ext);
 	pool = rte_calloc(__func__, 1, size, 0);
 	if (!pool) {
 		rte_errno = ENOMEM;
 		return NULL;
 	}
-	if (priv->counter_fallback)
-		pool->dcs_id = dcs->id;
-	else
-		pool->min_dcs = dcs;
-	pool->raw = cont->init_mem_mng->raws + n_valid %
+	pool->min_dcs = dcs;
+	if (!priv->counter_fallback)
+		pool->raw = cont->init_mem_mng->raws + n_valid %
 		MLX5_CNT_CONTAINER_RESIZE;
 	pool->raw_hw = NULL;
 	rte_spinlock_init(&pool->sl);
@@ -4252,7 +4140,13 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
	 * without the last query finished and stats updated to the memory.
	 */
 	rte_atomic64_set(&pool->start_query_gen, 0x2);
-	rte_atomic64_set(&pool->end_query_gen, 0x2);
+	/*
+	 * There's no background query thread for fallback mode, set the
+	 * end_query_gen to the maximum value since no need to wait for
+	 * statistics update.
+	 */
+	rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ?
+			 INT64_MAX : 0x2);
 	TAILQ_INIT(&pool->counters);
 	TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
 	pool->index = n_valid;
@@ -4295,7 +4189,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
 		if (!dcs)
 			return NULL;
-		pool = flow_dv_find_pool_by_id(cont, false, dcs->id);
+		pool = flow_dv_find_pool_by_id(cont, dcs->id);
 		if (!pool) {
 			cont = flow_dv_pool_create(dev, dcs, batch);
 			if (!cont) {
@@ -4307,9 +4201,10 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 			rte_atomic64_set(&pool->a64_dcs,
 					 (int64_t)(uintptr_t)dcs);
 		}
-		cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
+		i = dcs->id % MLX5_COUNTERS_PER_POOL;
+		cnt = &pool->counters_raw[i];
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
-		cnt->dcs = dcs;
+		MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
 		*cnt_free = cnt;
 		return cont;
 	}
@@ -4328,7 +4223,6 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 	pool = TAILQ_FIRST(&cont->pool_list);
 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
 		cnt = &pool->counters_raw[i];
-		cnt->pool = pool;
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 	}
 	*cnt_free = &pool->counters_raw[0];
@@ -4346,13 +4240,13 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 *   mlx5 flow counter pool in the container,
 *
 * @return
- *   NULL if not existed, otherwise pointer to the shared counter.
+ *   NULL if not existed, otherwise pointer to the shared extend counter.
 */
-static struct mlx5_flow_counter *
+static struct mlx5_flow_counter_ext *
 flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
 			      struct mlx5_flow_counter_pool **ppool)
 {
-	static struct mlx5_flow_counter *cnt;
+	static struct mlx5_flow_counter_ext *cnt;
 	struct mlx5_flow_counter_pool *pool;
 	uint32_t i;
 	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
@@ -4360,10 +4254,10 @@ flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
 	for (i = 0; i < n_valid; i++) {
 		pool = cont->pools[i];
 		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
-			cnt = &pool->counters_raw[i];
+			cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
 			if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
 				if (ppool)
-					*ppool = pool;
+					*ppool = cont->pools[i];
 				return cnt;
 			}
 		}
@@ -4393,6 +4287,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool = NULL;
 	struct mlx5_flow_counter *cnt_free = NULL;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
	/*
	 * Currently group 0 flow counter cannot be assigned to a flow if it is
	 * not the first one in the batch counter allocation, so it is better
	 * A counter can be shared between different groups so need to take
	 * shared counters from the single container.
	 */
-	uint32_t batch = (group && !shared) ? 1 : 0;
+	uint32_t batch = (group && !shared && !priv->counter_fallback) ?
+			 1 : 0;
 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
 	uint32_t cnt_idx;
@@ -4411,21 +4306,19 @@
 	if (!priv->config.devx) {
 		rte_errno = ENOTSUP;
 		return 0;
 	}
 	if (shared) {
-		cnt_free = flow_dv_counter_shared_search(cont, id, &pool);
-		if (cnt_free) {
-			if (cnt_free->ref_cnt + 1 == 0) {
+		cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+		if (cnt_ext) {
+			if (cnt_ext->ref_cnt + 1 == 0) {
 				rte_errno = E2BIG;
 				return 0;
 			}
-			cnt_free->ref_cnt++;
+			cnt_ext->ref_cnt++;
 			cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
-				  (cnt_free - pool->counters_raw) + 1;
+				  (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
+				  + 1;
 			return cnt_idx;
 		}
 	}
-	if (priv->counter_fallback)
-		return flow_dv_counter_alloc_fallback(dev, shared, id);
-
 	/* Pools which has a free counters are in the start. */
 	TAILQ_FOREACH(pool, &cont->pool_list, next) {
 		/*
@@ -4449,7 +4342,8 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 			return 0;
 		pool = TAILQ_FIRST(&cont->pool_list);
 	}
-	cnt_free->batch = batch;
+	if (!batch)
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
 	/* Create a DV counter action only in the first time usage. */
 	if (!cnt_free->action) {
 		uint16_t offset;
@@ -4460,7 +4354,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 			dcs = pool->min_dcs;
 		} else {
 			offset = 0;
-			dcs = cnt_free->dcs;
+			dcs = cnt_ext->dcs;
 		}
 		cnt_free->action = mlx5_glue->dv_create_flow_action_counter
 					(dcs->obj, offset);
@@ -4469,14 +4363,19 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 			return 0;
 		}
 	}
+	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
+				    (cnt_free - pool->counters_raw));
+	cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
 	/* Update the counter reset values. */
-	if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
+	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
 				 &cnt_free->bytes))
 		return 0;
-	cnt_free->shared = shared;
-	cnt_free->ref_cnt = 1;
-	cnt_free->id = id;
-	if (!priv->sh->cmng.query_thread_on)
+	if (cnt_ext) {
+		cnt_ext->shared = shared;
+		cnt_ext->ref_cnt = 1;
+		cnt_ext->id = id;
+	}
+	if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
 		/* Start the asynchronous batch query by the host thread. */
 		mlx5_set_query_alarm(priv->sh);
 	TAILQ_REMOVE(&pool->counters, cnt_free, next);
@@ -4485,9 +4384,6 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		TAILQ_REMOVE(&cont->pool_list, pool, next);
 		TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
 	}
-	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
-				    (cnt_free - pool->counters_raw));
-	cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
 	return cnt_idx;
 }
 
@@ -4502,28 +4398,28 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 static void
 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_counter_pool *pool;
+	struct mlx5_flow_counter_pool *pool = NULL;
 	struct mlx5_flow_counter *cnt;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
 
 	if (!counter)
 		return;
 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
-	if (priv->counter_fallback) {
-		flow_dv_counter_release_fallback(dev, cnt);
-		return;
-	}
-	if (--cnt->ref_cnt == 0) {
-		/* Put the counter in the end - the last updated one. */
-		TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
-		/*
-		 * Counters released between query trigger and handler need
-		 * to wait the next round of query. Since the packets arrive
-		 * in the gap period will not be taken into account to the
-		 * old counter.
-		 */
-		cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
+	MLX5_ASSERT(pool);
+	if (counter < MLX5_CNT_BATCH_OFFSET) {
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+		if (cnt_ext && --cnt_ext->ref_cnt)
+			return;
 	}
+	/* Put the counter in the end - the last updated one. */
+	TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+	/*
+	 * Counters released between query trigger and handler need
+	 * to wait the next round of query. Since the packets arrive
+	 * in the gap period will not be taken into account to the
+	 * old counter.
+	 */
+	cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
 }
 
 /**
@@ -8525,7 +8421,7 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
 
 		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
 						 NULL);
-		int err = _flow_dv_query_count(dev, cnt, &pkts,
+		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
 					       &bytes);
 
 		if (err)
@@ -9035,10 +8931,10 @@ flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
 
 	if (!priv->config.devx)
 		return -1;
-	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
-	ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
+	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
 	if (ret)
 		return -1;
+	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
 	*pkts = inn_pkts - cnt->hits;
 	*bytes = inn_bytes - cnt->bytes;
 	if (clear) {
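
For reference, the hunks above replace the pointer-based counter handle with a plain uint32_t index: index 0 means "no counter", non-batch ("extend") counters sit below MLX5_CNT_BATCH_OFFSET and keep their per-counter DevX object in a side array addressed by the same in-pool offset, and batch counters sit at or above it, which is exactly what the MLX5_MAKE_CNT_IDX(), "cnt_idx += batch * MLX5_CNT_BATCH_OFFSET" and "counter < MLX5_CNT_BATCH_OFFSET" lines implement. Below is a minimal standalone sketch of that encode/decode arithmetic; the two constant values are illustrative assumptions, not the authoritative definitions from the mlx5 headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholder values; the real ones live in the mlx5 headers. */
#define COUNTERS_PER_POOL 512u
#define CNT_BATCH_OFFSET  0x800000u

/* Encode a handle: index 0 is reserved as "no counter", hence the +1. */
static uint32_t
make_cnt_idx(uint32_t pool_idx, uint32_t offset, int batch)
{
	uint32_t idx = pool_idx * COUNTERS_PER_POOL + offset + 1;

	return batch ? idx + CNT_BATCH_OFFSET : idx;
}

/* Decode a handle back into pool index, in-pool offset and batch flag. */
static void
parse_cnt_idx(uint32_t idx, uint32_t *pool_idx, uint32_t *offset, int *batch)
{
	*batch = idx >= CNT_BATCH_OFFSET;
	idx = (*batch ? idx - CNT_BATCH_OFFSET : idx) - 1;
	*pool_idx = idx / COUNTERS_PER_POOL;
	*offset = idx % COUNTERS_PER_POOL;
}

int
main(void)
{
	uint32_t pool_idx, offset;
	int batch;

	/* Round-trip pool 3, offset 42, batch range: prints "pool 3, offset 42, batch 1". */
	parse_cnt_idx(make_cnt_idx(3, 42, 1), &pool_idx, &offset, &batch);
	printf("pool %u, offset %u, batch %d\n", pool_idx, offset, batch);
	return 0;
}

The decode half mirrors what flow_dv_counter_get_by_idx() does in the driver: only the extend (non-batch) range ever needs the side array lookup, which is why the fallback query and the shared-counter bookkeeping above are gated on "counter < MLX5_CNT_BATCH_OFFSET".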