From: Suanming Mou
Date: Thu, 18 Jun 2020 07:24:44 +0000 (+0800)
Subject: net/mlx5: optimize single counter pool search
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=b1cc226644f1dcfc0400835f6f97ee116c4b4feb;p=dpdk.git

net/mlx5: optimize single counter pool search

For a single counter, when a new counter is allocated, the pool it
belongs to must be found in order to do the query together. Once
millions of counters have been allocated, the pool array in the
counter container becomes very large, and searching it becomes
extremely slow.

Save the minimum and maximum counter IDs for a quick range check of
the current counter ID, and start searching from the last pool in the
container, which will mostly return the needed pool since counter IDs
increase sequentially.

Signed-off-by: Suanming Mou
Acked-by: Matan Azrad
---

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 80e8bf6b3e..97b2249d29 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -457,6 +457,9 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
 	TAILQ_INIT(&sh->cmng.flow_counters);
 	for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+		sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
+		sh->cmng.ccont[i].max_id = -1;
+		sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
 		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
 		rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
 	}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 87e383b1e2..99368c515f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -312,6 +312,12 @@ struct mlx5_drop {
 	MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset)))
 #define MLX5_CNT_TO_AGE(cnt) \
 	((struct mlx5_age_param *)((cnt) + 1))
+/*
+ * The maximum single counter is 0x800000 as MLX5_CNT_BATCH_OFFSET
+ * defines. The pool size is 512, pool index should never reach
+ * INT16_MAX.
+ */
+#define POOL_IDX_INVALID UINT16_MAX
 
 struct mlx5_flow_counter_pool;
 
@@ -420,6 +426,9 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
 struct mlx5_pools_container {
 	rte_atomic16_t n_valid; /* Number of valid pools. */
 	uint16_t n; /* Number of pools. */
+	uint16_t last_pool_idx; /* Last used pool index */
+	int min_id; /* The minimum counter ID in the pools. */
+	int max_id; /* The maximum counter ID in the pools. */
 	rte_spinlock_t resize_sl; /* The resize lock. */
 	struct mlx5_counter_pools pool_list; /* Counter pool list. */
 	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6e4e10c621..9fa8568a23 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4050,6 +4050,28 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
 	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
 }
 
+/**
+ * Check the devx counter belongs to the pool.
+ *
+ * @param[in] pool
+ *   Pointer to the counter pool.
+ * @param[in] id
+ *   The counter devx ID.
+ *
+ * @return
+ *   True if counter belongs to the pool, false otherwise.
+ */
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
+{
+	int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+		   MLX5_COUNTERS_PER_POOL;
+
+	if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+		return true;
+	return false;
+}
+
 /**
  * Get a pool by devx counter ID.
 *
@@ -4065,24 +4087,25 @@ static struct mlx5_flow_counter_pool *
 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
 {
 	uint32_t i;
-	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
 
-	for (i = 0; i < n_valid; i++) {
+	/* Check last used pool. */
+	if (cont->last_pool_idx != POOL_IDX_INVALID &&
+	    flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
+		return cont->pools[cont->last_pool_idx];
+	/* ID out of range means no suitable pool in the container. */
+	if (id > cont->max_id || id < cont->min_id)
+		return NULL;
+	/*
+	 * Find the pool from the end of the container, since mostly counter
+	 * ID is sequence increasing, and the last pool should be the needed
+	 * one.
+	 */
+	i = rte_atomic16_read(&cont->n_valid);
+	while (i--) {
 		struct mlx5_flow_counter_pool *pool = cont->pools[i];
-		int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
-			   MLX5_COUNTERS_PER_POOL;
 
-		if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
-			/*
-			 * Move the pool to the head, as counter allocate
-			 * always gets the first pool in the container.
-			 */
-			if (pool != TAILQ_FIRST(&cont->pool_list)) {
-				TAILQ_REMOVE(&cont->pool_list, pool, next);
-				TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
-			}
+		if (flow_dv_is_counter_in_pool(pool, id))
 			return pool;
-		}
 	}
 	return NULL;
 }
@@ -4337,6 +4360,15 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
 	pool->index = n_valid;
 	cont->pools[n_valid] = pool;
+	if (!batch) {
+		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+		if (base < cont->min_id)
+			cont->min_id = base;
+		if (base > cont->max_id)
+			cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+		cont->last_pool_idx = pool->index;
+	}
 	/* Pool initialization must be updated before host thread access. */
 	rte_cio_wmb();
 	rte_atomic16_add(&cont->n_valid, 1);
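
For readers outside the mlx5 code base, the following is a minimal
standalone sketch of the search strategy the patch implements, using
illustrative names (counter_container, counter_pool, POOL_SIZE,
IDX_INVALID) that are not part of the driver: try the last used pool
first, reject IDs outside the tracked [min_id, max_id] range, and only
then scan the pool array from the end, since counter IDs grow
sequentially and the newest pools are the most likely match.

#include <stdbool.h>
#include <stdint.h>

#define POOL_SIZE 512           /* counters per pool */
#define IDX_INVALID UINT16_MAX  /* no pool used yet */

struct counter_pool {
	int base_id;                 /* first counter ID served by this pool */
};

struct counter_container {
	struct counter_pool **pools; /* pool array, oldest first */
	uint16_t n_valid;            /* number of valid pools */
	uint16_t last_pool_idx;      /* most recently used pool, or IDX_INVALID */
	int min_id;                  /* lowest counter ID across all pools */
	int max_id;                  /* highest counter ID across all pools */
};

static bool
pool_holds_id(const struct counter_pool *pool, int id)
{
	return id >= pool->base_id && id < pool->base_id + POOL_SIZE;
}

static struct counter_pool *
find_pool_by_id(const struct counter_container *cont, int id)
{
	uint16_t i;

	/* Fast path: the last used pool usually holds the newest IDs. */
	if (cont->last_pool_idx != IDX_INVALID &&
	    pool_holds_id(cont->pools[cont->last_pool_idx], id))
		return cont->pools[cont->last_pool_idx];
	/* IDs outside [min_id, max_id] cannot be in any pool. */
	if (id < cont->min_id || id > cont->max_id)
		return NULL;
	/* Scan backwards: sequentially growing IDs land in the newest pools. */
	i = cont->n_valid;
	while (i--)
		if (pool_holds_id(cont->pools[i], id))
			return cont->pools[i];
	return NULL;
}

Note that the patch also drops the TAILQ_REMOVE/TAILQ_INSERT_HEAD
reordering from flow_dv_find_pool_by_id(), so the lookup no longer
modifies the pool list.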