From 632f0f19056fdccfcaf9a1881240a406f6747316 Mon Sep 17 00:00:00 2001
From: Suanming Mou
Date: Thu, 18 Jun 2020 15:24:43 +0800
Subject: [PATCH] net/mlx5: manage shared counters in three-level table

Currently, to check whether a shared counter with the same ID already
exists, the code has to loop over all the counter pools and scan the
counters in them. Adding the shared counters to a list does not help
much either once there are thousands of shared counters in that list.

Looking up the counter index saved in the relevant entry of a
three-level table is more efficient. This patch introduces such a
three-level table to save the counter index for each shared counter ID.
The next time the same ID is requested, the table entry for that ID
returns the counter index directly and no search is needed.

Signed-off-by: Suanming Mou
Acked-by: Matan Azrad
---
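Illustration only (not part of the change): a minimal, self-contained
sketch of the three-level lookup idea described above. The structure
layout, the 10/10/12-bit split of the 32-bit ID and all names below are
assumptions made for this sketch; the driver itself uses the existing
mlx5_l3t_create()/mlx5_l3t_get_entry()/mlx5_l3t_set_entry()/
mlx5_l3t_clear_entry() helpers with an MLX5_L3T_TYPE_DWORD table, as
the hunks below show.

/* Sketch only: a three-level table mapping a 32-bit shared counter ID
 * to a 32-bit counter index. Missing entries read back as 0.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define L1_BITS 10
#define L2_BITS 10
#define L3_BITS 12			/* 10 + 10 + 12 = 32 ID bits */
#define L2_SIZE (1u << L2_BITS)
#define L3_SIZE (1u << L3_BITS)

struct l3_leaf { uint32_t dword[L3_SIZE]; };	/* counter indices */
struct l2_node { struct l3_leaf *leaf[L2_SIZE]; };
struct l3t_tbl { struct l2_node *node[1u << L1_BITS]; };

/* Return the counter index stored for @id, or 0 if nothing was stored. */
static uint32_t
l3t_get(struct l3t_tbl *tbl, uint32_t id)
{
	struct l2_node *n = tbl->node[id >> (L2_BITS + L3_BITS)];
	struct l3_leaf *l = n ? n->leaf[(id >> L3_BITS) & (L2_SIZE - 1)] : NULL;

	return l ? l->dword[id & (L3_SIZE - 1)] : 0;
}

/* Store @cnt_idx for @id, allocating the middle/leaf levels on demand.
 * Return 0 on success, -1 on allocation failure.
 */
static int
l3t_set(struct l3t_tbl *tbl, uint32_t id, uint32_t cnt_idx)
{
	uint32_t i1 = id >> (L2_BITS + L3_BITS);
	uint32_t i2 = (id >> L3_BITS) & (L2_SIZE - 1);

	if (!tbl->node[i1] &&
	    !(tbl->node[i1] = calloc(1, sizeof(*tbl->node[i1]))))
		return -1;
	if (!tbl->node[i1]->leaf[i2] &&
	    !(tbl->node[i1]->leaf[i2] = calloc(1, sizeof(*tbl->node[i1]->leaf[i2]))))
		return -1;
	tbl->node[i1]->leaf[i2]->dword[id & (L3_SIZE - 1)] = cnt_idx;
	return 0;
}

int
main(void)
{
	static struct l3t_tbl tbl;	/* zero-initialized top level */

	l3t_set(&tbl, 0xdeadbeef, 42);			/* shared ID -> index */
	printf("%u\n", l3t_get(&tbl, 0xdeadbeef));	/* 42, no search */
	printf("%u\n", l3t_get(&tbl, 7));		/* 0, never stored */
	return 0;	/* middle/leaf tables intentionally leaked in sketch */
}

The point of splitting the ID over three levels is that the top table
stays small while the middle and leaf tables are only allocated for ID
ranges actually in use, so a sparse 32-bit ID space stays cheap and
every lookup is a fixed three dereferences instead of a scan over all
counter pools.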
 drivers/net/mlx5/mlx5.c         | 13 ++++++++
 drivers/net/mlx5/mlx5.h         |  1 +
 drivers/net/mlx5/mlx5_flow_dv.c | 53 +++++++++++++++++++--------------
 3 files changed, 45 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index c13e71608a..80e8bf6b3e 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -716,6 +716,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
 			      &sh->share_cache.dereg_mr_cb);
 	mlx5_os_dev_shared_handler_install(sh);
+	sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+	if (!sh->cnt_id_tbl) {
+		err = rte_errno;
+		goto error;
+	}
 	mlx5_flow_aging_init(sh);
 	mlx5_flow_counters_mng_init(sh);
 	mlx5_flow_ipool_create(sh, config);
@@ -732,6 +737,10 @@ exit:
 error:
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	MLX5_ASSERT(sh);
+	if (sh->cnt_id_tbl) {
+		mlx5_l3t_destroy(sh->cnt_id_tbl);
+		sh->cnt_id_tbl = NULL;
+	}
 	if (sh->tis)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
@@ -793,6 +802,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	mlx5_flow_counters_mng_close(sh);
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
+	if (sh->cnt_id_tbl) {
+		mlx5_l3t_destroy(sh->cnt_id_tbl);
+		sh->cnt_id_tbl = NULL;
+	}
 	if (sh->pd)
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->tis)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 8ecb59c93b..87e383b1e2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -565,6 +565,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
 	/* Memory Pool for mlx5 flow resources. */
+	struct mlx5_l3t_tbl *cnt_id_tbl; /* Shared counter lookup table. */
 	/* Shared interrupt handler section. */
 	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
 	struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5bb252e697..6e4e10c621 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4453,8 +4453,8 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 /**
  * Search for existed shared counter.
  *
- * @param[in] cont
- *   Pointer to the relevant counter pool container.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
  * @param[in] id
  *   The shared counter ID to search.
 * @param[out] ppool
@@ -4464,26 +4464,22 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
  *   NULL if not existed, otherwise pointer to the shared extend counter.
  */
 static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
+flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
 			      struct mlx5_flow_counter_pool **ppool)
 {
-	struct mlx5_flow_counter_ext *cnt;
-	struct mlx5_flow_counter_pool *pool;
-	uint32_t i, j;
-	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
+	struct mlx5_priv *priv = dev->data->dev_private;
+	union mlx5_l3t_data data;
+	uint32_t cnt_idx;
 
-	for (i = 0; i < n_valid; i++) {
-		pool = cont->pools[i];
-		for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
-			cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
-			if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
-				if (ppool)
-					*ppool = cont->pools[i];
-				return cnt;
-			}
-		}
-	}
-	return NULL;
+	if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
+		return NULL;
+	cnt_idx = data.dword;
+	/*
+	 * Shared counters don't have age info. The counter extend is after
+	 * the counter data structure.
+	 */
+	return (struct mlx5_flow_counter_ext *)
+		((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
 }
 
 /**
@@ -4529,7 +4525,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		return 0;
 	}
 	if (shared) {
-		cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+		cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
 		if (cnt_ext) {
 			if (cnt_ext->ref_cnt + 1 == 0) {
 				rte_errno = E2BIG;
@@ -4597,6 +4593,13 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		cnt_ext->shared = shared;
 		cnt_ext->ref_cnt = 1;
 		cnt_ext->id = id;
+		if (shared) {
+			union mlx5_l3t_data data;
+
+			data.dword = cnt_idx;
+			if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
+				return 0;
+		}
 	}
 	if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
 		/* Start the asynchronous batch query by the host thread. */
@@ -4679,6 +4682,7 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
 static void
 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool = NULL;
 	struct mlx5_flow_counter *cnt;
 	struct mlx5_flow_counter_ext *cnt_ext = NULL;
@@ -4689,8 +4693,13 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 	MLX5_ASSERT(pool);
 	if (counter < MLX5_CNT_BATCH_OFFSET) {
 		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
-		if (cnt_ext && --cnt_ext->ref_cnt)
-			return;
+		if (cnt_ext) {
+			if (--cnt_ext->ref_cnt)
+				return;
+			if (cnt_ext->shared)
+				mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+						     cnt_ext->id);
+		}
 	}
 	if (IS_AGE_POOL(pool))
 		flow_dv_counter_remove_from_age(dev, counter, cnt);
-- 
2.20.1