From 4ae8825c5085332ee02f2498cd4f215e8321b9e1 Mon Sep 17 00:00:00 2001
From: Xueming Li
Date: Wed, 28 Oct 2020 17:33:24 +0800
Subject: [PATCH] net/mlx5: use indexed pool as ID generator

The ID generation API used an integer pool to save released IDs. To
support multiple flows, it has to be enhanced to be thread safe.

An indexed pool can be used to generate unique IDs by setting the size
of the pool entry to zero. Since a bitmap is used, an extra benefit is
that memory usage drops to roughly one bit per entry. Furthermore, the
indexed pool can be made thread safe by enabling its lock.

This patch leverages the indexed pool to generate IDs and removes the
now unused ID generation API.

Signed-off-by: Xueming Li
Acked-by: Matan Azrad
---
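Note on the mechanism (illustration only, not part of the diff below): with
the entry size set to zero, an indexed pool stores no per-entry data, so the
bitmap of allocated indexes is its whole state and the returned index itself
serves as the ID. The sketch below shows the usage pattern the driver follows
after this patch; the example_* names are placeholders, and the config mirrors
the entries added to mlx5_ipool_cfg[].

    #include "mlx5_utils.h"

    /* A zero-sized, locked ipool: mlx5_ipool_malloc() hands out a unique
     * non-zero index under the pool lock, and mlx5_ipool_free() returns it
     * for reuse, which is all an ID generator needs.
     */
    static const struct mlx5_indexed_pool_config example_id_cfg = {
            .size = 0,      /* no per-entry payload, only the index is used */
            .need_lock = 1, /* serialize allocation/release across threads */
            .type = "example_id_ipool",
    };

    static uint32_t
    example_id_alloc(struct mlx5_indexed_pool *pool)
    {
            uint32_t id = 0;

            /* The returned entry pointer is not used for size == 0 pools. */
            mlx5_ipool_malloc(pool, &id);
            return id; /* 0 means no ID could be allocated. */
    }

    static void
    example_id_free(struct mlx5_indexed_pool *pool, uint32_t id)
    {
            if (id)
                    mlx5_ipool_free(pool, id);
    }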
 drivers/net/mlx5/linux/mlx5_os.c |  13 ---
 drivers/net/mlx5/mlx5.c          | 136 ++++---------------------------
 drivers/net/mlx5/mlx5.h          |   4 +-
 drivers/net/mlx5/mlx5_flow.c     | 116 ++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h     |   7 --
 drivers/net/mlx5/mlx5_flow_dv.c  |   4 +-
 drivers/net/mlx5/mlx5_utils.c    |   5 ++
 7 files changed, 78 insertions(+), 207 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 4ba6d8e395..0b59e74bd7 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1395,17 +1395,6 @@ err_secondary:
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
-		/*
-		 * RSS id is shared with meter flow id. Meter flow id can only
-		 * use the 24 MSB of the register.
-		 */
-		priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
-					     MLX5_MTR_COLOR_BITS);
-		if (!priv->qrss_id_pool) {
-			DRV_LOG(ERR, "can't create flow id pool");
-			err = ENOMEM;
-			goto error;
-		}
 	}
 	if (config->devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
@@ -1492,8 +1481,6 @@ error:
 		close(priv->nl_socket_rdma);
 	if (priv->vmwa_context)
 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
-	if (priv->qrss_id_pool)
-		mlx5_flow_id_pool_release(priv->qrss_id_pool);
 	if (own_domain_id)
 		claim_zero(rte_eth_switch_domain_free(priv->domain_id));
 	mlx5_free(priv);
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index eca97b051a..fa00e45d3a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -321,6 +321,21 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
+	{
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_rss_id_ipool",
+	},
+	{
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_flow_ipool",
+	},
+	{
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 };


@@ -329,127 +344,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {

 #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096

-/**
- * Allocate ID pool structure.
- *
- * @param[in] max_id
- *   The maximum id can be allocated from the pool.
- *
- * @return
- *   Pointer to pool object, NULL value otherwise.
- */
-struct mlx5_flow_id_pool *
-mlx5_flow_id_pool_alloc(uint32_t max_id)
-{
-	struct mlx5_flow_id_pool *pool;
-	void *mem;
-
-	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
-			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-	if (!pool) {
-		DRV_LOG(ERR, "can't allocate id pool");
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-	mem = mlx5_malloc(MLX5_MEM_ZERO,
-			  MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
-			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-	if (!mem) {
-		DRV_LOG(ERR, "can't allocate mem for id pool");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	pool->free_arr = mem;
-	pool->curr = pool->free_arr;
-	pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE;
-	pool->base_index = 0;
-	pool->max_id = max_id;
-	return pool;
-error:
-	mlx5_free(pool);
-	return NULL;
-}
-
-/**
- * Release ID pool structure.
- *
- * @param[in] pool
- *   Pointer to flow id pool object to free.
- */
-void
-mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
-{
-	mlx5_free(pool->free_arr);
-	mlx5_free(pool);
-}
-
-/**
- * Generate ID.
- *
- * @param[in] pool
- *   Pointer to flow id pool.
- * @param[out] id
- *   The generated ID.
- *
- * @return
- *   0 on success, error value otherwise.
- */
-uint32_t
-mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id)
-{
-	if (pool->curr == pool->free_arr) {
-		if (pool->base_index == pool->max_id) {
-			rte_errno = ENOMEM;
-			DRV_LOG(ERR, "no free id");
-			return -rte_errno;
-		}
-		*id = ++pool->base_index;
-		return 0;
-	}
-	*id = *(--pool->curr);
-	return 0;
-}
-
-/**
- * Release ID.
- *
- * @param[in] pool
- *   Pointer to flow id pool.
- * @param[out] id
- *   The generated ID.
- *
- * @return
- *   0 on success, error value otherwise.
- */
-uint32_t
-mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
-{
-	uint32_t size;
-	uint32_t size2;
-	void *mem;
-
-	if (pool->curr == pool->last) {
-		size = pool->curr - pool->free_arr;
-		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
-		MLX5_ASSERT(size2 > size);
-		mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
-				  SOCKET_ID_ANY);
-		if (!mem) {
-			DRV_LOG(ERR, "can't allocate mem for id pool");
-			rte_errno = ENOMEM;
-			return -rte_errno;
-		}
-		memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
-		mlx5_free(pool->free_arr);
-		pool->free_arr = mem;
-		pool->curr = pool->free_arr + size;
-		pool->last = pool->free_arr + size2;
-	}
-	*pool->curr = id;
-	pool->curr++;
-	return 0;
-}
-
 /**
  * Initialize the shared aging list information per port.
  *
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 8e000652a4..5bda23355f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -47,6 +47,9 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
+	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
+	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
+	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_MAX,
 };

@@ -877,7 +880,6 @@ struct mlx5_priv {
 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
 	struct mlx5_dbr_page_list dbrpgs; /* Door-bell pages. */
 	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
-	struct mlx5_flow_id_pool *qrss_id_pool;
 	struct mlx5_hlist *mreg_cp_tbl;
 	/* Hash table of Rx metadata register copy table. */
 	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 47bd1256ba..cd581355ca 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2935,30 +2935,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }

-/* Allocate unique ID for the split Q/RSS subflows. */
-static uint32_t
-flow_qrss_get_id(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t qrss_id, ret;
-
-	ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
-	if (ret)
-		return 0;
-	MLX5_ASSERT(qrss_id);
-	return qrss_id;
-}
-
-/* Free unique ID for the split Q/RSS subflows. */
-static void
-flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	if (qrss_id)
-		mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
-}
-
 /**
  * Release resource related QUEUE/RSS action split.
  *
@@ -2978,7 +2954,9 @@ flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       handle_idx, dev_handle, next)
 		if (dev_handle->split_flow_id)
-			flow_qrss_free_id(dev, dev_handle->split_flow_id);
+			mlx5_ipool_free(priv->sh->ipool
+					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+					dev_handle->split_flow_id);
 }

 static int
@@ -4499,6 +4477,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 		      struct rte_flow_action actions_sfx[],
 		      struct rte_flow_action actions_pre[])
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow_action *tag_action = NULL;
 	struct rte_flow_item *tag_item;
 	struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -4507,7 +4486,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 	const struct rte_flow_action_raw_decap *raw_decap;
 	struct mlx5_rte_flow_item_tag *tag_spec;
 	struct mlx5_rte_flow_item_tag *tag_mask;
-	uint32_t tag_id;
+	uint32_t tag_id = 0;
 	bool copy_vlan = false;

 	/* Prepare the actions for prefix and suffix flow. */
@@ -4556,10 +4535,17 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 	/* Set the tag. */
 	set_tag = (void *)actions_pre;
 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
-	/*
-	 * Get the id from the qrss_pool to make qrss share the id with meter.
-	 */
-	tag_id = flow_qrss_get_id(dev);
+	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+			  &tag_id);
+	if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
+		DRV_LOG(ERR, "Port %u meter flow id exceed max limit.",
+			dev->data->port_id);
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
+		return 0;
+	} else if (!tag_id) {
+		return 0;
+	}
 	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
 	assert(tag_action);
 	tag_action->conf = set_tag;
@@ -4652,6 +4638,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
 			  const struct rte_flow_action *qrss,
 			  int actions_n, struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rte_flow_action_set_tag *set_tag;
 	struct rte_flow_action_jump *jump;
 	const int qrss_idx = qrss - actions;
@@ -4683,7 +4670,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
 		 * representors) domain even if they have coinciding
 		 * IDs.
 		 */
-		flow_id = flow_qrss_get_id(dev);
+		mlx5_ipool_malloc(priv->sh->ipool
+				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
 		if (!flow_id)
 			return rte_flow_error_set(error, ENOMEM,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4895,6 +4883,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
 		       int qrss_action_pos,
 		       struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rte_flow_action_set_tag *set_tag;
 	struct mlx5_rte_flow_item_tag *tag_spec;
 	struct mlx5_rte_flow_item_tag *tag_mask;
@@ -4914,7 +4903,8 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
 		if (ret < 0)
 			return ret;
 		set_tag->id = ret;
-		tag_id = flow_qrss_get_id(dev);
+		mlx5_ipool_malloc(priv->sh->ipool
+				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
 		set_tag->data = tag_id;
 		/* Prepare the suffix subflow items. */
 		tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
@@ -5209,7 +5199,8 @@ exit:
 	 * These ones are included into parent flow list and will be destroyed
 	 * by flow_drv_destroy.
 	 */
-	flow_qrss_free_id(dev, qrss_id);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+			qrss_id);
 	mlx5_free(ext_actions);
 	return ret;
 }
@@ -7576,6 +7567,7 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
 				uint32_t group, uint32_t *table,
 				struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hlist_entry *he;
 	struct tunnel_tbl_entry *tte;
 	union tunnel_tbl_key key = {
@@ -7588,16 +7580,21 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
 	group_hash = tunnel ? tunnel->groups : thub->groups;
 	he = mlx5_hlist_lookup(group_hash, key.val);
 	if (!he) {
-		int ret;
 		tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
 				  sizeof(*tte), 0,
 				  SOCKET_ID_ANY);
 		if (!tte)
 			goto err;
 		tte->hash.key = key.val;
-		ret = mlx5_flow_id_get(thub->table_ids, &tte->flow_table);
-		if (ret) {
-			mlx5_free(tte);
+		mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+				  &tte->flow_table);
+		if (tte->flow_table >= MLX5_MAX_TABLES) {
+			DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+				tte->flow_table);
+			mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+					tte->flow_table);
+			goto err;
+		} else if (!tte->flow_table) {
 			goto err;
 		}
 		tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
@@ -7611,6 +7608,8 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
 	return 0;
 err:
+	if (tte)
+		mlx5_free(tte);
 	return rte_flow_error_set(error, EINVAL,
 				  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
 				  NULL, "tunnel group index not supported");
 }
@@ -8078,14 +8077,14 @@ static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+	struct mlx5_priv *priv = dev->data->dev_private;

 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
 	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
 	LIST_REMOVE(tunnel, chain);
-	mlx5_flow_id_release(id_pool, tunnel->tunnel_id);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
 	mlx5_free(tunnel);
 }
@@ -8108,15 +8107,20 @@ static struct mlx5_flow_tunnel *
 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
-	int ret;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_tunnel *tunnel;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
 	uint32_t id;

-	ret = mlx5_flow_id_get(id_pool, &id);
-	if (ret)
+	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+			  &id);
+	if (id >= MLX5_MAX_TUNNELS) {
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+		return NULL;
+	} else if (!id) {
 		return NULL;
+	}
 	/**
 	 * mlx5 flow tunnel is an auxlilary data structure
 	 * It's not part of IO. No need to allocate it from
@@ -8125,12 +8129,14 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
 			     sizeof(*tunnel), 0, SOCKET_ID_ANY);
 	if (!tunnel) {
-		mlx5_flow_id_pool_release(id_pool);
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
 		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
 	if (!tunnel->groups) {
-		mlx5_flow_id_pool_release(id_pool);
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
 		mlx5_free(tunnel);
 		return NULL;
 	}
@@ -8192,8 +8198,6 @@ void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
 		return;
 	if (!LIST_EMPTY(&thub->tunnels))
 		DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
-	mlx5_flow_id_pool_release(thub->tunnel_ids);
-	mlx5_flow_id_pool_release(thub->table_ids);
 	mlx5_hlist_destroy(thub->groups, NULL, NULL);
 	mlx5_free(thub);
 }
@@ -8208,16 +8212,6 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	if (!thub)
 		return -ENOMEM;
 	LIST_INIT(&thub->tunnels);
-	thub->tunnel_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TUNNELS);
-	if (!thub->tunnel_ids) {
-		err = -rte_errno;
-		goto err;
-	}
-	thub->table_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TABLES);
-	if (!thub->table_ids) {
-		err = -rte_errno;
-		goto err;
-	}
 	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
 	if (!thub->groups) {
 		err = -rte_errno;
@@ -8230,10 +8224,6 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 err:
 	if (thub->groups)
 		mlx5_hlist_destroy(thub->groups, NULL, NULL);
-	if (thub->table_ids)
-		mlx5_flow_id_pool_release(thub->table_ids);
-	if (thub->tunnel_ids)
-		mlx5_flow_id_pool_release(thub->tunnel_ids);
 	if (thub)
 		mlx5_free(thub);
 	return err;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 844c99e5b3..6cac3ab033 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -951,8 +951,6 @@ struct mlx5_flow_tunnel {
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	struct mlx5_flow_id_pool *tunnel_ids;
-	struct mlx5_flow_id_pool *table_ids;
 	struct mlx5_hlist *groups; /** non tunnel groups */
 };

@@ -1213,11 +1211,6 @@ struct mlx5_flow_driver_ops {
 /* mlx5_flow.c */

 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
-struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);
-void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);
-uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
-uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
-			      uint32_t id);
 __extension__
 struct flow_grp_info {
 	uint64_t external:1;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f4f8e157e3..bfcba45594 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8132,8 +8132,8 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
 			mlx5_hlist_remove(tunnel_grp_hash, he);
 			mlx5_free(tte);
 		}
-		mlx5_flow_id_release(mlx5_tunnel_hub(dev)->table_ids,
-				     tunnel_flow_tbl_to_id(table_id));
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+				tunnel_flow_tbl_to_id(table_id));
 		DRV_LOG(DEBUG,
 			"port %u release table_id %#x tunnel %u group %u",
 			dev->data->port_id, table_id,
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 3a1f87a361..7a6b0c69ed 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -362,6 +362,11 @@ mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
 	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
 	rte_bitmap_clear(trunk->bmp, iidx);
 	p = &trunk->data[iidx * pool->cfg.size];
+	/*
+	 * The ipool index should grow continually from small to big,
+	 * some features as metering only accept limited bits of index.
+	 * Random index with MSB set may be rejected.
+	 */
 	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
 	iidx += 1; /* non-zero index. */
 	trunk->free--;
-- 
2.20.1