From: Dong Zhou
Date: Wed, 29 Apr 2020 02:25:08 +0000 (+0300)
Subject: net/mlx5: modify ext-counter memory allocation
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=8d93c830e450;p=dpdk.git

net/mlx5: modify ext-counter memory allocation

Currently, the counter pool allocates 512 ext-counter entries for
non-batch counters as a separate block placed after the 512
basic-counter entries. With this layout it is not easy to get the
ext-counter pointer from the corresponding basic-counter pointer, and
it is also hard to extend the pool with other potential per-counter
memory types.

Instead, allocate each counter's ext-counter memory together with its
basic-counter memory as a single piece, and do the same for any further
per-counter memory type. Since one piece of memory then holds all the
memory types of one counter, each type can be reached by a simple
offset from the counter pointer.

Signed-off-by: Dong Zhou
Acked-by: Matan Azrad
---

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 75ada96cc5..dbe22d6e80 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -505,10 +505,10 @@ mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
 				(mlx5_devx_cmd_destroy(pool->min_dcs));
 		}
 		for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
-			if (pool->counters_raw[j].action)
+			if (MLX5_POOL_GET_CNT(pool, j)->action)
 				claim_zero
 				(mlx5_glue->destroy_flow_action
-					(pool->counters_raw[j].action));
+					(MLX5_POOL_GET_CNT(pool, j)->action));
 			if (!batch && MLX5_GET_POOL_CNT_EXT
 			    (pool, j)->dcs)
 				claim_zero(mlx5_devx_cmd_destroy
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 50349abf34..4d9984f603 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -222,6 +222,19 @@ struct mlx5_drop {
 #define MLX5_COUNTERS_PER_POOL 512
 #define MLX5_MAX_PENDING_QUERIES 4
 #define MLX5_CNT_CONTAINER_RESIZE 64
+#define CNT_SIZE (sizeof(struct mlx5_flow_counter))
+#define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext))
+
+#define CNT_POOL_TYPE_EXT (1 << 0)
+#define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT)
+#define MLX5_CNT_LEN(pool) \
+	(CNT_SIZE + (IS_EXT_POOL(pool) ? CNTEXT_SIZE : 0))
+#define MLX5_POOL_GET_CNT(pool, index) \
+	((struct mlx5_flow_counter *) \
+	((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool))))
+#define MLX5_CNT_ARRAY_IDX(pool, cnt) \
+	((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \
+	MLX5_CNT_LEN(pool)))
 /*
  * The pool index and offset of counter in the pool array makes up the
  * counter index. In case the counter is from pool 0 and offset 0, it
@@ -230,11 +243,10 @@ struct mlx5_drop {
  */
 #define MLX5_MAKE_CNT_IDX(pi, offset) \
 	((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
-#define MLX5_CNT_TO_CNT_EXT(pool, cnt) (&((struct mlx5_flow_counter_ext *) \
-			    ((pool) + 1))[((cnt) - (pool)->counters_raw)])
+#define MLX5_CNT_TO_CNT_EXT(cnt) \
+	((struct mlx5_flow_counter_ext *)((cnt) + 1))
 #define MLX5_GET_POOL_CNT_EXT(pool, offset) \
-	(&((struct mlx5_flow_counter_ext *) \
-			    ((pool) + 1))[offset])
+	MLX5_CNT_TO_CNT_EXT(MLX5_POOL_GET_CNT((pool), (offset)))
 
 struct mlx5_flow_counter_pool;
 
@@ -287,11 +299,10 @@ struct mlx5_flow_counter_pool {
 	rte_atomic64_t start_query_gen; /* Query start round. */
 	rte_atomic64_t end_query_gen; /* Query end round. */
 	uint32_t index; /* Pool index in container. */
+	uint32_t type: 2; /* Memory type behind the counter array. */
 	rte_spinlock_t sl; /* The pool lock. */
 	struct mlx5_counter_stats_raw *raw;
 	struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
-	struct mlx5_flow_counter counters_raw[MLX5_COUNTERS_PER_POOL];
-	/* The pool counters memory. */
 };
 
 struct mlx5_counter_stats_raw;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 18d9d302e3..99df3c5df5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3906,7 +3906,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
 	MLX5_ASSERT(pool);
 	if (ppool)
 		*ppool = pool;
-	return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL];
+	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
 }
 
 /**
@@ -4114,7 +4114,7 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
 	MLX5_ASSERT(pool);
 	if (counter < MLX5_CNT_BATCH_OFFSET) {
-		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(cnt);
 		if (priv->counter_fallback)
 			return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
 					0, pkts, bytes, 0, NULL, NULL, 0);
@@ -4130,7 +4130,7 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
 		*pkts = 0;
 		*bytes = 0;
 	} else {
-		offset = cnt - &pool->counters_raw[0];
+		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
 		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
 		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
 	}
@@ -4170,9 +4170,9 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 		return NULL;
 	}
 	size = sizeof(*pool);
+	size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
 	if (!batch)
-		size += MLX5_COUNTERS_PER_POOL *
-		       sizeof(struct mlx5_flow_counter_ext);
+		size += MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE;
 	pool = rte_calloc(__func__, 1, size, 0);
 	if (!pool) {
 		rte_errno = ENOMEM;
@@ -4183,6 +4183,9 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	pool->raw = cont->init_mem_mng->raws + n_valid %
 			MLX5_CNT_CONTAINER_RESIZE;
 	pool->raw_hw = NULL;
+	pool->type = 0;
+	if (!batch)
+		pool->type |= CNT_POOL_TYPE_EXT;
 	rte_spinlock_init(&pool->sl);
 	/*
 	 * The generation of the new allocated counters in this pool is 0, 2 in
@@ -4254,7 +4257,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 				(int64_t)(uintptr_t)dcs);
 		}
 		i = dcs->id % MLX5_COUNTERS_PER_POOL;
-		cnt = &pool->counters_raw[i];
+		cnt = MLX5_POOL_GET_CNT(pool, i);
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 		MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
 		*cnt_free = cnt;
@@ -4274,10 +4277,10 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 	}
 	pool = TAILQ_FIRST(&cont->pool_list);
 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
-		cnt = &pool->counters_raw[i];
+		cnt = MLX5_POOL_GET_CNT(pool, i);
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 	}
-	*cnt_free = &pool->counters_raw[0];
+	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
 	return cont;
 }
 
@@ -4395,14 +4398,14 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		pool = TAILQ_FIRST(&cont->pool_list);
 	}
 	if (!batch)
-		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(cnt_free);
 	/* Create a DV counter action only in the first time usage. */
 	if (!cnt_free->action) {
 		uint16_t offset;
 		struct mlx5_devx_obj *dcs;
 
 		if (batch) {
-			offset = cnt_free - &pool->counters_raw[0];
+			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
 			dcs = pool->min_dcs;
 		} else {
 			offset = 0;
@@ -4416,7 +4419,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		}
 	}
 	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
-				    (cnt_free - pool->counters_raw));
+				    MLX5_CNT_ARRAY_IDX(pool, cnt_free));
 	cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
 	/* Update the counter reset values. */
 	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
@@ -4459,7 +4462,7 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
 	MLX5_ASSERT(pool);
 	if (counter < MLX5_CNT_BATCH_OFFSET) {
-		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(cnt);
 		if (cnt_ext && --cnt_ext->ref_cnt)
 			return;
 	}
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index d20098ce45..236d665852 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -64,7 +64,7 @@ flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
 	MLX5_ASSERT(pool);
 	if (ppool)
 		*ppool = pool;
-	return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL];
+	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
 }
 
 /**
@@ -207,16 +207,16 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
 		if (!pool)
 			return 0;
 		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
-			cnt = &pool->counters_raw[i];
+			cnt = MLX5_POOL_GET_CNT(pool, i);
 			TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 		}
-		cnt = &pool->counters_raw[0];
+		cnt = MLX5_POOL_GET_CNT(pool, 0);
 		cont->pools[n_valid] = pool;
 		pool_idx = n_valid;
 		rte_atomic16_add(&cont->n_valid, 1);
 		TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
 	}
-	i = cnt - pool->counters_raw;
+	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
 	cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
 	cnt_ext->id = id;
 	cnt_ext->shared = shared;
@@ -251,7 +251,7 @@ flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 
 	cnt = flow_verbs_counter_get_by_idx(dev, counter,
 					    &pool);
-	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	cnt_ext = MLX5_CNT_TO_CNT_EXT(cnt);
 	if (--cnt_ext->ref_cnt == 0) {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 		claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs));
@@ -282,7 +282,7 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
 		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
 						(dev, flow->counter, &pool);
 		struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT
-						(pool, cnt);
+						(cnt);
 		struct rte_flow_query_count *qc = data;
 		uint64_t counters[2] = {0, 0};
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
@@ -1083,12 +1083,12 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
 	}
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
-	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	cnt_ext = MLX5_CNT_TO_CNT_EXT(cnt);
 	counter.counter_set_handle = cnt_ext->cs->handle;
 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
 	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
-	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	cnt_ext = MLX5_CNT_TO_CNT_EXT(cnt);
 	counter.counters = cnt_ext->cs;
 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
 #endif
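
For illustration only (not part of the patch), below is a minimal,
self-contained sketch of the layout the new macros implement: each
counter slice is a basic counter optionally followed by its ext
counter, so both the ext pointer and the array index are recovered with
plain offset arithmetic. The demo_* structures and the local macros are
simplified stand-ins, not the driver's definitions; only the arithmetic
mirrors MLX5_CNT_LEN / MLX5_POOL_GET_CNT / MLX5_CNT_TO_CNT_EXT /
MLX5_CNT_ARRAY_IDX from drivers/net/mlx5/mlx5.h above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define COUNTERS_PER_POOL 8	/* 512 in the driver; small for the demo. */

struct demo_counter {		/* stand-in for struct mlx5_flow_counter */
	uint64_t hits;
	uint64_t bytes;
};

struct demo_counter_ext {	/* stand-in for struct mlx5_flow_counter_ext */
	uint32_t id;
	uint32_t ref_cnt;
};

struct demo_pool {		/* stand-in for struct mlx5_flow_counter_pool */
	uint32_t type;		/* bit 0: pool carries ext-counter memory. */
	uint32_t reserved;	/* Keep the trailing counter memory aligned. */
	/* Counter memory follows the pool header, one slice per counter:
	 * [counter][ext-counter?][counter][ext-counter?]...
	 */
};

#define POOL_TYPE_EXT	(1 << 0)
#define IS_EXT_POOL(p)	((p)->type & POOL_TYPE_EXT)
/* Per-counter slice length depends on whether ext memory is present. */
#define CNT_LEN(p) \
	(sizeof(struct demo_counter) + \
	 (IS_EXT_POOL(p) ? sizeof(struct demo_counter_ext) : 0))
/* Basic counter i starts at pool end + i * slice length. */
#define POOL_GET_CNT(p, i) \
	((struct demo_counter *)((uint8_t *)((p) + 1) + (i) * CNT_LEN(p)))
/* The ext counter sits right behind its basic counter. */
#define CNT_TO_CNT_EXT(c)	((struct demo_counter_ext *)((c) + 1))
/* Recover the array index from a counter pointer. */
#define CNT_ARRAY_IDX(p, c) \
	((int)(((uint8_t *)(c) - (uint8_t *)((p) + 1)) / CNT_LEN(p)))

int
main(void)
{
	int ext = 1; /* A no-batch pool interleaves ext memory per counter. */
	size_t size = sizeof(struct demo_pool);
	struct demo_pool *pool;
	struct demo_counter *cnt;

	/* Same sizing logic as flow_dv_pool_create() after the patch. */
	size += COUNTERS_PER_POOL * sizeof(struct demo_counter);
	if (ext)
		size += COUNTERS_PER_POOL * sizeof(struct demo_counter_ext);
	pool = calloc(1, size);		/* rte_calloc() in the driver. */
	if (pool == NULL)
		return 1;
	pool->type = ext ? POOL_TYPE_EXT : 0;
	cnt = POOL_GET_CNT(pool, 5);
	CNT_TO_CNT_EXT(cnt)->id = 42;
	printf("index back from pointer: %d\n", CNT_ARRAY_IDX(pool, cnt));
	free(pool);
	return 0;
}

Built with any C compiler, the program prints index 5, showing that
interleaving the ext-counter memory behind each basic counter keeps the
pointer and index conversions constant-time, and that adding a further
per-counter memory type only means growing the slice length.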