+ struct mlx5_flow_counter_pool **pools;
+ uint32_t size;
+
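+ /* All slots in the container are occupied; grow the pools array before adding a new pool. */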
+ if (n_valid == cont->n) {
+ /* Resize the container pool array. */
+ size = sizeof(struct mlx5_flow_counter_pool *) *
+ (n_valid + MLX5_CNT_CONTAINER_RESIZE);
+ pools = rte_zmalloc(__func__, size, 0);
+ if (!pools)
+ return 0;
+ if (n_valid) {
+ memcpy(pools, cont->pools,
+ sizeof(struct mlx5_flow_counter_pool *) *
+ n_valid);
+ rte_free(cont->pools);
+ }
+ cont->pools = pools;
+ cont->n += MLX5_CNT_CONTAINER_RESIZE;
+ }
+ /* Allocate memory for the new pool. */
+ size = sizeof(*pool) + sizeof(*cnt_ext) *
+ MLX5_COUNTERS_PER_POOL;
+ pool = rte_calloc(__func__, 1, size, 0);
+ if (!pool)
+ return 0;
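+ /* Chain every counter of the new pool into the pool's counter list. */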
+ for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ cnt = MLX5_POOL_GET_CNT(pool, i);
+ TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ }
+ cnt = MLX5_POOL_GET_CNT(pool, 0);
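+ /* Register the new pool in the container and account for it. */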
+ cont->pools[n_valid] = pool;
+ pool_idx = n_valid;
+ rte_atomic16_add(&cont->n_valid, 1);
+ TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);