#define MLX5_COUNTERS_PER_POOL 512
#define MLX5_MAX_PENDING_QUERIES 4
#define MLX5_CNT_CONTAINER_RESIZE 64
-#define MLX5_CNT_AGE_OFFSET 0x80000000
#define CNT_SIZE (sizeof(struct mlx5_flow_counter))
#define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext))
#define AGE_SIZE (sizeof(struct mlx5_age_param))
#define CNT_POOL_TYPE_AGE (1 << 1)
#define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT)
#define IS_AGE_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_AGE)
-#define MLX_CNT_IS_AGE(counter) ((counter) & MLX5_CNT_AGE_OFFSET ? 1 : 0)
#define MLX5_CNT_LEN(pool) \
(CNT_SIZE + \
(IS_AGE_POOL(pool) ? AGE_SIZE : 0) + \
AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
};
-#define MLX5_CNT_CONTAINER(sh, batch, age) (&(sh)->cmng.ccont \
- [(batch) * 2 + (age)])
+#define MLX5_CNT_CONTAINER(sh, batch) (&(sh)->cmng.ccont[batch])
enum {
MLX5_CCONT_TYPE_SINGLE,
- MLX5_CCONT_TYPE_SINGLE_FOR_AGE,
MLX5_CCONT_TYPE_BATCH,
- MLX5_CCONT_TYPE_BATCH_FOR_AGE,
MLX5_CCONT_TYPE_MAX,
};
+/* Counter free-list type: plain counters and aging counters are kept apart. */
+enum mlx5_counter_type {
+	MLX5_COUNTER_TYPE_ORIGIN, /* Plain counter, no aging attached. */
+	MLX5_COUNTER_TYPE_AGE, /* Counter allocated for flow aging. */
+	MLX5_COUNTER_TYPE_MAX, /* Number of types; sizes the free-list array. */
+};
+
/* Counter age parameter. */
struct mlx5_age_param {
uint16_t state; /**< Age state (atomically accessed). */
int max_id; /* The maximum counter ID in the pools. */
rte_spinlock_t resize_sl; /* The resize lock. */
rte_spinlock_t csl; /* The counter free list lock. */
- struct mlx5_counters counters; /* Free counter list. */
+ struct mlx5_counters counters[MLX5_COUNTER_TYPE_MAX];
+	/* Free counter lists, one per mlx5_counter_type. */
struct mlx5_counter_pools pool_list; /* Counter pool list. */
struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
struct mlx5_counter_stats_mem_mng *mem_mng;
uint8_t pending_queries;
uint8_t batch;
uint16_t pool_index;
- uint8_t age;
uint8_t query_thread_on;
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
uint16_t offset;
int ret;
uint8_t batch = sh->cmng.batch;
- uint8_t age = sh->cmng.age;
uint16_t pool_index = sh->cmng.pool_index;
struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
goto set_alarm;
next_container:
- cont = MLX5_CNT_CONTAINER(sh, batch, age);
+ cont = MLX5_CNT_CONTAINER(sh, batch);
rte_spinlock_lock(&cont->resize_sl);
if (!cont->pools) {
rte_spinlock_unlock(&cont->resize_sl);
goto set_alarm;
batch ^= 0x1;
pool_index = 0;
- if (batch == 0 && pool_index == 0) {
- age ^= 0x1;
- sh->cmng.batch = batch;
- sh->cmng.age = age;
- }
goto next_container;
}
pool = cont->pools[pool_index];
if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
batch ^= 0x1;
pool_index = 0;
- if (batch == 0 && pool_index == 0)
- age ^= 0x1;
}
set_alarm:
sh->cmng.batch = batch;
sh->cmng.pool_index = pool_index;
- sh->cmng.age = age;
mlx5_set_query_alarm(sh);
}
struct mlx5_flow_counter_pool *pool =
(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
struct mlx5_counter_stats_raw *raw_to_free;
- uint8_t age = !!IS_AGE_POOL(pool);
uint8_t query_gen = pool->query_gen ^ 1;
struct mlx5_pools_container *cont =
- MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool), age);
+ MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool));
+ enum mlx5_counter_type cnt_type =
+ IS_AGE_POOL(pool) ? MLX5_COUNTER_TYPE_AGE :
+ MLX5_COUNTER_TYPE_ORIGIN;
if (unlikely(status)) {
raw_to_free = pool->raw_hw;
rte_io_wmb();
if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
rte_spinlock_lock(&cont->csl);
- TAILQ_CONCAT(&cont->counters,
+ TAILQ_CONCAT(&cont->counters[cnt_type],
&pool->counters[query_gen], next);
rte_spinlock_unlock(&cont->csl);
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
- uint32_t batch = 0, age = 0;
+ uint32_t batch = 0;
idx--;
- age = MLX_CNT_IS_AGE(idx);
- idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
if (idx >= MLX5_CNT_BATCH_OFFSET) {
idx -= MLX5_CNT_BATCH_OFFSET;
batch = 1;
}
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch);
MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
MLX5_ASSERT(pool);
* Pointer to the Ethernet device structure.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
- * @param[in] age
- * Whether the pool is for Aging counter.
*
* @return
* 0 on success, otherwise negative errno value and rte_errno is set.
*/
static int
-flow_dv_container_resize(struct rte_eth_dev *dev,
- uint32_t batch, uint32_t age)
+flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- age);
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch);
struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
void *old_pools = cont->pools;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- age);
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch);
int16_t n_valid = rte_atomic16_read(&cont->n_valid);
uint32_t size = sizeof(*pool);
- if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
+ if (cont->n == n_valid && flow_dv_container_resize(dev, batch))
return NULL;
size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
struct mlx5_devx_obj *last_min_dcs;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
+ enum mlx5_counter_type cnt_type =
+ age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t add2other;
uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch);
if (!batch) {
retry:
add2other = 0;
if (!dcs)
return NULL;
pool = flow_dv_find_pool_by_id(cont, dcs->id);
- /* Check if counter belongs to exist pool ID range. */
+ /*
+	 * If the pool exists but with another type, the counter will be
+	 * added to the other pool; a new counter in the range with the
+	 * same type needs to be reallocated later.
+ */
if (!pool) {
- pool = flow_dv_find_pool_by_id
- (MLX5_CNT_CONTAINER
- (priv->sh, batch, (age ^ 0x1)), dcs->id);
- /*
- * Pool exists, counter will be added to the other
- * container, need to reallocate it later.
- */
- if (pool) {
- add2other = 1;
- } else {
- pool = flow_dv_pool_create(dev, dcs, batch,
- age);
- if (!pool) {
- mlx5_devx_cmd_destroy(dcs);
- return NULL;
- }
+ pool = flow_dv_pool_create(dev, dcs, batch,
+ age);
+ if (!pool) {
+ mlx5_devx_cmd_destroy(dcs);
+ return NULL;
}
+ } else if ((!!IS_AGE_POOL(pool)) != age) {
+ add2other = 1;
}
if ((dcs->id < pool->min_dcs->id ||
pool->min_dcs->id &
TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
}
rte_spinlock_lock(&cont->csl);
- TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
+ TAILQ_CONCAT(&cont->counters[cnt_type], &tmp_tq, next);
rte_spinlock_unlock(&cont->csl);
*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
(*cnt_free)->pool = pool;
* shared counters from the single container.
*/
uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- age);
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch);
+ enum mlx5_counter_type cnt_type =
+ age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t cnt_idx;
if (!priv->config.devx) {
}
/* Get free counters from container. */
rte_spinlock_lock(&cont->csl);
- cnt_free = TAILQ_FIRST(&cont->counters);
+ cnt_free = TAILQ_FIRST(&cont->counters[cnt_type]);
if (cnt_free)
- TAILQ_REMOVE(&cont->counters, cnt_free, next);
+ TAILQ_REMOVE(&cont->counters[cnt_type], cnt_free, next);
rte_spinlock_unlock(&cont->csl);
if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
batch, age))
cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
MLX5_CNT_ARRAY_IDX(pool, cnt_free));
cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
- cnt_idx += age * MLX5_CNT_AGE_OFFSET;
/* Update the counter reset values. */
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
if (cnt_free) {
cnt_free->pool = pool;
rte_spinlock_lock(&cont->csl);
- TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
+ TAILQ_INSERT_TAIL(&cont->counters[cnt_type], cnt_free, next);
rte_spinlock_unlock(&cont->csl);
}
return 0;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
struct mlx5_flow_counter_ext *cnt_ext = NULL;
+ enum mlx5_counter_type cnt_type;
if (!counter)
return;
* function both operate with the different list.
*
*/
- if (!priv->counter_fallback)
+ if (!priv->counter_fallback) {
TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
- else
+ } else {
+ cnt_type = IS_AGE_POOL(pool) ? MLX5_COUNTER_TYPE_AGE :
+ MLX5_COUNTER_TYPE_ORIGIN;
TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
- (priv->sh, 0, 0))->counters),
+ (priv->sh, 0))->counters[cnt_type]),
cnt, next);
+ }
}
/**