rte_atomic64_t a64_dcs;
};
/* The devx object of the minimum counter ID. */
- rte_atomic64_t query_gen;
+ rte_atomic64_t start_query_gen; /* Query start round. */
+ rte_atomic64_t end_query_gen; /* Query end round. */
uint32_t n_counters: 16; /* Number of devx allocated counters. */
rte_spinlock_t sl; /* The pool lock. */
struct mlx5_counter_stats_raw *raw;
dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
(&pool->a64_dcs);
offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+ /*
+ * Identify more efficiently the counters released between the query
+ * trigger and the query handler. A counter released in this gap period
+ * must wait for a new round of query, as the newly arrived packets
+ * will not be taken into account by the query already in flight.
+ */
+ rte_atomic64_add(&pool->start_query_gen, 1);
ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
offset, NULL, NULL,
pool->raw_hw->mem_mng->dm->id,
sh->devx_comp,
(uint64_t)(uintptr_t)pool);
if (ret) {
+ rte_atomic64_sub(&pool->start_query_gen, 1);
DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
" %d", pool->min_dcs->id);
pool->raw_hw = NULL;
struct mlx5_counter_stats_raw *raw_to_free;
if (unlikely(status)) {
+ rte_atomic64_sub(&pool->start_query_gen, 1);
raw_to_free = pool->raw_hw;
} else {
raw_to_free = pool->raw;
rte_spinlock_lock(&pool->sl);
pool->raw = pool->raw_hw;
rte_spinlock_unlock(&pool->sl);
- rte_atomic64_add(&pool->query_gen, 1);
+ MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
+ rte_atomic64_read(&pool->start_query_gen));
+ rte_atomic64_set(&pool->end_query_gen,
+ rte_atomic64_read(&pool->start_query_gen));
/* Be sure the new raw counters data is updated in memory. */
rte_cio_wmb();
}
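/*
 * Illustrative sketch (not part of the patch): how the trigger and the
 * completion handler could maintain the two query generations. Plain C11
 * atomics stand in for rte_atomic64, and the names cnt_pool,
 * pool_query_trigger and pool_query_complete are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct cnt_pool {
	atomic_long start_query_gen; /* Bumped when a query is triggered. */
	atomic_long end_query_gen;   /* Advanced when its result is in memory. */
};

/* Trigger side: bump the start generation before issuing the async
 * query, and roll it back if the command cannot be issued, as done
 * above on the mlx5_devx_cmd_flow_counter_query() error path.
 */
static bool
pool_query_trigger(struct cnt_pool *pool, bool cmd_issued)
{
	atomic_fetch_add(&pool->start_query_gen, 1);
	if (!cmd_issued) {
		atomic_fetch_sub(&pool->start_query_gen, 1);
		return false;
	}
	return true;
}

/* Completion side: once the new raw counter data is visible in memory,
 * publish the end generation so counters freed before this query
 * become reusable again.
 */
static void
pool_query_complete(struct cnt_pool *pool)
{
	atomic_store(&pool->end_query_gen,
		     atomic_load(&pool->start_query_gen));
}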
/*
* The generation of the new allocated counters in this pool is 0, 2 in
* the pool generation makes all the counters valid for allocation.
+ * The start and end query generations ensure that counters released in
+ * the gap between the query trigger and the query handler are not
+ * reallocated before the last query completes and the stats are
+ * updated in memory.
*/
- rte_atomic64_set(&pool->query_gen, 0x2);
+ rte_atomic64_set(&pool->start_query_gen, 0x2);
+ rte_atomic64_set(&pool->end_query_gen, 0x2);
TAILQ_INIT(&pool->counters);
TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
cont->pools[n_valid] = pool;
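/*
 * Illustrative sketch (not part of the patch): pool creation sets both
 * generations to the same value (0x2 above), which is greater than the
 * query_gen (0) of the freshly prepared counters, so they all pass the
 * reuse check sketched at the end of this section. pool_init is a
 * hypothetical helper reusing the cnt_pool model above.
 */
static void
pool_init(struct cnt_pool *pool)
{
	atomic_store(&pool->start_query_gen, 2);
	atomic_store(&pool->end_query_gen, 2);
}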
* updated too.
*/
cnt_free = TAILQ_FIRST(&pool->counters);
- if (cnt_free && cnt_free->query_gen + 1 <
- rte_atomic64_read(&pool->query_gen))
+ if (cnt_free && cnt_free->query_gen <
+ rte_atomic64_read(&pool->end_query_gen))
break;
cnt_free = NULL;
}
/* Put the counter in the end - the last updated one. */
TAILQ_INSERT_TAIL(&pool->counters, counter, next);
- counter->query_gen = rte_atomic64_read(&pool->query_gen);
+ /*
+ * Counters released between the query trigger and the query
+ * handler must wait for the next round of query, since packets
+ * arriving in this gap period are not accounted to the old
+ * counter data.
+ */
+ counter->query_gen = rte_atomic64_read(&pool->start_query_gen);
}
}
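/*
 * Illustrative sketch (not part of the patch): release stamps a counter
 * with the current start generation, and allocation reuses it only once
 * the end generation has moved past that stamp, i.e. the query that
 * accounts its final packets has completed and the stats are in memory.
 * The struct cnt and the helpers below are hypothetical and build on
 * the cnt_pool model above.
 */
struct cnt {
	long query_gen; /* start_query_gen value sampled at release time. */
};

/* Release side: if a query is in flight, the stamp equals
 * end_query_gen + 1, so the counter stays unavailable until the
 * following query completes.
 */
static void
counter_release(struct cnt_pool *pool, struct cnt *cnt)
{
	cnt->query_gen = atomic_load(&pool->start_query_gen);
	/* ... append the counter to the pool free list here ... */
}

/* Allocation side: a freed counter is reusable only after the query
 * covering its last traffic has been handled.
 */
static bool
counter_reusable(struct cnt_pool *pool, const struct cnt *cnt)
{
	return cnt->query_gen < atomic_load(&pool->end_query_gen);
}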