MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset)))
#define MLX5_CNT_TO_AGE(cnt) \
((struct mlx5_age_param *)((cnt) + 1))
+/*
+ * The maximum single counter ID is 0x800000 as MLX5_CNT_BATCH_OFFSET
+ * defines. With a pool size of 512 there can be at most 0x4000 pools,
+ * so a valid pool index can never reach UINT16_MAX.
+ */
+#define POOL_IDX_INVALID UINT16_MAX
struct mlx5_flow_counter_pool;
struct mlx5_pools_container {
rte_atomic16_t n_valid; /* Number of valid pools. */
uint16_t n; /* Number of pools. */
+	uint16_t last_pool_idx; /* Last used pool index. */
+ int min_id; /* The minimum counter ID in the pools. */
+ int max_id; /* The maximum counter ID in the pools. */
rte_spinlock_t resize_sl; /* The resize lock. */
struct mlx5_counter_pools pool_list; /* Counter pool list. */
struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
+/**
+ * Check if the given devx counter ID belongs to the pool.
+ *
+ * Single (non-batch) pools hold one aligned range of
+ * MLX5_COUNTERS_PER_POOL IDs starting at the floor-aligned base of the
+ * pool's minimum devx counter ID.
+ *
+ * @param[in] pool
+ *   Pointer to the counter pool.
+ * @param[in] id
+ *   The counter devx ID.
+ *
+ * @return
+ *   True if counter belongs to the pool, false otherwise.
+ */
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
+{
+	/* Same alignment computation as the pool registration path. */
+	int base = RTE_ALIGN_FLOOR(pool->min_dcs->id, MLX5_COUNTERS_PER_POOL);
+
+	return id >= base && id < base + MLX5_COUNTERS_PER_POOL;
+}
+
/**
* Get a pool by devx counter ID.
*
flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
{
uint32_t i;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
- for (i = 0; i < n_valid; i++) {
+ /* Check last used pool. */
+ if (cont->last_pool_idx != POOL_IDX_INVALID &&
+ flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
+ return cont->pools[cont->last_pool_idx];
+ /* ID out of range means no suitable pool in the container. */
+ if (id > cont->max_id || id < cont->min_id)
+ return NULL;
+ /*
+ * Find the pool from the end of the container, since mostly counter
+ * ID is sequence increasing, and the last pool should be the needed
+ * one.
+ */
+ i = rte_atomic16_read(&cont->n_valid);
+ while (i--) {
struct mlx5_flow_counter_pool *pool = cont->pools[i];
- int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
- MLX5_COUNTERS_PER_POOL;
- if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
- /*
- * Move the pool to the head, as counter allocate
- * always gets the first pool in the container.
- */
- if (pool != TAILQ_FIRST(&cont->pool_list)) {
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
- }
+ if (flow_dv_is_counter_in_pool(pool, id))
return pool;
- }
}
return NULL;
}
TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
pool->index = n_valid;
cont->pools[n_valid] = pool;
+ if (!batch) {
+ int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+ if (base < cont->min_id)
+ cont->min_id = base;
+ if (base > cont->max_id)
+ cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+ cont->last_pool_idx = pool->index;
+ }
/* Pool initialization must be updated before host thread access. */
rte_cio_wmb();
rte_atomic16_add(&cont->n_valid, 1);