net/mlx5: optimize single counter pool search
authorSuanming Mou <suanmingm@mellanox.com>
Thu, 18 Jun 2020 07:24:44 +0000 (15:24 +0800)
committerFerruh Yigit <ferruh.yigit@intel.com>
Tue, 30 Jun 2020 12:52:29 +0000 (14:52 +0200)
For a single counter, when allocating a new counter, the driver needs to
find the pool it belongs to in order to do the query together.

Once there are millions of counters allocated, the pool array in the
counter container will become very large. In this case, the pool search
from the pool array will become extremely slow.

Save the minimum and maximum counter IDs to allow a quick range check of
the current counter ID. Starting the search from the last pool in the
container will mostly find the needed pool directly, since counter IDs
increase sequentially.

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow_dv.c

index 80e8bf6..97b2249 100644 (file)
@@ -457,6 +457,9 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
        memset(&sh->cmng, 0, sizeof(sh->cmng));
        TAILQ_INIT(&sh->cmng.flow_counters);
        for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+               sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
+               sh->cmng.ccont[i].max_id = -1;
+               sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
                TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
                rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
        }
index 87e383b..99368c5 100644 (file)
@@ -312,6 +312,12 @@ struct mlx5_drop {
        MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset)))
 #define MLX5_CNT_TO_AGE(cnt) \
        ((struct mlx5_age_param *)((cnt) + 1))
+/*
+ * The maximum single counter ID is 0x800000, as MLX5_CNT_BATCH_OFFSET
+ * defines. With a pool size of 512, the pool index can never get close
+ * to UINT16_MAX, so UINT16_MAX is safe to use as the invalid pool index.
+ */
+#define POOL_IDX_INVALID UINT16_MAX
 
 struct mlx5_flow_counter_pool;
 
@@ -420,6 +426,9 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
 struct mlx5_pools_container {
        rte_atomic16_t n_valid; /* Number of valid pools. */
        uint16_t n; /* Number of pools. */
+       uint16_t last_pool_idx; /* Last used pool index */
+       int min_id; /* The minimum counter ID in the pools. */
+       int max_id; /* The maximum counter ID in the pools. */
        rte_spinlock_t resize_sl; /* The resize lock. */
        struct mlx5_counter_pools pool_list; /* Counter pool list. */
        struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
index 6e4e10c..9fa8568 100644 (file)
@@ -4050,6 +4050,28 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
        return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
 }
 
+/**
+ * Check the devx counter belongs to the pool.
+ *
+ * @param[in] pool
+ *   Pointer to the counter pool.
+ * @param[in] id
+ *   The counter devx ID.
+ *
+ * @return
+ *   True if counter belongs to the pool, false otherwise.
+ */
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
+{
+       int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+                  MLX5_COUNTERS_PER_POOL;
+
+       if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+               return true;
+       return false;
+}
+
 /**
  * Get a pool by devx counter ID.
  *
@@ -4065,24 +4087,25 @@ static struct mlx5_flow_counter_pool *
 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
 {
        uint32_t i;
-       uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
 
-       for (i = 0; i < n_valid; i++) {
+       /* Check last used pool. */
+       if (cont->last_pool_idx != POOL_IDX_INVALID &&
+           flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
+               return cont->pools[cont->last_pool_idx];
+       /* ID out of range means no suitable pool in the container. */
+       if (id > cont->max_id || id < cont->min_id)
+               return NULL;
+       /*
+        * Search the pools from the end of the container, since counter
+        * IDs mostly increase sequentially, so the last pool should be
+        * the needed one.
+        */
+       i = rte_atomic16_read(&cont->n_valid);
+       while (i--) {
                struct mlx5_flow_counter_pool *pool = cont->pools[i];
-               int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
-                          MLX5_COUNTERS_PER_POOL;
 
-               if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
-                       /*
-                        * Move the pool to the head, as counter allocate
-                        * always gets the first pool in the container.
-                        */
-                       if (pool != TAILQ_FIRST(&cont->pool_list)) {
-                               TAILQ_REMOVE(&cont->pool_list, pool, next);
-                               TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
-                       }
+               if (flow_dv_is_counter_in_pool(pool, id))
                        return pool;
-               }
        }
        return NULL;
 }
@@ -4337,6 +4360,15 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
        TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
        pool->index = n_valid;
        cont->pools[n_valid] = pool;
+       if (!batch) {
+               int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+               if (base < cont->min_id)
+                       cont->min_id = base;
+               if (base > cont->max_id)
+                       cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+               cont->last_pool_idx = pool->index;
+       }
        /* Pool initialization must be updated before host thread access. */
        rte_cio_wmb();
        rte_atomic16_add(&cont->n_valid, 1);