git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: add indexed pool iterator
authorSuanming Mou <suanmingm@nvidia.com>
Tue, 13 Jul 2021 08:44:37 +0000 (11:44 +0300)
committerRaslan Darawsheh <rasland@nvidia.com>
Thu, 15 Jul 2021 13:19:08 +0000 (15:19 +0200)
In some cases, the application may want to know all the allocated
indexes in order to apply some operations to them.

This commit adds the indexed pool functions to support foreach
operation.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5_utils.c
drivers/net/mlx5/mlx5_utils.h

index 215024632d8a26b108b66d1cd99a230fb840ab31..32f8d650736a6c4b6f185f7955c1813eb25c78c2 100644 (file)
@@ -529,6 +529,16 @@ mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
                rte_errno = ENOTSUP;
                return NULL;
        }
+       if (unlikely(!pool->cache[cidx])) {
+               pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
+                       sizeof(struct mlx5_ipool_per_lcore) +
+                       (pool->cfg.per_core_cache * sizeof(uint32_t)),
+                       RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+               if (!pool->cache[cidx]) {
+                       DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
+                       return NULL;
+               }
+       }
        lc = mlx5_ipool_update_global_cache(pool, cidx);
        idx -= 1;
        trunk_idx = mlx5_trunk_idx_get(pool, idx);
@@ -839,6 +849,92 @@ mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
        return 0;
 }
 
+/*
+ * Gather all allocated indexes of a per-core-cache pool into a bitmap.
+ *
+ * A bitmap covering every possible index is created with all bits set,
+ * then the bits of indexes currently sitting in the global free cache
+ * and in each lcore's local free cache are cleared.  The remaining set
+ * bits are the indexes still allocated to users; mlx5_ipool_get_next()
+ * walks that bitmap.  No-op when the pool has no per-core cache or when
+ * nothing was ever allocated (no global cache yet).
+ */
+void
+mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
+{
+       uint32_t i, j;
+       struct mlx5_indexed_cache *gc;
+       struct rte_bitmap *ibmp;
+       uint32_t bmp_num, mem_size;
+
+       if (!pool->cfg.per_core_cache)
+               return;
+       gc = pool->gc;
+       if (!gc)
+               return;
+       /* Release a bitmap left over by a previous, unfinished iteration. */
+       if (pool->bmp_mem) {
+               pool->cfg.free(pool->bmp_mem);
+               pool->bmp_mem = NULL;
+               pool->ibmp = NULL;
+       }
+       /* Build a fresh bitmap sized for every index the pool can hold. */
+       bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
+       mem_size = rte_bitmap_get_memory_footprint(bmp_num);
+       pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
+                                        RTE_CACHE_LINE_SIZE, rte_socket_id());
+       if (!pool->bmp_mem) {
+               /* DRV_LOG appends the newline itself; none embedded here. */
+               DRV_LOG(ERR, "Ipool bitmap mem allocate failed.");
+               return;
+       }
+       ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
+       if (!ibmp) {
+               pool->cfg.free(pool->bmp_mem);
+               pool->bmp_mem = NULL;
+               DRV_LOG(ERR, "Ipool bitmap create failed.");
+               return;
+       }
+       pool->ibmp = ibmp;
+       /* Clear the bits of indexes held in the global free cache. */
+       for (i = 0; i < gc->len; i++)
+               rte_bitmap_clear(ibmp, gc->idx[i] - 1);
+       /* Clear the bits of indexes held in each lcore's free cache. */
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
+
+               if (!ilc)
+                       continue;
+               for (j = 0; j < ilc->len; j++)
+                       rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
+       }
+}
+
+/*
+ * Return the entry of the next allocated index for a per-core-cache pool.
+ *
+ * Walks the "allocated index" bitmap built by mlx5_ipool_flush_cache().
+ * @pos is the 1-based index to resume from (0 on the first call); on
+ * success it is updated to the index of the returned entry.  When the
+ * bitmap is exhausted (or was never built) the bitmap memory is released
+ * and NULL is returned, ending the iteration.
+ */
+static void *
+mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
+{
+       struct rte_bitmap *ibmp;
+       uint64_t slab = 0;
+       uint32_t iidx = *pos;
+
+       ibmp = pool->ibmp;
+       if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
+               /* Iteration finished: free the flush-time bitmap memory. */
+               if (pool->bmp_mem) {
+                       pool->cfg.free(pool->bmp_mem);
+                       pool->bmp_mem = NULL;
+                       pool->ibmp = NULL;
+               }
+               return NULL;
+       }
+       /* Locate the lowest set bit within the returned slab. */
+       iidx += __builtin_ctzll(slab);
+       /* Consume the bit so the next call advances past this index. */
+       rte_bitmap_clear(ibmp, iidx);
+       /* Bitmap bits are 0-based while pool indexes are 1-based. */
+       iidx++;
+       *pos = iidx;
+       return mlx5_ipool_get_cache(pool, iidx);
+}
+
+/*
+ * Iterator entry point: return the entry at the next allocated index.
+ *
+ * @pos holds the index to start scanning from and is updated to the
+ * index of the returned entry; NULL means no more allocated entries.
+ * Per-core-cache pools delegate to the bitmap-based iterator; plain
+ * pools linearly probe every index up to the last trunk offset.
+ */
+void *
+mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
+{
+       uint32_t idx = *pos;
+       void *entry;
+
+       if (pool->cfg.per_core_cache)
+               return mlx5_ipool_get_next_cache(pool, pos);
+       while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
+               /* mlx5_ipool_get() returns NULL for unallocated indexes. */
+               entry = mlx5_ipool_get(pool, idx);
+               if (entry) {
+                       *pos = idx;
+                       return entry;
+               }
+               idx++;
+       }
+       return NULL;
+}
+
 void
 mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
 {
index 046906269579339e0b7a99fb091444917620a1a7..737dd7052d9d4baa75a9e0365c3f595e747d3536 100644 (file)
@@ -261,6 +261,9 @@ struct mlx5_indexed_pool {
                        /* Global cache. */
                        struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE];
                        /* Local cache. */
+                       struct rte_bitmap *ibmp;
+                       void *bmp_mem;
+                       /* Allocate objects bitmap. Use during flush. */
                };
        };
 #ifdef POOL_DEBUG
@@ -862,4 +865,9 @@ struct {                                                            \
             (entry);                                                   \
             idx++, (entry) = mlx5_l3t_get_next((tbl), &idx))
 
+/*
+ * Iterate over every allocated entry of @ipool: flush the pool caches
+ * first (builds the allocated-index bitmap), then walk the allocated
+ * indexes via mlx5_ipool_get_next().  (idx) and (entry) are the loop
+ * cursors supplied by the caller.
+ */
+#define MLX5_IPOOL_FOREACH(ipool, idx, entry)                          \
+       for ((idx) = 0, mlx5_ipool_flush_cache((ipool)),                \
+           (entry) = mlx5_ipool_get_next((ipool), &idx);               \
+           (entry); idx++, (entry) = mlx5_ipool_get_next((ipool), &idx))
+
 #endif /* RTE_PMD_MLX5_UTILS_H_ */