diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 4b4fc3cce3..d29fbcbc83 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -117,3 +117,368 @@ mlx5_hlist_destroy(struct mlx5_hlist *h,
 	}
 	rte_free(h);
 }
+
+static inline void
+mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
+{
+	if (pool->cfg.need_lock)
+		rte_spinlock_lock(&pool->lock);
+}
+
+static inline void
+mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
+{
+	if (pool->cfg.need_lock)
+		rte_spinlock_unlock(&pool->lock);
+}
+
+static inline uint32_t
+mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+	uint32_t trunk_idx = 0;
+	uint32_t i;
+
+	if (!cfg->grow_trunk)
+		return entry_idx / cfg->trunk_size;
+	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
+		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
+			    (cfg->trunk_size << (cfg->grow_shift *
+			    cfg->grow_trunk)) + cfg->grow_trunk;
+	} else {
+		for (i = 0; i < cfg->grow_trunk; i++) {
+			if (entry_idx < pool->grow_tbl[i])
+				break;
+		}
+		trunk_idx = i;
+	}
+	return trunk_idx;
+}
+
+static inline uint32_t
+mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+
+	return cfg->trunk_size << (cfg->grow_shift *
+	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
+}
+
+static inline uint32_t
+mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+	uint32_t offset = 0;
+
+	if (!trunk_idx)
+		return 0;
+	if (!cfg->grow_trunk)
+		return cfg->trunk_size * trunk_idx;
+	if (trunk_idx < cfg->grow_trunk)
+		offset = pool->grow_tbl[trunk_idx - 1];
+	else
+		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
+			 (cfg->trunk_size << (cfg->grow_shift *
+			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
+	return offset;
+}
+
+struct mlx5_indexed_pool *
+mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
+{
+	struct mlx5_indexed_pool *pool;
+	uint32_t i;
+
+	if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) ||
+	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
+	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
+		return NULL;
+	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
+			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
+	if (!pool)
+		return NULL;
+	pool->cfg = *cfg;
+	if (!pool->cfg.trunk_size)
+		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
+	if (!cfg->malloc && !cfg->free) {
+		pool->cfg.malloc = rte_malloc_socket;
+		pool->cfg.free = rte_free;
+	}
+	pool->free_list = TRUNK_INVALID;
+	if (pool->cfg.need_lock)
+		rte_spinlock_init(&pool->lock);
+	/*
+	 * Initialize the dynamic grow trunk size lookup table to have a quick
+	 * lookup for the trunk entry index offset.
+	 */
+	for (i = 0; i < cfg->grow_trunk; i++) {
+		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
+		if (i > 0)
+			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
+	}
+	return pool;
+}
+
+static int
+mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
+{
+	struct mlx5_indexed_trunk *trunk;
+	struct mlx5_indexed_trunk **trunk_tmp;
+	struct mlx5_indexed_trunk **p;
+	size_t trunk_size = 0;
+	size_t data_size;
+	size_t bmp_size;
+	uint32_t idx;
+
+	if (pool->n_trunk_valid == TRUNK_MAX_IDX)
+		return -ENOMEM;
+	if (pool->n_trunk_valid == pool->n_trunk) {
+		/* No free trunk flags, expand trunk list. */
+		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
+			     RTE_CACHE_LINE_SIZE / sizeof(void *);
+
+		p = pool->cfg.malloc(pool->cfg.type,
+				     (pool->n_trunk_valid + n_grow) *
+				     sizeof(struct mlx5_indexed_trunk *),
+				     RTE_CACHE_LINE_SIZE, rte_socket_id());
+		if (!p)
+			return -ENOMEM;
+		if (pool->trunks)
+			memcpy(p, pool->trunks, pool->n_trunk_valid *
+			       sizeof(struct mlx5_indexed_trunk *));
+		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
+		       n_grow * sizeof(void *));
+		trunk_tmp = pool->trunks;
+		pool->trunks = p;
+		if (trunk_tmp)
+			pool->cfg.free(trunk_tmp);
+		pool->n_trunk += n_grow;
+	}
+	if (!pool->cfg.release_mem_en) {
+		idx = pool->n_trunk_valid;
+	} else {
+		/* Find the first available slot in trunk list */
+		for (idx = 0; idx < pool->n_trunk; idx++)
+			if (pool->trunks[idx] == NULL)
+				break;
+	}
+	trunk_size += sizeof(*trunk);
+	data_size = mlx5_trunk_size_get(pool, idx);
+	bmp_size = rte_bitmap_get_memory_footprint(data_size);
+	/* rte_bitmap requires memory cacheline aligned. */
+	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
+	trunk_size += bmp_size;
+	trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
+				 RTE_CACHE_LINE_SIZE, rte_socket_id());
+	if (!trunk)
+		return -ENOMEM;
+	pool->trunks[idx] = trunk;
+	trunk->idx = idx;
+	trunk->free = data_size;
+	trunk->prev = TRUNK_INVALID;
+	trunk->next = TRUNK_INVALID;
+	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
+	pool->free_list = idx;
+	/* Mark all entries as available. */
+	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
+		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
+		     bmp_size);
+	MLX5_ASSERT(trunk->bmp);
+	pool->n_trunk_valid++;
+#ifdef POOL_DEBUG
+	pool->trunk_new++;
+	pool->trunk_avail++;
+#endif
+	return 0;
+}
+
+void *
+mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
+{
+	struct mlx5_indexed_trunk *trunk;
+	uint64_t slab = 0;
+	uint32_t iidx = 0;
+	void *p;
+
+	mlx5_ipool_lock(pool);
+	if (pool->free_list == TRUNK_INVALID) {
+		/* If no available trunks, grow new. */
+		if (mlx5_ipool_grow(pool)) {
+			mlx5_ipool_unlock(pool);
+			return NULL;
+		}
+	}
+	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
+	trunk = pool->trunks[pool->free_list];
+	MLX5_ASSERT(trunk->free);
+	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
+		mlx5_ipool_unlock(pool);
+		return NULL;
+	}
+	MLX5_ASSERT(slab);
+	iidx += __builtin_ctzll(slab);
+	MLX5_ASSERT(iidx != UINT32_MAX);
+	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
+	rte_bitmap_clear(trunk->bmp, iidx);
+	p = &trunk->data[iidx * pool->cfg.size];
+	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	iidx += 1; /* non-zero index. */
+	trunk->free--;
+#ifdef POOL_DEBUG
+	pool->n_entry++;
+#endif
+	if (!trunk->free) {
+		/* Full trunk will be removed from free list in imalloc. */
+		MLX5_ASSERT(pool->free_list == trunk->idx);
+		pool->free_list = trunk->next;
+		if (trunk->next != TRUNK_INVALID)
+			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
+		trunk->prev = TRUNK_INVALID;
+		trunk->next = TRUNK_INVALID;
+#ifdef POOL_DEBUG
+		pool->trunk_empty++;
+		pool->trunk_avail--;
+#endif
+	}
+	*idx = iidx;
+	mlx5_ipool_unlock(pool);
+	return p;
+}
+
+void *
+mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
+{
+	void *entry = mlx5_ipool_malloc(pool, idx);
+
+	if (entry)
+		memset(entry, 0, pool->cfg.size);
+	return entry;
+}
+
+void
+mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
+{
+	struct mlx5_indexed_trunk *trunk;
+	uint32_t trunk_idx;
+	uint32_t entry_idx;
+
+	if (!idx)
+		return;
+	idx -= 1;
+	mlx5_ipool_lock(pool);
+	trunk_idx = mlx5_trunk_idx_get(pool, idx);
+	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
+	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
+		goto out;
+	trunk = pool->trunks[trunk_idx];
+	if (!trunk)
+		goto out;
+	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	if (trunk_idx != trunk->idx ||
+	    rte_bitmap_get(trunk->bmp, entry_idx))
+		goto out;
+	rte_bitmap_set(trunk->bmp, entry_idx);
+	trunk->free++;
+	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
+	   (pool, trunk->idx)) {
+		if (pool->free_list == trunk->idx)
+			pool->free_list = trunk->next;
+		if (trunk->next != TRUNK_INVALID)
+			pool->trunks[trunk->next]->prev = trunk->prev;
+		if (trunk->prev != TRUNK_INVALID)
+			pool->trunks[trunk->prev]->next = trunk->next;
+		pool->cfg.free(trunk);
+		pool->trunks[trunk_idx] = NULL;
+		pool->n_trunk_valid--;
+#ifdef POOL_DEBUG
+		pool->trunk_avail--;
+		pool->trunk_free++;
+#endif
+		if (pool->n_trunk_valid == 0) {
+			pool->cfg.free(pool->trunks);
+			pool->trunks = NULL;
+			pool->n_trunk = 0;
+		}
+	} else if (trunk->free == 1) {
+		/* Put into free trunk list head. */
+		MLX5_ASSERT(pool->free_list != trunk->idx);
+		trunk->next = pool->free_list;
+		trunk->prev = TRUNK_INVALID;
+		if (pool->free_list != TRUNK_INVALID)
+			pool->trunks[pool->free_list]->prev = trunk->idx;
+		pool->free_list = trunk->idx;
+#ifdef POOL_DEBUG
+		pool->trunk_empty--;
+		pool->trunk_avail++;
+#endif
+	}
+#ifdef POOL_DEBUG
+	pool->n_entry--;
+#endif
+out:
+	mlx5_ipool_unlock(pool);
+}
+
+void *
+mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
+{
+	struct mlx5_indexed_trunk *trunk;
+	void *p = NULL;
+	uint32_t trunk_idx;
+	uint32_t entry_idx;
+
+	if (!idx)
+		return NULL;
+	idx -= 1;
+	mlx5_ipool_lock(pool);
+	trunk_idx = mlx5_trunk_idx_get(pool, idx);
+	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
+	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
+		goto out;
+	trunk = pool->trunks[trunk_idx];
+	if (!trunk)
+		goto out;
+	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	if (trunk_idx != trunk->idx ||
+	    rte_bitmap_get(trunk->bmp, entry_idx))
+		goto out;
+	p = &trunk->data[entry_idx * pool->cfg.size];
+out:
+	mlx5_ipool_unlock(pool);
+	return p;
+}
+
+int
+mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
+{
+	struct mlx5_indexed_trunk **trunks;
+	uint32_t i;
+
+	MLX5_ASSERT(pool);
+	mlx5_ipool_lock(pool);
+	trunks = pool->trunks;
+	for (i = 0; i < pool->n_trunk; i++) {
+		if (trunks[i])
+			pool->cfg.free(trunks[i]);
+	}
+	if (pool->trunks)
+		pool->cfg.free(pool->trunks);
+	mlx5_ipool_unlock(pool);
+	rte_free(pool);
+	return 0;
+}
+
+void
+mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
+{
+	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
+	       "total: %d\n",
+	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
+	       pool->cfg.trunk_size, pool->n_trunk_valid);
+#ifdef POOL_DEBUG
+	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
+	       "available %u free %u\n",
+	       pool->cfg.type, pool->n_entry, pool->trunk_new,
+	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
+#endif
+}
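
For reference, a minimal usage sketch of the indexed pool API added by this patch. The entry type, configuration values, and the wrapper function below are hypothetical; the mlx5_ipool_* calls and the mlx5_indexed_pool_config field names follow the cfg accesses visible in the code above (the structure itself is assumed to be declared in mlx5_utils.h).

/* Illustrative only: "struct my_entry" and the chosen config values are made up. */
#include <rte_malloc.h>
#include "mlx5_utils.h"

struct my_entry {
	uint32_t tag;
	uint64_t data;
};

static void
ipool_usage_sketch(void)
{
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct my_entry),
		.trunk_size = 64,	/* must be a power of two */
		.grow_trunk = 3,	/* number of growing trunk steps */
		.grow_shift = 2,	/* each step multiplies trunk size by 4 */
		.need_lock = 1,		/* serialize malloc/free with a spinlock */
		.release_mem_en = 1,	/* free a trunk once all its entries are freed */
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "my_entry_ipool",
	};
	struct mlx5_indexed_pool *pool;
	struct my_entry *e;
	uint32_t idx;

	pool = mlx5_ipool_create(&cfg);
	if (!pool)
		return;
	/* Allocate one zeroed entry; the returned index is the handle. */
	e = mlx5_ipool_zmalloc(pool, &idx);
	if (e) {
		e->tag = 42;
		/* Translate the index back to a pointer later on. */
		e = mlx5_ipool_get(pool, idx);
		mlx5_ipool_free(pool, idx);
	}
	mlx5_ipool_dump(pool);
	mlx5_ipool_destroy(pool);
}

Note that returned indices are 1-based, so 0 can serve as a "no entry" value (both mlx5_ipool_get() and mlx5_ipool_free() return early on index 0), and with grow_trunk configured the first trunks grow as trunk_size << (grow_shift * n) before the per-trunk size is capped.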