#include "mlx5_utils.h"
-
-/********************* Cache list ************************/
-
-static struct mlx5_cache_entry *
-mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
- void *ctx __rte_unused)
-{
- return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
-}
-
-static void
-mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry)
-{
- mlx5_free(entry);
-}
-
-int
-mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
- uint32_t entry_size, void *ctx,
- mlx5_cache_create_cb cb_create,
- mlx5_cache_match_cb cb_match,
- mlx5_cache_remove_cb cb_remove)
-{
- MLX5_ASSERT(list);
- if (!cb_match || (!cb_create ^ !cb_remove))
- return -1;
- if (name)
- snprintf(list->name, sizeof(list->name), "%s", name);
- list->entry_sz = entry_size;
- list->ctx = ctx;
- list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
- list->cb_match = cb_match;
- list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
- rte_rwlock_init(&list->lock);
- DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
- LIST_INIT(&list->head);
- return 0;
-}
-
-static struct mlx5_cache_entry *
-__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
-{
- struct mlx5_cache_entry *entry;
-
- LIST_FOREACH(entry, &list->head, next) {
- if (list->cb_match(list, entry, ctx))
- continue;
- if (reuse) {
- __atomic_add_fetch(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
- DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
- list->name, (void *)entry, entry->ref_cnt);
- }
- break;
- }
- return entry;
-}
-
-static struct mlx5_cache_entry *
-cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
-{
- struct mlx5_cache_entry *entry;
-
- rte_rwlock_read_lock(&list->lock);
- entry = __cache_lookup(list, ctx, reuse);
- rte_rwlock_read_unlock(&list->lock);
- return entry;
-}
-
-struct mlx5_cache_entry *
-mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
-{
- return cache_lookup(list, ctx, false);
-}
-
-struct mlx5_cache_entry *
-mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
-{
- struct mlx5_cache_entry *entry;
- uint32_t prev_gen_cnt = 0;
-
- MLX5_ASSERT(list);
- prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
- /* Lookup with read lock, reuse if found. */
- entry = cache_lookup(list, ctx, true);
- if (entry)
- return entry;
- /* Not found, append with write lock - block read from other threads. */
- rte_rwlock_write_lock(&list->lock);
- /* If list changed by other threads before lock, search again. */
- if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
- /* Lookup and reuse w/o read lock. */
- entry = __cache_lookup(list, ctx, true);
- if (entry)
- goto done;
- }
- entry = list->cb_create(list, entry, ctx);
- if (!entry) {
- DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
- list->name, (void *)entry);
- goto done;
- }
- entry->ref_cnt = 1;
- LIST_INSERT_HEAD(&list->head, entry, next);
- __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
- __atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
- DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
- list->name, (void *)entry, entry->ref_cnt);
-done:
- rte_rwlock_write_unlock(&list->lock);
- return entry;
-}
-
-int
-mlx5_cache_unregister(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry)
-{
- rte_rwlock_write_lock(&list->lock);
- MLX5_ASSERT(entry && entry->next.le_prev);
- DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
- list->name, (void *)entry, entry->ref_cnt);
- if (--entry->ref_cnt) {
- rte_rwlock_write_unlock(&list->lock);
- return 1;
- }
- __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
- __atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
- LIST_REMOVE(entry, next);
- list->cb_remove(list, entry);
- rte_rwlock_write_unlock(&list->lock);
- DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
- list->name, (void *)entry);
- return 0;
-}
-
-void
-mlx5_cache_list_destroy(struct mlx5_cache_list *list)
-{
- struct mlx5_cache_entry *entry;
-
- MLX5_ASSERT(list);
- /* no LIST_FOREACH_SAFE, using while instead */
- while (!LIST_EMPTY(&list->head)) {
- entry = LIST_FIRST(&list->head);
- LIST_REMOVE(entry, next);
- list->cb_remove(list, entry);
- DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
- list->name, (void *)entry);
- }
- memset(list, 0, sizeof(*list));
-}
-
-uint32_t
-mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
-{
- MLX5_ASSERT(list);
- return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
-}
-
/********************* Indexed pool **********************/
mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
if (!cfg->per_core_cache)
pool->free_list = TRUNK_INVALID;
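+ /* lcore_lock protects the extra cache slot shared by non-EAL threads. */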
+ rte_spinlock_init(&pool->lcore_lock);
return pool;
}
}
static void *
-mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
struct mlx5_indexed_trunk *trunk;
struct mlx5_indexed_cache *lc;
uint32_t trunk_idx;
uint32_t entry_idx;
- int cidx;
MLX5_ASSERT(idx);
- cidx = rte_lcore_index(rte_lcore_id());
- if (unlikely(cidx == -1)) {
- rte_errno = ENOTSUP;
- return NULL;
+ if (unlikely(!pool->cache[cidx])) {
+ pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_ipool_per_lcore) +
+ (pool->cfg.per_core_cache * sizeof(uint32_t)),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!pool->cache[cidx]) {
+ DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
+ return NULL;
+ }
}
lc = mlx5_ipool_update_global_cache(pool, cidx);
idx -= 1;
}
static void *
-mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
+mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
+ void *entry;
int cidx;
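+ /*
+ * Threads without an EAL lcore index (rte_lcore_index() == -1) share the
+ * dedicated RTE_MAX_LCORE cache slot, serialized by lcore_lock.
+ */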
cidx = rte_lcore_index(rte_lcore_id());
if (unlikely(cidx == -1)) {
- rte_errno = ENOTSUP;
- return NULL;
+ cidx = RTE_MAX_LCORE;
+ rte_spinlock_lock(&pool->lcore_lock);
}
+ entry = _mlx5_ipool_get_cache(pool, cidx, idx);
+ if (unlikely(cidx == RTE_MAX_LCORE))
+ rte_spinlock_unlock(&pool->lcore_lock);
+ return entry;
+}
+
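+/*
+ * Allocate an index using the per-lcore cache slot cidx. The caller resolves
+ * cidx and, for the shared RTE_MAX_LCORE slot, already holds lcore_lock.
+ */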
+static void *
+_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
+ uint32_t *idx)
+{
if (unlikely(!pool->cache[cidx])) {
pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_ipool_per_lcore) +
} else if (pool->cache[cidx]->len) {
pool->cache[cidx]->len--;
*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
- return mlx5_ipool_get_cache(pool, *idx);
+ return _mlx5_ipool_get_cache(pool, cidx, *idx);
}
/* Not enough idx in global cache. Keep fetching from global. */
*idx = mlx5_ipool_allocate_from_global(pool, cidx);
if (unlikely(!(*idx)))
return NULL;
- return mlx5_ipool_get_cache(pool, *idx);
+ return _mlx5_ipool_get_cache(pool, cidx, *idx);
}
-static void
-mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+static void *
+mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
+ void *entry;
int cidx;
+
+ cidx = rte_lcore_index(rte_lcore_id());
+ if (unlikely(cidx == -1)) {
+ cidx = RTE_MAX_LCORE;
+ rte_spinlock_lock(&pool->lcore_lock);
+ }
+ entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
+ if (unlikely(cidx == RTE_MAX_LCORE))
+ rte_spinlock_unlock(&pool->lcore_lock);
+ return entry;
+}
+
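+/*
+ * Return an index to the per-lcore cache slot cidx, which may differ from
+ * the slot the index was originally allocated from.
+ */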
+static void
+_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
+{
struct mlx5_ipool_per_lcore *ilc;
struct mlx5_indexed_cache *gc, *olc = NULL;
uint32_t reclaim_num = 0;
MLX5_ASSERT(idx);
- cidx = rte_lcore_index(rte_lcore_id());
- if (unlikely(cidx == -1)) {
- rte_errno = ENOTSUP;
- return;
- }
/*
* When index was allocated on core A but freed on core B. In this
* case check if local cache on core B was allocated before.
pool->cache[cidx]->len++;
}
+static void
+mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+{
+ int cidx;
+
+ cidx = rte_lcore_index(rte_lcore_id());
+ if (unlikely(cidx == -1)) {
+ cidx = RTE_MAX_LCORE;
+ rte_spinlock_lock(&pool->lcore_lock);
+ }
+ _mlx5_ipool_free_cache(pool, cidx, idx);
+ if (unlikely(cidx == RTE_MAX_LCORE))
+ rte_spinlock_unlock(&pool->lcore_lock);
+}
+
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
MLX5_ASSERT(pool);
mlx5_ipool_lock(pool);
if (pool->cfg.per_core_cache) {
- for (i = 0; i < RTE_MAX_LCORE; i++) {
+ for (i = 0; i <= RTE_MAX_LCORE; i++) {
/*
* Free only old global cache. Pool gc will be
* freed at last.
return 0;
}
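+/*
+ * Snapshot the pool into a bitmap for iteration: set all bits for the valid
+ * trunks, then clear the indexes sitting in the global and per-lcore free
+ * caches, so the remaining set bits correspond to allocated entries.
+ */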
+void
+mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
+{
+ uint32_t i, j;
+ struct mlx5_indexed_cache *gc;
+ struct rte_bitmap *ibmp;
+ uint32_t bmp_num, mem_size;
+
+ if (!pool->cfg.per_core_cache)
+ return;
+ gc = pool->gc;
+ if (!gc)
+ return;
+ /* Reset bmp. */
+ bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
+ mem_size = rte_bitmap_get_memory_footprint(bmp_num);
+ pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
+ RTE_CACHE_LINE_SIZE, rte_socket_id());
+ if (!pool->bmp_mem) {
+ DRV_LOG(ERR, "Ipool bitmap mem allocate failed.\n");
+ return;
+ }
+ ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
+ if (!ibmp) {
+ pool->cfg.free(pool->bmp_mem);
+ pool->bmp_mem = NULL;
+ DRV_LOG(ERR, "Ipool bitmap create failed.\n");
+ return;
+ }
+ pool->ibmp = ibmp;
+ /* Clear global cache. */
+ for (i = 0; i < gc->len; i++)
+ rte_bitmap_clear(ibmp, gc->idx[i] - 1);
+ /* Clear core cache. */
+ for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
+ struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
+
+ if (!ilc)
+ continue;
+ for (j = 0; j < ilc->len; j++)
+ rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
+ }
+}
+
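+/*
+ * Return the next allocated entry found by scanning the bitmap built in
+ * mlx5_ipool_flush_cache(). Each returned index is cleared from the bitmap;
+ * the bitmap memory is released once no set bit remains.
+ */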
+static void *
+mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
+{
+ struct rte_bitmap *ibmp;
+ uint64_t slab = 0;
+ uint32_t iidx = *pos;
+
+ ibmp = pool->ibmp;
+ if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
+ if (pool->bmp_mem) {
+ pool->cfg.free(pool->bmp_mem);
+ pool->bmp_mem = NULL;
+ pool->ibmp = NULL;
+ }
+ return NULL;
+ }
+ iidx += __builtin_ctzll(slab);
+ rte_bitmap_clear(ibmp, iidx);
+ iidx++;
+ *pos = iidx;
+ return mlx5_ipool_get_cache(pool, iidx);
+}
+
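+/*
+ * Iterate over allocated entries: pools with a per-core cache walk the
+ * bitmap, other pools probe indexes one by one via mlx5_ipool_get().
+ */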
+void *
+mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
+{
+ uint32_t idx = *pos;
+ void *entry;
+
+ if (pool->cfg.per_core_cache)
+ return mlx5_ipool_get_next_cache(pool, pos);
+ while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
+ entry = mlx5_ipool_get(pool, idx);
+ if (entry) {
+ *pos = idx;
+ return entry;
+ }
+ idx++;
+ }
+ return NULL;
+}
+
void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{