X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_utils.c;h=e4e66ae4c55e95cdc35243fd0c3eb6327207251e;hb=c2a42d19d967e24223f06f2fc797eaed8e17c345;hp=32f8d650736a6c4b6f185f7955c1813eb25c78c2;hpb=64a80f1c4818ff662f2c54cd0477baca0a7a018f;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 32f8d65073..e4e66ae4c5 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -8,167 +8,6 @@
 
 #include "mlx5_utils.h"
 
-
-/********************* Cache list ************************/
-
-static struct mlx5_cache_entry *
-mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
-			     struct mlx5_cache_entry *entry __rte_unused,
-			     void *ctx __rte_unused)
-{
-	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
-}
-
-static void
-mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
-			     struct mlx5_cache_entry *entry)
-{
-	mlx5_free(entry);
-}
-
-int
-mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
-		     uint32_t entry_size, void *ctx,
-		     mlx5_cache_create_cb cb_create,
-		     mlx5_cache_match_cb cb_match,
-		     mlx5_cache_remove_cb cb_remove)
-{
-	MLX5_ASSERT(list);
-	if (!cb_match || (!cb_create ^ !cb_remove))
-		return -1;
-	if (name)
-		snprintf(list->name, sizeof(list->name), "%s", name);
-	list->entry_sz = entry_size;
-	list->ctx = ctx;
-	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
-	list->cb_match = cb_match;
-	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
-	rte_rwlock_init(&list->lock);
-	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
-	LIST_INIT(&list->head);
-	return 0;
-}
-
-static struct mlx5_cache_entry *
-__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
-{
-	struct mlx5_cache_entry *entry;
-
-	LIST_FOREACH(entry, &list->head, next) {
-		if (list->cb_match(list, entry, ctx))
-			continue;
-		if (reuse) {
-			__atomic_add_fetch(&entry->ref_cnt, 1,
-					   __ATOMIC_RELAXED);
-			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
-				list->name, (void *)entry, entry->ref_cnt);
-		}
-		break;
-	}
-	return entry;
-}
-
-static struct mlx5_cache_entry *
-cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
-{
-	struct mlx5_cache_entry *entry;
-
-	rte_rwlock_read_lock(&list->lock);
-	entry = __cache_lookup(list, ctx, reuse);
-	rte_rwlock_read_unlock(&list->lock);
-	return entry;
-}
-
-struct mlx5_cache_entry *
-mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
-{
-	return cache_lookup(list, ctx, false);
-}
-
-struct mlx5_cache_entry *
-mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
-{
-	struct mlx5_cache_entry *entry;
-	uint32_t prev_gen_cnt = 0;
-
-	MLX5_ASSERT(list);
-	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
-	/* Lookup with read lock, reuse if found. */
-	entry = cache_lookup(list, ctx, true);
-	if (entry)
-		return entry;
-	/* Not found, append with write lock - block read from other threads. */
-	rte_rwlock_write_lock(&list->lock);
-	/* If list changed by other threads before lock, search again. */
-	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
-		/* Lookup and reuse w/o read lock. */
-		entry = __cache_lookup(list, ctx, true);
-		if (entry)
-			goto done;
-	}
-	entry = list->cb_create(list, entry, ctx);
-	if (!entry) {
-		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
-			list->name, (void *)entry);
-		goto done;
-	}
-	entry->ref_cnt = 1;
-	LIST_INSERT_HEAD(&list->head, entry, next);
-	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
-	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
-	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
-		list->name, (void *)entry, entry->ref_cnt);
-done:
-	rte_rwlock_write_unlock(&list->lock);
-	return entry;
-}
-
-int
-mlx5_cache_unregister(struct mlx5_cache_list *list,
-		      struct mlx5_cache_entry *entry)
-{
-	rte_rwlock_write_lock(&list->lock);
-	MLX5_ASSERT(entry && entry->next.le_prev);
-	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
-		list->name, (void *)entry, entry->ref_cnt);
-	if (--entry->ref_cnt) {
-		rte_rwlock_write_unlock(&list->lock);
-		return 1;
-	}
-	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
-	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
-	LIST_REMOVE(entry, next);
-	list->cb_remove(list, entry);
-	rte_rwlock_write_unlock(&list->lock);
-	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
-		list->name, (void *)entry);
-	return 0;
-}
-
-void
-mlx5_cache_list_destroy(struct mlx5_cache_list *list)
-{
-	struct mlx5_cache_entry *entry;
-
-	MLX5_ASSERT(list);
-	/* no LIST_FOREACH_SAFE, using while instead */
-	while (!LIST_EMPTY(&list->head)) {
-		entry = LIST_FIRST(&list->head);
-		LIST_REMOVE(entry, next);
-		list->cb_remove(list, entry);
-		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
-			list->name, (void *)entry);
-	}
-	memset(list, 0, sizeof(*list));
-}
-
-uint32_t
-mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
-{
-	MLX5_ASSERT(list);
-	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
-}
-
 /********************* Indexed pool **********************/
 
 static inline void
@@ -275,6 +114,7 @@ mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
 			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
 	if (!cfg->per_core_cache)
 		pool->free_list = TRUNK_INVALID;
+	rte_spinlock_init(&pool->lcore_lock);
 	return pool;
 }
 
@@ -515,20 +355,14 @@ check_again:
 }
 
 static void *
-mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
 {
 	struct mlx5_indexed_trunk *trunk;
 	struct mlx5_indexed_cache *lc;
 	uint32_t trunk_idx;
 	uint32_t entry_idx;
-	int cidx;
 
 	MLX5_ASSERT(idx);
-	cidx = rte_lcore_index(rte_lcore_id());
-	if (unlikely(cidx == -1)) {
-		rte_errno = ENOTSUP;
-		return NULL;
-	}
 	if (unlikely(!pool->cache[cidx])) {
 		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
 			sizeof(struct mlx5_ipool_per_lcore) +
@@ -549,15 +383,27 @@ mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
 }
 
 static void *
-mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
+mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
 {
+	void *entry;
 	int cidx;
 
 	cidx = rte_lcore_index(rte_lcore_id());
 	if (unlikely(cidx == -1)) {
-		rte_errno = ENOTSUP;
-		return NULL;
+		cidx = RTE_MAX_LCORE;
+		rte_spinlock_lock(&pool->lcore_lock);
 	}
+	entry = _mlx5_ipool_get_cache(pool, cidx, idx);
+	if (unlikely(cidx == RTE_MAX_LCORE))
+		rte_spinlock_unlock(&pool->lcore_lock);
+	return entry;
+}
+
+
+static void *
+_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
+			 uint32_t *idx)
+{
 	if (unlikely(!pool->cache[cidx])) {
 		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
 			sizeof(struct mlx5_ipool_per_lcore) +
@@ -570,29 +416,40 @@ mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
 	} else if (pool->cache[cidx]->len) {
 		pool->cache[cidx]->len--;
 		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
-		return mlx5_ipool_get_cache(pool, *idx);
+		return _mlx5_ipool_get_cache(pool, cidx, *idx);
 	}
 	/* Not enough idx in global cache. Keep fetching from global. */
 	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
 	if (unlikely(!(*idx)))
 		return NULL;
-	return mlx5_ipool_get_cache(pool, *idx);
+	return _mlx5_ipool_get_cache(pool, cidx, *idx);
 }
 
-static void
-mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+static void *
+mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
 {
+	void *entry;
 	int cidx;
+
+	cidx = rte_lcore_index(rte_lcore_id());
+	if (unlikely(cidx == -1)) {
+		cidx = RTE_MAX_LCORE;
+		rte_spinlock_lock(&pool->lcore_lock);
+	}
+	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
+	if (unlikely(cidx == RTE_MAX_LCORE))
+		rte_spinlock_unlock(&pool->lcore_lock);
+	return entry;
+}
+
+static void
+_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
+{
 	struct mlx5_ipool_per_lcore *ilc;
 	struct mlx5_indexed_cache *gc, *olc = NULL;
 	uint32_t reclaim_num = 0;
 
 	MLX5_ASSERT(idx);
-	cidx = rte_lcore_index(rte_lcore_id());
-	if (unlikely(cidx == -1)) {
-		rte_errno = ENOTSUP;
-		return;
-	}
 	/*
 	 * When index was allocated on core A but freed on core B. In this
 	 * case check if local cache on core B was allocated before.
@@ -635,6 +492,21 @@ mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
 	pool->cache[cidx]->len++;
 }
 
+static void
+mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+{
+	int cidx;
+
+	cidx = rte_lcore_index(rte_lcore_id());
+	if (unlikely(cidx == -1)) {
+		cidx = RTE_MAX_LCORE;
+		rte_spinlock_lock(&pool->lcore_lock);
+	}
+	_mlx5_ipool_free_cache(pool, cidx, idx);
+	if (unlikely(cidx == RTE_MAX_LCORE))
+		rte_spinlock_unlock(&pool->lcore_lock);
+}
+
 void *
 mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
 {
@@ -814,7 +686,7 @@ mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
 	MLX5_ASSERT(pool);
 	mlx5_ipool_lock(pool);
 	if (pool->cfg.per_core_cache) {
-		for (i = 0; i < RTE_MAX_LCORE; i++) {
+		for (i = 0; i <= RTE_MAX_LCORE; i++) {
 			/*
 			 * Free only old global cache. Pool gc will be
 			 * freed at last.
@@ -883,7 +755,7 @@ mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
 	for (i = 0; i < gc->len; i++)
 		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
 	/* Clear core cache. */
-	for (i = 0; i < RTE_MAX_LCORE; i++) {
+	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
 		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
 
 		if (!ilc)