X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcommon%2Fmlx5%2Fmlx5_common_utils.c;h=775fabd478b98846ac2247d27ec45f78b0185163;hb=90a2ec4ae81f2ef52f7c14bfc9307e75a4127fa4;hp=0f400f635346510868967d56f0efb3f8964dabfd;hpb=6507c9f51d9dc3e1ac074ce85bcadaf69afa9dee;p=dpdk.git diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c index 0f400f6353..775fabd478 100644 --- a/drivers/common/mlx5/mlx5_common_utils.c +++ b/drivers/common/mlx5/mlx5_common_utils.c @@ -13,6 +13,19 @@ /********************* mlx5 list ************************/ +static int +mlx5_list_init(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, + struct mlx5_list_cache *gc) +{ + rte_rwlock_init(&l_inconst->lock); + if (l_const->lcores_share) { + l_inconst->cache[MLX5_LIST_GLOBAL] = gc; + LIST_INIT(&l_inconst->cache[MLX5_LIST_GLOBAL]->h); + } + return 0; +} + struct mlx5_list * mlx5_list_create(const char *name, void *ctx, bool lcores_share, mlx5_list_create_cb cb_create, @@ -22,51 +35,62 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share, mlx5_list_clone_free_cb cb_clone_free) { struct mlx5_list *list; - int i; + struct mlx5_list_cache *gc = NULL; if (!cb_match || !cb_create || !cb_remove || !cb_clone || !cb_clone_free) { rte_errno = EINVAL; return NULL; } - list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY); + list = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*list) + (lcores_share ? sizeof(*gc) : 0), + 0, SOCKET_ID_ANY); + if (!list) return NULL; if (name) - snprintf(list->name, sizeof(list->name), "%s", name); - list->ctx = ctx; - list->lcores_share = lcores_share; - list->cb_create = cb_create; - list->cb_match = cb_match; - list->cb_remove = cb_remove; - list->cb_clone = cb_clone; - list->cb_clone_free = cb_clone_free; - rte_rwlock_init(&list->lock); - DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name); - for (i = 0; i <= RTE_MAX_LCORE; i++) - LIST_INIT(&list->cache[i].h); + snprintf(list->l_const.name, + sizeof(list->l_const.name), "%s", name); + list->l_const.ctx = ctx; + list->l_const.lcores_share = lcores_share; + list->l_const.cb_create = cb_create; + list->l_const.cb_match = cb_match; + list->l_const.cb_remove = cb_remove; + list->l_const.cb_clone = cb_clone; + list->l_const.cb_clone_free = cb_clone_free; + rte_spinlock_init(&list->l_const.lcore_lock); + if (lcores_share) + gc = (struct mlx5_list_cache *)(list + 1); + if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) { + mlx5_free(list); + return NULL; + } + DRV_LOG(DEBUG, "mlx5 list %s was created.", name); return list; } static struct mlx5_list_entry * -__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse) +__list_lookup(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, + int lcore_index, void *ctx, bool reuse) { - struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h); + struct mlx5_list_entry *entry = + LIST_FIRST(&l_inconst->cache[lcore_index]->h); uint32_t ret; while (entry != NULL) { - if (list->cb_match(list->ctx, entry, ctx) == 0) { + if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) { if (reuse) { ret = __atomic_add_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1; DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.", - list->name, (void *)entry, + l_const->name, (void *)entry, entry->ref_cnt); - } else if (lcore_index < RTE_MAX_LCORE) { + } else if (lcore_index < MLX5_LIST_GLOBAL) { ret = __atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED); } - if (likely(ret != 0 || lcore_index 
== RTE_MAX_LCORE)) + if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL)) return entry; if (reuse && ret == 0) entry->ref_cnt--; /* Invalid entry. */ @@ -76,41 +100,56 @@ __list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse) return NULL; } -struct mlx5_list_entry * -mlx5_list_lookup(struct mlx5_list *list, void *ctx) +static inline struct mlx5_list_entry * +_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, void *ctx) { struct mlx5_list_entry *entry = NULL; int i; - rte_rwlock_read_lock(&list->lock); - for (i = 0; i < RTE_MAX_LCORE; i++) { - entry = __list_lookup(list, i, ctx, false); + rte_rwlock_read_lock(&l_inconst->lock); + for (i = 0; i < MLX5_LIST_GLOBAL; i++) { + if (!l_inconst->cache[i]) + continue; + entry = __list_lookup(l_inconst, l_const, i, + ctx, false); if (entry) break; } - rte_rwlock_read_unlock(&list->lock); + rte_rwlock_read_unlock(&l_inconst->lock); return entry; } +struct mlx5_list_entry * +mlx5_list_lookup(struct mlx5_list *list, void *ctx) +{ + return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx); +} + + static struct mlx5_list_entry * -mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index, +mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, int lcore_index, struct mlx5_list_entry *gentry, void *ctx) { - struct mlx5_list_entry *lentry = list->cb_clone(list->ctx, gentry, ctx); + struct mlx5_list_entry *lentry = + l_const->cb_clone(l_const->ctx, gentry, ctx); if (unlikely(!lentry)) return NULL; lentry->ref_cnt = 1u; lentry->gentry = gentry; lentry->lcore_idx = (uint32_t)lcore_index; - LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next); + LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next); return lentry; } static void -__list_cache_clean(struct mlx5_list *list, int lcore_index) +__list_cache_clean(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, + int lcore_index) { - struct mlx5_list_cache *c = &list->cache[lcore_index]; + struct mlx5_list_cache *c = l_inconst->cache[lcore_index]; struct mlx5_list_entry *entry = LIST_FIRST(&c->h); uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0, __ATOMIC_RELAXED); @@ -120,161 +159,219 @@ __list_cache_clean(struct mlx5_list *list, int lcore_index) if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) { LIST_REMOVE(entry, next); - if (list->lcores_share) - list->cb_clone_free(list->ctx, entry); + if (l_const->lcores_share) + l_const->cb_clone_free(l_const->ctx, entry); else - list->cb_remove(list->ctx, entry); + l_const->cb_remove(l_const->ctx, entry); inv_cnt--; } entry = nentry; } } -struct mlx5_list_entry * -mlx5_list_register(struct mlx5_list *list, void *ctx) +static inline struct mlx5_list_entry * +_mlx5_list_register(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, + void *ctx, int lcore_index) { struct mlx5_list_entry *entry = NULL, *local_entry; volatile uint32_t prev_gen_cnt = 0; - int lcore_index = rte_lcore_index(rte_lcore_id()); - - MLX5_ASSERT(list); - MLX5_ASSERT(lcore_index < RTE_MAX_LCORE); - if (unlikely(lcore_index == -1)) { - rte_errno = ENOTSUP; - return NULL; + MLX5_ASSERT(l_inconst); + if (unlikely(!l_inconst->cache[lcore_index])) { + l_inconst->cache[lcore_index] = mlx5_malloc(0, + sizeof(struct mlx5_list_cache), + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (!l_inconst->cache[lcore_index]) { + rte_errno = ENOMEM; + return NULL; + } + l_inconst->cache[lcore_index]->inv_cnt = 0; + 
LIST_INIT(&l_inconst->cache[lcore_index]->h); } /* 0. Free entries that was invalidated by other lcores. */ - __list_cache_clean(list, lcore_index); + __list_cache_clean(l_inconst, l_const, lcore_index); /* 1. Lookup in local cache. */ - local_entry = __list_lookup(list, lcore_index, ctx, true); + local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true); if (local_entry) return local_entry; - if (list->lcores_share) { + if (l_const->lcores_share) { /* 2. Lookup with read lock on global list, reuse if found. */ - rte_rwlock_read_lock(&list->lock); - entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true); + rte_rwlock_read_lock(&l_inconst->lock); + entry = __list_lookup(l_inconst, l_const, MLX5_LIST_GLOBAL, + ctx, true); if (likely(entry)) { - rte_rwlock_read_unlock(&list->lock); - return mlx5_list_cache_insert(list, lcore_index, entry, - ctx); + rte_rwlock_read_unlock(&l_inconst->lock); + return mlx5_list_cache_insert(l_inconst, l_const, + lcore_index, + entry, ctx); } - prev_gen_cnt = list->gen_cnt; - rte_rwlock_read_unlock(&list->lock); + prev_gen_cnt = l_inconst->gen_cnt; + rte_rwlock_read_unlock(&l_inconst->lock); } /* 3. Prepare new entry for global list and for cache. */ - entry = list->cb_create(list->ctx, ctx); + entry = l_const->cb_create(l_const->ctx, ctx); if (unlikely(!entry)) return NULL; entry->ref_cnt = 1u; - if (!list->lcores_share) { + if (!l_const->lcores_share) { entry->lcore_idx = (uint32_t)lcore_index; - LIST_INSERT_HEAD(&list->cache[lcore_index].h, entry, next); - __atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED); + LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, + entry, next); + __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED); DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.", - list->name, lcore_index, (void *)entry, entry->ref_cnt); + l_const->name, lcore_index, + (void *)entry, entry->ref_cnt); return entry; } - local_entry = list->cb_clone(list->ctx, entry, ctx); + local_entry = l_const->cb_clone(l_const->ctx, entry, ctx); if (unlikely(!local_entry)) { - list->cb_remove(list->ctx, entry); + l_const->cb_remove(l_const->ctx, entry); return NULL; } local_entry->ref_cnt = 1u; local_entry->gentry = entry; local_entry->lcore_idx = (uint32_t)lcore_index; - rte_rwlock_write_lock(&list->lock); + rte_rwlock_write_lock(&l_inconst->lock); /* 4. Make sure the same entry was not created before the write lock. */ - if (unlikely(prev_gen_cnt != list->gen_cnt)) { - struct mlx5_list_entry *oentry = __list_lookup(list, - RTE_MAX_LCORE, + if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) { + struct mlx5_list_entry *oentry = __list_lookup(l_inconst, + l_const, + MLX5_LIST_GLOBAL, ctx, true); if (unlikely(oentry)) { /* 4.5. Found real race!!, reuse the old entry. */ - rte_rwlock_write_unlock(&list->lock); - list->cb_remove(list->ctx, entry); - list->cb_clone_free(list->ctx, local_entry); - return mlx5_list_cache_insert(list, lcore_index, oentry, - ctx); + rte_rwlock_write_unlock(&l_inconst->lock); + l_const->cb_remove(l_const->ctx, entry); + l_const->cb_clone_free(l_const->ctx, local_entry); + return mlx5_list_cache_insert(l_inconst, l_const, + lcore_index, + oentry, ctx); } } /* 5. Update lists. 
*/ - LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next); - list->gen_cnt++; - rte_rwlock_write_unlock(&list->lock); - LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next); - __atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED); - DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name, + LIST_INSERT_HEAD(&l_inconst->cache[MLX5_LIST_GLOBAL]->h, entry, next); + l_inconst->gen_cnt++; + rte_rwlock_write_unlock(&l_inconst->lock); + LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next); + __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED); + DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name, (void *)entry, entry->ref_cnt); return local_entry; } -int -mlx5_list_unregister(struct mlx5_list *list, - struct mlx5_list_entry *entry) +struct mlx5_list_entry * +mlx5_list_register(struct mlx5_list *list, void *ctx) +{ + struct mlx5_list_entry *entry; + int lcore_index = rte_lcore_index(rte_lcore_id()); + + if (unlikely(lcore_index == -1)) { + lcore_index = MLX5_LIST_NLCORE; + rte_spinlock_lock(&list->l_const.lcore_lock); + } + entry = _mlx5_list_register(&list->l_inconst, &list->l_const, ctx, + lcore_index); + if (unlikely(lcore_index == MLX5_LIST_NLCORE)) + rte_spinlock_unlock(&list->l_const.lcore_lock); + return entry; +} + +static inline int +_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const, + struct mlx5_list_entry *entry, + int lcore_idx) { struct mlx5_list_entry *gentry = entry->gentry; - int lcore_idx; if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0) return 1; - lcore_idx = rte_lcore_index(rte_lcore_id()); - MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE); if (entry->lcore_idx == (uint32_t)lcore_idx) { LIST_REMOVE(entry, next); - if (list->lcores_share) - list->cb_clone_free(list->ctx, entry); + if (l_const->lcores_share) + l_const->cb_clone_free(l_const->ctx, entry); else - list->cb_remove(list->ctx, entry); + l_const->cb_remove(l_const->ctx, entry); } else if (likely(lcore_idx != -1)) { - __atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1, - __ATOMIC_RELAXED); + __atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt, + 1, __ATOMIC_RELAXED); } else { return 0; } - if (!list->lcores_share) { - __atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED); + if (!l_const->lcores_share) { + __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED); DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.", - list->name, (void *)entry); + l_const->name, (void *)entry); return 0; } if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0) return 1; - rte_rwlock_write_lock(&list->lock); + rte_rwlock_write_lock(&l_inconst->lock); if (likely(gentry->ref_cnt == 0)) { LIST_REMOVE(gentry, next); - rte_rwlock_write_unlock(&list->lock); - list->cb_remove(list->ctx, gentry); - __atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED); + rte_rwlock_write_unlock(&l_inconst->lock); + l_const->cb_remove(l_const->ctx, gentry); + __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED); DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.", - list->name, (void *)gentry); + l_const->name, (void *)gentry); return 0; } - rte_rwlock_write_unlock(&list->lock); + rte_rwlock_write_unlock(&l_inconst->lock); return 1; } -void -mlx5_list_destroy(struct mlx5_list *list) +int +mlx5_list_unregister(struct mlx5_list *list, + struct mlx5_list_entry *entry) +{ + int ret; + int lcore_index = rte_lcore_index(rte_lcore_id()); + + if (unlikely(lcore_index == -1)) { + lcore_index = MLX5_LIST_NLCORE; + 
rte_spinlock_lock(&list->l_const.lcore_lock); + } + ret = _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry, + lcore_index); + if (unlikely(lcore_index == MLX5_LIST_NLCORE)) + rte_spinlock_unlock(&list->l_const.lcore_lock); + return ret; + +} + +static void +mlx5_list_uninit(struct mlx5_list_inconst *l_inconst, + struct mlx5_list_const *l_const) { struct mlx5_list_entry *entry; int i; - MLX5_ASSERT(list); - for (i = 0; i <= RTE_MAX_LCORE; i++) { - while (!LIST_EMPTY(&list->cache[i].h)) { - entry = LIST_FIRST(&list->cache[i].h); + MLX5_ASSERT(l_inconst); + for (i = 0; i < MLX5_LIST_MAX; i++) { + if (!l_inconst->cache[i]) + continue; + while (!LIST_EMPTY(&l_inconst->cache[i]->h)) { + entry = LIST_FIRST(&l_inconst->cache[i]->h); LIST_REMOVE(entry, next); - if (i == RTE_MAX_LCORE) { - list->cb_remove(list, entry); + if (i == MLX5_LIST_GLOBAL) { + l_const->cb_remove(l_const->ctx, entry); DRV_LOG(DEBUG, "mlx5 list %s entry %p " - "destroyed.", list->name, + "destroyed.", l_const->name, (void *)entry); } else { - list->cb_clone_free(list, entry); + l_const->cb_clone_free(l_const->ctx, entry); } } + if (i != MLX5_LIST_GLOBAL) + mlx5_free(l_inconst->cache[i]); } +} + +void +mlx5_list_destroy(struct mlx5_list *list) +{ + mlx5_list_uninit(&list->l_inconst, &list->l_const); mlx5_free(list); } @@ -282,47 +379,42 @@ uint32_t mlx5_list_get_entry_num(struct mlx5_list *list) { MLX5_ASSERT(list); - return __atomic_load_n(&list->count, __ATOMIC_RELAXED); + return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED); } /********************* Hash List **********************/ -static struct mlx5_hlist_entry * -mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused, - void *ctx __rte_unused) -{ - return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY); -} - -static void -mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused, - struct mlx5_hlist_entry *entry) -{ - mlx5_free(entry); -} - struct mlx5_hlist * -mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size, - uint32_t flags, mlx5_hlist_create_cb cb_create, - mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove) +mlx5_hlist_create(const char *name, uint32_t size, bool direct_key, + bool lcores_share, void *ctx, mlx5_list_create_cb cb_create, + mlx5_list_match_cb cb_match, + mlx5_list_remove_cb cb_remove, + mlx5_list_clone_cb cb_clone, + mlx5_list_clone_free_cb cb_clone_free) { struct mlx5_hlist *h; + struct mlx5_list_cache *gc; uint32_t act_size; uint32_t alloc_size; uint32_t i; - if (!size || !cb_match || (!cb_create ^ !cb_remove)) + if (!cb_match || !cb_create || !cb_remove || !cb_clone || + !cb_clone_free) { + rte_errno = EINVAL; return NULL; + } /* Align to the next power of 2, 32bits integer is enough now. */ if (!rte_is_power_of_2(size)) { act_size = rte_align32pow2(size); - DRV_LOG(DEBUG, "Size 0x%" PRIX32 " is not power of 2, " - "will be aligned to 0x%" PRIX32 ".", size, act_size); + DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will " + "be aligned to 0x%" PRIX32 ".", size, act_size); } else { act_size = size; } alloc_size = sizeof(struct mlx5_hlist) + sizeof(struct mlx5_hlist_bucket) * act_size; + if (lcores_share) + alloc_size += sizeof(struct mlx5_list_cache) * act_size; /* Using zmalloc, then no need to initialize the heads. 
*/ h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); @@ -332,60 +424,32 @@ mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size, return NULL; } if (name) - snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name); - h->table_sz = act_size; + snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name); + h->l_const.ctx = ctx; + h->l_const.lcores_share = lcores_share; + h->l_const.cb_create = cb_create; + h->l_const.cb_match = cb_match; + h->l_const.cb_remove = cb_remove; + h->l_const.cb_clone = cb_clone; + h->l_const.cb_clone_free = cb_clone_free; + rte_spinlock_init(&h->l_const.lcore_lock); h->mask = act_size - 1; - h->entry_sz = entry_size; - h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY); - h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST); - h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb; - h->cb_match = cb_match; - h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb; - for (i = 0; i < act_size; i++) - rte_rwlock_init(&h->buckets[i].lock); - DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.", - h->name, act_size); - return h; -} - -static struct mlx5_hlist_entry * -__hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx, - void *ctx, bool reuse) -{ - struct mlx5_hlist_head *first; - struct mlx5_hlist_entry *node; - - MLX5_ASSERT(h); - first = &h->buckets[idx].head; - LIST_FOREACH(node, first, next) { - if (!h->cb_match(h, node, key, ctx)) { - if (reuse) { - __atomic_add_fetch(&node->ref_cnt, 1, - __ATOMIC_RELAXED); - DRV_LOG(DEBUG, "Hash list %s entry %p " - "reuse: %u.", - h->name, (void *)node, node->ref_cnt); - } - break; + h->direct_key = direct_key; + gc = (struct mlx5_list_cache *)&h->buckets[act_size]; + for (i = 0; i < act_size; i++) { + if (mlx5_list_init(&h->buckets[i].l, &h->l_const, + lcores_share ? &gc[i] : NULL) != 0) { + mlx5_free(h); + return NULL; } } - return node; + DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.", + name, act_size); + return h; } -static struct mlx5_hlist_entry * -hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx, - void *ctx, bool reuse) -{ - struct mlx5_hlist_entry *node; - - MLX5_ASSERT(h); - rte_rwlock_read_lock(&h->buckets[idx].lock); - node = __hlist_lookup(h, key, idx, ctx, reuse); - rte_rwlock_read_unlock(&h->buckets[idx].lock); - return node; -} -struct mlx5_hlist_entry * +struct mlx5_list_entry * mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx) { uint32_t idx; @@ -394,102 +458,61 @@ mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx) idx = (uint32_t)(key & h->mask); else idx = rte_hash_crc_8byte(key, 0) & h->mask; - return hlist_lookup(h, key, idx, ctx, false); + return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx); } -struct mlx5_hlist_entry* +struct mlx5_list_entry* mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx) { uint32_t idx; - struct mlx5_hlist_head *first; - struct mlx5_hlist_bucket *b; - struct mlx5_hlist_entry *entry; - uint32_t prev_gen_cnt = 0; + struct mlx5_list_entry *entry; + int lcore_index = rte_lcore_index(rte_lcore_id()); if (h->direct_key) idx = (uint32_t)(key & h->mask); else idx = rte_hash_crc_8byte(key, 0) & h->mask; - MLX5_ASSERT(h); - b = &h->buckets[idx]; - /* Use write lock directly for write-most list. 
*/ - if (!h->write_most) { - prev_gen_cnt = __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE); - entry = hlist_lookup(h, key, idx, ctx, true); - if (entry) - return entry; - } - rte_rwlock_write_lock(&b->lock); - /* Check if the list changed by other threads. */ - if (h->write_most || - prev_gen_cnt != __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE)) { - entry = __hlist_lookup(h, key, idx, ctx, true); - if (entry) - goto done; + if (unlikely(lcore_index == -1)) { + lcore_index = MLX5_LIST_NLCORE; + rte_spinlock_lock(&h->l_const.lcore_lock); } - first = &b->head; - entry = h->cb_create(h, key, ctx); - if (!entry) { - rte_errno = ENOMEM; - DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name); - goto done; + entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx, + lcore_index); + if (likely(entry)) { + if (h->l_const.lcores_share) + entry->gentry->bucket_idx = idx; + else + entry->bucket_idx = idx; } - entry->idx = idx; - entry->ref_cnt = 1; - LIST_INSERT_HEAD(first, entry, next); - __atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL); - DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.", - h->name, (void *)entry, entry->ref_cnt); -done: - rte_rwlock_write_unlock(&b->lock); + if (unlikely(lcore_index == MLX5_LIST_NLCORE)) + rte_spinlock_unlock(&h->l_const.lcore_lock); return entry; } int -mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry) +mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry) { - uint32_t idx = entry->idx; - - rte_rwlock_write_lock(&h->buckets[idx].lock); - MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev); - DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.", - h->name, (void *)entry, entry->ref_cnt); - if (--entry->ref_cnt) { - rte_rwlock_write_unlock(&h->buckets[idx].lock); - return 1; + int lcore_index = rte_lcore_index(rte_lcore_id()); + int ret; + uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx : + entry->bucket_idx; + if (unlikely(lcore_index == -1)) { + lcore_index = MLX5_LIST_NLCORE; + rte_spinlock_lock(&h->l_const.lcore_lock); } - LIST_REMOVE(entry, next); - /* Set to NULL to get rid of removing action for more than once. */ - entry->next.le_prev = NULL; - h->cb_remove(h, entry); - rte_rwlock_write_unlock(&h->buckets[idx].lock); - DRV_LOG(DEBUG, "Hash list %s entry %p removed.", - h->name, (void *)entry); - return 0; + ret = _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry, + lcore_index); + if (unlikely(lcore_index == MLX5_LIST_NLCORE)) + rte_spinlock_unlock(&h->l_const.lcore_lock); + return ret; } void mlx5_hlist_destroy(struct mlx5_hlist *h) { - uint32_t idx; - struct mlx5_hlist_entry *entry; + uint32_t i; - MLX5_ASSERT(h); - for (idx = 0; idx < h->table_sz; ++idx) { - /* No LIST_FOREACH_SAFE, using while instead. */ - while (!LIST_EMPTY(&h->buckets[idx].head)) { - entry = LIST_FIRST(&h->buckets[idx].head); - LIST_REMOVE(entry, next); - /* - * The owner of whole element which contains data entry - * is the user, so it's the user's duty to do the clean - * up and the free work because someone may not put the - * hlist entry at the beginning(suggested to locate at - * the beginning). Or else the default free function - * will be used. - */ - h->cb_remove(h, entry); - } - } + for (i = 0; i <= h->mask; i++) + mlx5_list_uninit(&h->buckets[i].l, &h->l_const); mlx5_free(h); }
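
For reference, below is a minimal sketch of how a driver-side caller would plug into the reworked list/hash-list API after this patch. It is not part of the patch itself: the entry type struct my_entry, the my_* callbacks and my_usage() are hypothetical names, the callback prototypes are inferred from the call sites above (cb_create(tool_ctx, cb_ctx), cb_match(tool_ctx, entry, cb_ctx) returning 0 on match, cb_clone/cb_clone_free for the per-lcore copies), and the exact typedefs, struct layout and headers should be checked against mlx5_common_utils.h.

#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_memory.h>

#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"

/* A driver-side entry: the mlx5_list_entry header is assumed to be the
 * first member so the framework can cast between the two freely. */
struct my_entry {
	struct mlx5_list_entry e;
	uint32_t key;
};

static struct mlx5_list_entry *
my_create(void *tool_ctx __rte_unused, void *cb_ctx)
{
	struct my_entry *me = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*me), 0,
					  SOCKET_ID_ANY);

	if (!me)
		return NULL;
	me->key = *(uint32_t *)cb_ctx;
	return &me->e;
}

static int
my_match(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
	 void *cb_ctx)
{
	/* Return 0 on match, mirroring the cb_match() == 0 check above. */
	return ((struct my_entry *)entry)->key != *(uint32_t *)cb_ctx;
}

static void
my_remove(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}

static struct mlx5_list_entry *
my_clone(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
	 void *cb_ctx __rte_unused)
{
	/* Per-lcore copy of a shared (global) entry. */
	struct my_entry *clone = mlx5_malloc(0, sizeof(*clone), 0,
					     SOCKET_ID_ANY);

	if (!clone)
		return NULL;
	memcpy(clone, entry, sizeof(*clone));
	return &clone->e;
}

static void
my_clone_free(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}

/* Create once at init time, then register/unregister from any thread;
 * non-EAL threads (rte_lcore_index() == -1) are serialized internally
 * through the new lcore_lock spinlock. */
static void
my_usage(void)
{
	uint32_t key = 42;
	struct mlx5_list *list = mlx5_list_create("demo", NULL, true,
						  my_create, my_match,
						  my_remove, my_clone,
						  my_clone_free);
	struct mlx5_hlist *h = mlx5_hlist_create("demo_hash", 64,
						 false, /* direct_key */
						 true,  /* lcores_share */
						 NULL, my_create, my_match,
						 my_remove, my_clone,
						 my_clone_free);
	struct mlx5_list_entry *e;

	if (list) {
		e = mlx5_list_register(list, &key);    /* lookup-or-create */
		if (e)
			mlx5_list_unregister(list, e); /* drop the reference */
		mlx5_list_destroy(list);
	}
	if (h) {
		e = mlx5_hlist_register(h, key, &key);
		if (e)
			mlx5_hlist_unregister(h, e);
		mlx5_hlist_destroy(h);
	}
}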