/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/
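
/*
 * Initialize the mutable part of a list: the RW lock and, when entries are
 * shared between lcores, the global cache slot at index RTE_MAX_LCORE.
 */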
static int
mlx5_list_init(struct mlx5_list_inconst *l_inconst,
	       struct mlx5_list_const *l_const,
	       struct mlx5_list_cache *gc)
{
	rte_rwlock_init(&l_inconst->lock);
	if (l_const->lcores_share) {
		l_inconst->cache[RTE_MAX_LCORE] = gc;
		LIST_INIT(&l_inconst->cache[RTE_MAX_LCORE]->h);
	}
	DRV_LOG(DEBUG, "mlx5 list %s initialized.", l_const->name);
	return 0;
}
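
/*
 * Allocate and initialize an mlx5 list. When lcores_share is set, the global
 * cache is placed right after the list structure in the same allocation.
 * All callbacks are mandatory.
 */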
struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	struct mlx5_list_cache *gc = NULL;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
			   0, SOCKET_ID_ANY);
	if (!list)
		return NULL;
	if (name)
		snprintf(list->l_const.name,
			 sizeof(list->l_const.name), "%s", name);
	list->l_const.ctx = ctx;
	list->l_const.lcores_share = lcores_share;
	list->l_const.cb_create = cb_create;
	list->l_const.cb_match = cb_match;
	list->l_const.cb_remove = cb_remove;
	list->l_const.cb_clone = cb_clone;
	list->l_const.cb_clone_free = cb_clone_free;
	if (lcores_share)
		gc = (struct mlx5_list_cache *)(list + 1);
	if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
		mlx5_free(list);
		return NULL;
	}
	return list;
}
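
/*
 * Walk one cache (a per-lcore cache, or the global one at RTE_MAX_LCORE)
 * and return the first entry matched by cb_match. With reuse set, the
 * entry reference counter is incremented; local entries whose counter is
 * already zero are treated as invalid and skipped.
 */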
static struct mlx5_list_entry *
__list_lookup(struct mlx5_list_inconst *l_inconst,
	      struct mlx5_list_const *l_const,
	      int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry =
				LIST_FIRST(&l_inconst->cache[lcore_index]->h);
	uint32_t ret = 0;

	while (entry != NULL) {
		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_RELAXED) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					l_const->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < RTE_MAX_LCORE) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_RELAXED);
			}
			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}
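
/*
 * Read-locked lookup over all per-lcore caches without taking a reference.
 * mlx5_list_lookup() below is the exported wrapper.
 */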
static inline struct mlx5_list_entry *
_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
		  struct mlx5_list_const *l_const, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&l_inconst->lock);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!l_inconst->cache[i])
			continue;
		entry = __list_lookup(l_inconst, l_const, i, ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&l_inconst->lock);
	return entry;
}

struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
}
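
/*
 * Clone a global entry into the calling lcore cache and link the clone at
 * the head of that cache.
 */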
static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
		       struct mlx5_list_const *l_const, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry =
			l_const->cb_clone(l_const->ctx, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
	return lentry;
}
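
/*
 * Free local entries that were invalidated by other lcores, up to the number
 * recorded in the cache inv_cnt counter.
 */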
static void
__list_cache_clean(struct mlx5_list_inconst *l_inconst,
		   struct mlx5_list_const *l_const,
		   int lcore_index)
{
	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			if (l_const->lcores_share)
				l_const->cb_clone_free(l_const->ctx, entry);
			else
				l_const->cb_remove(l_const->ctx, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}
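
/*
 * Register (or create) an entry. Lookup order: the calling lcore cache first,
 * then the global cache under the read lock, and only then a new entry is
 * created via cb_create. The generation counter is rechecked under the write
 * lock to detect a concurrent creation of the same entry.
 */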
static inline struct mlx5_list_entry *
_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
		    struct mlx5_list_const *l_const,
		    void *ctx)
{
	struct mlx5_list_entry *entry = NULL, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(l_inconst);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	if (unlikely(!l_inconst->cache[lcore_index])) {
		l_inconst->cache[lcore_index] = mlx5_malloc(0,
					sizeof(struct mlx5_list_cache),
					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!l_inconst->cache[lcore_index]) {
			rte_errno = ENOMEM;
			return NULL;
		}
		l_inconst->cache[lcore_index]->inv_cnt = 0;
		LIST_INIT(&l_inconst->cache[lcore_index]->h);
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(l_inconst, l_const, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	if (l_const->lcores_share) {
		/* 2. Lookup with read lock on global list, reuse if found. */
		rte_rwlock_read_lock(&l_inconst->lock);
		entry = __list_lookup(l_inconst, l_const, RTE_MAX_LCORE,
				      ctx, true);
		if (likely(entry)) {
			rte_rwlock_read_unlock(&l_inconst->lock);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      entry, ctx);
		}
		prev_gen_cnt = l_inconst->gen_cnt;
		rte_rwlock_read_unlock(&l_inconst->lock);
	}
	/* 3. Prepare new entry for global list and for cache. */
	entry = l_const->cb_create(l_const->ctx, ctx);
	if (unlikely(!entry))
		return NULL;
	entry->ref_cnt = 1u;
	if (!l_const->lcores_share) {
		entry->lcore_idx = (uint32_t)lcore_index;
		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
				 entry, next);
		__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
			l_const->name, lcore_index,
			(void *)entry, entry->ref_cnt);
		return entry;
	}
	local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
	if (unlikely(!local_entry)) {
		l_const->cb_remove(l_const->ctx, entry);
		return NULL;
	}
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&l_inconst->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
							       l_const,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found real race!!, reuse the old entry. */
			rte_rwlock_write_unlock(&l_inconst->lock);
			l_const->cb_remove(l_const->ctx, entry);
			l_const->cb_clone_free(l_const->ctx, local_entry);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      oentry, ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&l_inconst->cache[RTE_MAX_LCORE]->h, entry, next);
	l_inconst->gen_cnt++;
	rte_rwlock_write_unlock(&l_inconst->lock);
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
	__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}

struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	return _mlx5_list_register(&list->l_inconst, &list->l_const, ctx);
}
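
/*
 * Release one reference. A local clone is freed immediately when released on
 * its owning lcore, otherwise it is only marked invalid for a later cache
 * clean. The global entry is removed under the write lock once its reference
 * counter drops to zero. Returns 1 while the entry is still in use, 0 when
 * it was freed.
 */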
static inline int
_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
		      struct mlx5_list_const *l_const,
		      struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		if (l_const->lcores_share)
			l_const->cb_clone_free(l_const->ctx, entry);
		else
			l_const->cb_remove(l_const->ctx, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
				   1, __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (!l_const->lcores_share) {
		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)entry);
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&l_inconst->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&l_inconst->lock);
		l_const->cb_remove(l_const->ctx, gentry);
		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&l_inconst->lock);
	return 1;
}

int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	return _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry);
}
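
/*
 * Release every cached entry of every lcore plus the global cache, then free
 * the dynamically allocated per-lcore caches.
 */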
static void
mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
		 struct mlx5_list_const *l_const)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(l_inconst);
	for (i = 0; i <= RTE_MAX_LCORE; i++) {
		if (!l_inconst->cache[i])
			continue;
		while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
			entry = LIST_FIRST(&l_inconst->cache[i]->h);
			LIST_REMOVE(entry, next);
			if (i == RTE_MAX_LCORE) {
				l_const->cb_remove(l_const->ctx, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", l_const->name,
					(void *)entry);
			} else {
				l_const->cb_clone_free(l_const->ctx, entry);
			}
		}
		if (i != RTE_MAX_LCORE)
			mlx5_free(l_inconst->cache[i]);
	}
}

void
mlx5_list_destroy(struct mlx5_list *list)
{
	mlx5_list_uninit(&list->l_inconst, &list->l_const);
	mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
}
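
/*
 * Minimal usage sketch of the list API (illustrative only; the flow_*
 * callback names below are hypothetical placeholders, not part of this
 * file):
 *
 *	struct mlx5_list *lst = mlx5_list_create("flows", ctx, true,
 *						 flow_create_cb, flow_match_cb,
 *						 flow_remove_cb, flow_clone_cb,
 *						 flow_clone_free_cb);
 *	struct mlx5_list_entry *e = mlx5_list_register(lst, &criteria);
 *	...
 *	mlx5_list_unregister(lst, e);
 *	mlx5_list_destroy(lst);
 */
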
/********************* Hash List **********************/
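
/*
 * Create a hash list: an array of buckets, each one an mlx5 list sharing the
 * same constant part (callbacks and flags). The requested size is rounded up
 * to the next power of two.
 */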
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
		  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
		  mlx5_list_match_cb cb_match,
		  mlx5_list_remove_cb cb_remove,
		  mlx5_list_clone_cb cb_clone,
		  mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *h;
	struct mlx5_list_cache *gc;
	uint32_t act_size;
	uint32_t alloc_size;
	uint32_t i;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	/* Align to the next power of 2, 32bits integer is enough now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_bucket) * act_size;
	if (lcores_share)
		alloc_size += sizeof(struct mlx5_list_cache) * act_size;
	/* Using zmalloc, then no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
	h->l_const.ctx = ctx;
	h->l_const.lcores_share = lcores_share;
	h->l_const.cb_create = cb_create;
	h->l_const.cb_match = cb_match;
	h->l_const.cb_remove = cb_remove;
	h->l_const.cb_clone = cb_clone;
	h->l_const.cb_clone_free = cb_clone_free;
	h->mask = act_size - 1;
	h->direct_key = direct_key;
	gc = (struct mlx5_list_cache *)&h->buckets[act_size];
	for (i = 0; i < act_size; i++) {
		if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
		    lcores_share ? &gc[i] : NULL) != 0) {
			mlx5_free(h);
			return NULL;
		}
	}
	DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
		name, act_size);
	return h;
}
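
/*
 * Look up an entry in the bucket selected by the 64-bit key, either used
 * directly (direct_key) or hashed with CRC.
 */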
struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
}
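
/*
 * Register an entry in the bucket selected by the key and remember the
 * bucket index inside the entry (or its global entry) for unregister.
 */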
struct mlx5_list_entry*
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_list_entry *entry;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx);
	if (likely(entry)) {
		if (h->l_const.lcores_share)
			entry->gentry->bucket_idx = idx;
		else
			entry->bucket_idx = idx;
	}
	return entry;
}
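
/* Release an entry using the bucket index stored at registration time. */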
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
	uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
						 entry->bucket_idx;

	return _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry);
}
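
/* Uninitialize every bucket list and free the hash list memory. */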
void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t i;

	for (i = 0; i <= h->mask; i++)
		mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
	mlx5_free(h);
}