/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

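/*
 * An mlx5 list is a reference-counted registry of entries with one cache
 * per lcore and, when lcores_share is set, a shared global cache kept at
 * index RTE_MAX_LCORE. Entry life cycle is driven by the user-supplied
 * create/match/remove/clone callbacks; per-lcore clones keep hot lookups
 * off the global rwlock.
 *
 * A minimal usage sketch (the demo_* callbacks and contexts below are
 * hypothetical, for illustration only):
 *
 *	struct mlx5_list *l = mlx5_list_create("demo", ctx, true,
 *					       demo_create, demo_match,
 *					       demo_remove, demo_clone,
 *					       demo_clone_free);
 *	struct mlx5_list_entry *e = mlx5_list_register(l, key_ctx);
 *	...
 *	mlx5_list_unregister(l, e);
 *	mlx5_list_destroy(l);
 */
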
static int
mlx5_list_init(struct mlx5_list *list, const char *name, void *ctx,
	       bool lcores_share, struct mlx5_list_cache *gc,
	       mlx5_list_create_cb cb_create,
	       mlx5_list_match_cb cb_match,
	       mlx5_list_remove_cb cb_remove,
	       mlx5_list_clone_cb cb_clone,
	       mlx5_list_clone_free_cb cb_clone_free)
{
	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->ctx = ctx;
	list->lcores_share = lcores_share;
	list->cb_create = cb_create;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove;
	list->cb_clone = cb_clone;
	list->cb_clone_free = cb_clone_free;
	rte_rwlock_init(&list->lock);
	if (lcores_share) {
		list->cache[RTE_MAX_LCORE] = gc;
		LIST_INIT(&list->cache[RTE_MAX_LCORE]->h);
	}
	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
	return 0;
}

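/*
 * Allocate a list object and, when lcores_share is set, the global cache
 * that trails it in the same allocation, then initialize both. Returns
 * NULL and releases the allocation on any failure.
 */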
struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	struct mlx5_list_cache *gc = NULL;

	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
			   0, SOCKET_ID_ANY);
	if (!list)
		return NULL;
	if (lcores_share)
		gc = (struct mlx5_list_cache *)(list + 1);
	if (mlx5_list_init(list, name, ctx, lcores_share, gc,
			   cb_create, cb_match, cb_remove, cb_clone,
			   cb_clone_free) != 0) {
		mlx5_free(list);
		return NULL;
	}
	return list;
}

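/*
 * Scan a single cache (a per-lcore one, or the global one at index
 * RTE_MAX_LCORE) for an entry matching ctx. With reuse set, a match also
 * takes a reference; a local entry whose previous reference count was zero
 * has already been invalidated, so the reference is dropped again and the
 * scan continues.
 */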
static struct mlx5_list_entry *
__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry =
				LIST_FIRST(&list->cache[lcore_index]->h);
	uint32_t ret = 0;

	while (entry != NULL) {
		if (list->cb_match(list->ctx, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_RELAXED) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					list->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < RTE_MAX_LCORE) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_RELAXED);
			}
			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}

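/*
 * Search every per-lcore cache under the read lock without taking a
 * reference; intended for callers that only need to know the entry exists.
 */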
struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&list->lock);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!list->cache[i])
			continue;
		entry = __list_lookup(list, i, ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}

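/* Clone a global entry into the local cache of the given lcore. */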
static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry = list->cb_clone(list->ctx, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&list->cache[lcore_index]->h, lentry, next);
	return lentry;
}

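/*
 * Drain entries that other lcores have invalidated in this lcore cache:
 * consume the invalidation counter and free every entry whose reference
 * count has dropped to zero.
 */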
static void
__list_cache_clean(struct mlx5_list *list, int lcore_index)
{
	struct mlx5_list_cache *c = list->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			if (list->lcores_share)
				list->cb_clone_free(list->ctx, entry);
			else
				list->cb_remove(list->ctx, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}

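/*
 * Register an entry, creating it if needed. Lookup order: the local cache
 * first, then (when shared) the global list under the read lock. On a miss
 * the entry is created outside of any lock; the generation counter detects
 * a concurrent creation of the same entry, in which case the duplicate is
 * discarded and the winner is reused.
 */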
struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry = NULL, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(list);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	if (unlikely(!list->cache[lcore_index])) {
		list->cache[lcore_index] = mlx5_malloc(0,
					sizeof(struct mlx5_list_cache),
					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!list->cache[lcore_index]) {
			rte_errno = ENOMEM;
			return NULL;
		}
		list->cache[lcore_index]->inv_cnt = 0;
		LIST_INIT(&list->cache[lcore_index]->h);
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(list, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(list, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	if (list->lcores_share) {
		/* 2. Lookup with read lock on global list, reuse if found. */
		rte_rwlock_read_lock(&list->lock);
		entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
		if (likely(entry)) {
			rte_rwlock_read_unlock(&list->lock);
			return mlx5_list_cache_insert(list, lcore_index, entry,
						      ctx);
		}
		prev_gen_cnt = list->gen_cnt;
		rte_rwlock_read_unlock(&list->lock);
	}
	/* 3. Prepare new entry for global list and for cache. */
	entry = list->cb_create(list->ctx, ctx);
	if (unlikely(!entry))
		return NULL;
	entry->ref_cnt = 1u;
	if (!list->lcores_share) {
		entry->lcore_idx = (uint32_t)lcore_index;
		LIST_INSERT_HEAD(&list->cache[lcore_index]->h, entry, next);
		__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
			list->name, lcore_index, (void *)entry, entry->ref_cnt);
		return entry;
	}
	local_entry = list->cb_clone(list->ctx, entry, ctx);
	if (unlikely(!local_entry)) {
		list->cb_remove(list->ctx, entry);
		return NULL;
	}
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&list->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(list,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found a real race, reuse the old entry. */
			rte_rwlock_write_unlock(&list->lock);
			list->cb_remove(list->ctx, entry);
			list->cb_clone_free(list->ctx, local_entry);
			return mlx5_list_cache_insert(list, lcore_index, oentry,
						      ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE]->h, entry, next);
	list->gen_cnt++;
	rte_rwlock_write_unlock(&list->lock);
	LIST_INSERT_HEAD(&list->cache[lcore_index]->h, local_entry, next);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}

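/*
 * Drop one reference. A local clone owned by another lcore is only marked
 * invalid (via inv_cnt) and reclaimed later by its owner; the global entry
 * is removed under the write lock once its own reference count reaches
 * zero. Returns 1 while the entry remains referenced, 0 otherwise.
 */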
int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		if (list->lcores_share)
			list->cb_clone_free(list->ctx, entry);
		else
			list->cb_remove(list->ctx, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&list->cache[entry->lcore_idx]->inv_cnt, 1,
				   __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (!list->lcores_share) {
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)entry);
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&list->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&list->lock);
		list->cb_remove(list->ctx, gentry);
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&list->lock);
	return 1;
}

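/*
 * Free all remaining entries: global entries are removed via cb_remove,
 * per-lcore clones via cb_clone_free, and the lazily allocated lcore
 * caches are released.
 */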
static void
mlx5_list_uninit(struct mlx5_list *list)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(list);
	for (i = 0; i <= RTE_MAX_LCORE; i++) {
		if (!list->cache[i])
			continue;
		while (!LIST_EMPTY(&list->cache[i]->h)) {
			entry = LIST_FIRST(&list->cache[i]->h);
			LIST_REMOVE(entry, next);
			if (i == RTE_MAX_LCORE) {
				list->cb_remove(list->ctx, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", list->name,
					(void *)entry);
			} else {
				list->cb_clone_free(list->ctx, entry);
			}
		}
		if (i != RTE_MAX_LCORE)
			mlx5_free(list->cache[i]);
	}
}

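/* Release every cached entry, then the list object itself. */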
void
mlx5_list_destroy(struct mlx5_list *list)
{
	mlx5_list_uninit(list);
	mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

/********************* Hash List **********************/

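/*
 * A hash list spreads entries over a power-of-two array of buckets, each
 * bucket being an independent mlx5 list. The 64-bit key selects the bucket
 * either directly (direct_key) or through a CRC hash; collision handling
 * and reference counting are inherited from the bucket lists.
 */
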
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
		  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
		  mlx5_list_match_cb cb_match,
		  mlx5_list_remove_cb cb_remove,
		  mlx5_list_clone_cb cb_clone,
		  mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *h;
	struct mlx5_list_cache *gc;
	uint32_t act_size;
	uint32_t alloc_size;
	uint32_t i;

	/* Align to the next power of 2; a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_bucket) * act_size;
	if (lcores_share)
		alloc_size += sizeof(struct mlx5_list_cache) * act_size;
	/* Using zmalloc, then no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	h->mask = act_size - 1;
	h->lcores_share = lcores_share;
	h->direct_key = direct_key;
	gc = (struct mlx5_list_cache *)&h->buckets[act_size];
	for (i = 0; i < act_size; i++) {
		if (mlx5_list_init(&h->buckets[i].l, name, ctx, lcores_share,
				   lcores_share ? &gc[i] : NULL,
				   cb_create, cb_match, cb_remove, cb_clone,
				   cb_clone_free) != 0) {
			mlx5_free(h);
			return NULL;
		}
	}
	DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
		name, act_size);
	return h;
}

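/*
 * Map the key to a bucket: use it directly when direct_key is set,
 * otherwise hash it with CRC, then defer to the bucket list lookup.
 */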
struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	return mlx5_list_lookup(&h->buckets[idx].l, ctx);
}

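/*
 * Register in the bucket selected by the key and remember the bucket index
 * in the entry (in the global entry when caches are shared) so that
 * unregister can find the owning bucket without the key.
 */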
struct mlx5_list_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_list_entry *entry;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	entry = mlx5_list_register(&h->buckets[idx].l, ctx);
	if (likely(entry)) {
		if (h->lcores_share)
			entry->gentry->bucket_idx = idx;
		else
			entry->bucket_idx = idx;
	}
	return entry;
}

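/* Drop one reference through the bucket recorded at registration time. */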
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
	uint32_t idx = h->lcores_share ? entry->gentry->bucket_idx :
					 entry->bucket_idx;

	return mlx5_list_unregister(&h->buckets[idx].l, entry);
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t i;

	for (i = 0; i <= h->mask; i++)
		mlx5_list_uninit(&h->buckets[i].l);
	mlx5_free(h);
}