drivers/common/mlx5/mlx5_common_utils.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_errno.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

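/*
 * Initialize the dynamic part of a list: reset the reader/writer lock and,
 * when entries are shared between lcores, install @gc as the global cache
 * slot (index RTE_MAX_LCORE) and reset its entry head.
 */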
static int
mlx5_list_init(struct mlx5_list_inconst *l_inconst,
               struct mlx5_list_const *l_const,
               struct mlx5_list_cache *gc)
{
        rte_rwlock_init(&l_inconst->lock);
        if (l_const->lcores_share) {
                l_inconst->cache[RTE_MAX_LCORE] = gc;
                LIST_INIT(&l_inconst->cache[RTE_MAX_LCORE]->h);
        }
        DRV_LOG(DEBUG, "mlx5 list %s initialized.", l_const->name);
        return 0;
}

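/*
 * Allocate and initialize an mlx5 list. All five callbacks are mandatory;
 * when lcores_share is requested, the global cache is carved out of the same
 * allocation right after the list structure.
 */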
struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
                 mlx5_list_create_cb cb_create,
                 mlx5_list_match_cb cb_match,
                 mlx5_list_remove_cb cb_remove,
                 mlx5_list_clone_cb cb_clone,
                 mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_list *list;
        struct mlx5_list_cache *gc = NULL;

        if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
            !cb_clone_free) {
                rte_errno = EINVAL;
                return NULL;
        }
        list = mlx5_malloc(MLX5_MEM_ZERO,
                           sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
                           0, SOCKET_ID_ANY);
        if (!list)
                return NULL;
        if (name)
                snprintf(list->l_const.name,
                         sizeof(list->l_const.name), "%s", name);
        list->l_const.ctx = ctx;
        list->l_const.lcores_share = lcores_share;
        list->l_const.cb_create = cb_create;
        list->l_const.cb_match = cb_match;
        list->l_const.cb_remove = cb_remove;
        list->l_const.cb_clone = cb_clone;
        list->l_const.cb_clone_free = cb_clone_free;
        if (lcores_share)
                gc = (struct mlx5_list_cache *)(list + 1);
        if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
                mlx5_free(list);
                return NULL;
        }
        return list;
}

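/*
 * Search one cache (a per-lcore slot, or the global slot when lcore_index is
 * RTE_MAX_LCORE) for an entry matching @ctx. With @reuse set, the reference
 * counter of the matched entry is incremented; a local entry whose counter
 * was zero is treated as invalidated and skipped, and the temporary
 * reference is dropped again.
 */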
static struct mlx5_list_entry *
__list_lookup(struct mlx5_list_inconst *l_inconst,
              struct mlx5_list_const *l_const,
              int lcore_index, void *ctx, bool reuse)
{
        struct mlx5_list_entry *entry =
                                LIST_FIRST(&l_inconst->cache[lcore_index]->h);
        /* Initialized so that probing the global cache without reuse does not
         * read an indeterminate value below.
         */
        uint32_t ret = 0;

        while (entry != NULL) {
                if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
                        if (reuse) {
                                ret = __atomic_add_fetch(&entry->ref_cnt, 1,
                                                         __ATOMIC_RELAXED) - 1;
                                DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
                                        l_const->name, (void *)entry,
                                        entry->ref_cnt);
                        } else if (lcore_index < RTE_MAX_LCORE) {
                                ret = __atomic_load_n(&entry->ref_cnt,
                                                      __ATOMIC_RELAXED);
                        }
                        if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
                                return entry;
                        if (reuse && ret == 0)
                                entry->ref_cnt--; /* Invalid entry. */
                }
                entry = LIST_NEXT(entry, next);
        }
        return NULL;
}

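/*
 * Read-only lookup: scan every per-lcore cache under the read lock without
 * taking a reference on the matched entry.
 */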
static inline struct mlx5_list_entry *
_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
                  struct mlx5_list_const *l_const, void *ctx)
{
        struct mlx5_list_entry *entry = NULL;
        int i;

        rte_rwlock_read_lock(&l_inconst->lock);
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (!l_inconst->cache[i])
                        continue;
                entry = __list_lookup(l_inconst, l_const, i, ctx, false);
                if (entry)
                        break;
        }
        rte_rwlock_read_unlock(&l_inconst->lock);
        return entry;
}

struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
        return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
}

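/*
 * Clone a global entry into the calling lcore's cache and link the clone
 * back to its global entry with a single local reference.
 */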
static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
                       struct mlx5_list_const *l_const, int lcore_index,
                       struct mlx5_list_entry *gentry, void *ctx)
{
        struct mlx5_list_entry *lentry =
                        l_const->cb_clone(l_const->ctx, gentry, ctx);

        if (unlikely(!lentry))
                return NULL;
        lentry->ref_cnt = 1u;
        lentry->gentry = gentry;
        lentry->lcore_idx = (uint32_t)lcore_index;
        LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
        return lentry;
}

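/*
 * Drop entries that other lcores have marked as invalidated for this cache:
 * consume the invalidation counter and free up to that many zero-referenced
 * entries found on the local list.
 */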
static void
__list_cache_clean(struct mlx5_list_inconst *l_inconst,
                   struct mlx5_list_const *l_const,
                   int lcore_index)
{
        struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
        struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
        uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
                                               __ATOMIC_RELAXED);

        while (inv_cnt != 0 && entry != NULL) {
                struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

                if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
                        LIST_REMOVE(entry, next);
                        if (l_const->lcores_share)
                                l_const->cb_clone_free(l_const->ctx, entry);
                        else
                                l_const->cb_remove(l_const->ctx, entry);
                        inv_cnt--;
                }
                entry = nentry;
        }
}

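/*
 * Register (find or create) an entry for @ctx. The numbered steps below take
 * the lock-free fast path first: clean the local cache, look up locally,
 * then (for shared lists) probe the global list under the read lock, and
 * only create a new entry when nothing was found. The generation counter is
 * re-checked under the write lock to detect a concurrent insertion of the
 * same entry.
 */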
static inline struct mlx5_list_entry *
_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
                    struct mlx5_list_const *l_const,
                    void *ctx)
{
        struct mlx5_list_entry *entry = NULL, *local_entry;
        volatile uint32_t prev_gen_cnt = 0;
        int lcore_index = rte_lcore_index(rte_lcore_id());

        MLX5_ASSERT(l_inconst);
        MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
        if (unlikely(lcore_index == -1)) {
                rte_errno = ENOTSUP;
                return NULL;
        }
        if (unlikely(!l_inconst->cache[lcore_index])) {
                l_inconst->cache[lcore_index] = mlx5_malloc(0,
                                        sizeof(struct mlx5_list_cache),
                                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                if (!l_inconst->cache[lcore_index]) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
                l_inconst->cache[lcore_index]->inv_cnt = 0;
                LIST_INIT(&l_inconst->cache[lcore_index]->h);
        }
        /* 0. Free entries that were invalidated by other lcores. */
        __list_cache_clean(l_inconst, l_const, lcore_index);
        /* 1. Lookup in local cache. */
        local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
        if (local_entry)
                return local_entry;
        if (l_const->lcores_share) {
                /* 2. Lookup with read lock on global list, reuse if found. */
                rte_rwlock_read_lock(&l_inconst->lock);
                entry = __list_lookup(l_inconst, l_const, RTE_MAX_LCORE,
                                      ctx, true);
                if (likely(entry)) {
                        rte_rwlock_read_unlock(&l_inconst->lock);
                        return mlx5_list_cache_insert(l_inconst, l_const,
                                                      lcore_index,
                                                      entry, ctx);
                }
                prev_gen_cnt = l_inconst->gen_cnt;
                rte_rwlock_read_unlock(&l_inconst->lock);
        }
        /* 3. Prepare new entry for global list and for cache. */
        entry = l_const->cb_create(l_const->ctx, ctx);
        if (unlikely(!entry))
                return NULL;
        entry->ref_cnt = 1u;
        if (!l_const->lcores_share) {
                entry->lcore_idx = (uint32_t)lcore_index;
                LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
                                 entry, next);
                __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
                DRV_LOG(DEBUG, "mlx5 list %s c%d entry %p new: %u.",
                        l_const->name, lcore_index,
                        (void *)entry, entry->ref_cnt);
                return entry;
        }
        local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
        if (unlikely(!local_entry)) {
                l_const->cb_remove(l_const->ctx, entry);
                return NULL;
        }
        local_entry->ref_cnt = 1u;
        local_entry->gentry = entry;
        local_entry->lcore_idx = (uint32_t)lcore_index;
        rte_rwlock_write_lock(&l_inconst->lock);
        /* 4. Make sure the same entry was not created before taking the write lock. */
        if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
                struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
                                                               l_const,
                                                               RTE_MAX_LCORE,
                                                               ctx, true);

                if (unlikely(oentry)) {
                        /* 4.5. Found a real race, reuse the old entry. */
                        rte_rwlock_write_unlock(&l_inconst->lock);
                        l_const->cb_remove(l_const->ctx, entry);
                        l_const->cb_clone_free(l_const->ctx, local_entry);
                        return mlx5_list_cache_insert(l_inconst, l_const,
                                                      lcore_index,
                                                      oentry, ctx);
                }
        }
        /* 5. Update lists. */
        LIST_INSERT_HEAD(&l_inconst->cache[RTE_MAX_LCORE]->h, entry, next);
        l_inconst->gen_cnt++;
        rte_rwlock_write_unlock(&l_inconst->lock);
        LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
        __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
        DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
                (void *)entry, entry->ref_cnt);
        return local_entry;
}

struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
        return _mlx5_list_register(&list->l_inconst, &list->l_const, ctx);
}

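/*
 * Drop one reference from a (local) entry. An entry owned by another lcore
 * cannot be unlinked here, so only its owner's invalidation counter is
 * bumped and the actual release is deferred to that lcore's next cache
 * clean. For shared lists the global entry is removed under the write lock
 * once its own reference counter reaches zero. Returns 0 when the entry
 * (and, for shared lists, its global entry) was or will be freed, 1 if it is
 * still referenced.
 */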
static inline int
_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
                      struct mlx5_list_const *l_const,
                      struct mlx5_list_entry *entry)
{
        struct mlx5_list_entry *gentry = entry->gentry;
        int lcore_idx;

        if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;
        lcore_idx = rte_lcore_index(rte_lcore_id());
        MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
        if (entry->lcore_idx == (uint32_t)lcore_idx) {
                LIST_REMOVE(entry, next);
                if (l_const->lcores_share)
                        l_const->cb_clone_free(l_const->ctx, entry);
                else
                        l_const->cb_remove(l_const->ctx, entry);
        } else if (likely(lcore_idx != -1)) {
                __atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
                                   1, __ATOMIC_RELAXED);
        } else {
                return 0;
        }
        if (!l_const->lcores_share) {
                __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
                        l_const->name, (void *)entry);
                return 0;
        }
        if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;
        rte_rwlock_write_lock(&l_inconst->lock);
        if (likely(gentry->ref_cnt == 0)) {
                LIST_REMOVE(gentry, next);
                rte_rwlock_write_unlock(&l_inconst->lock);
                l_const->cb_remove(l_const->ctx, gentry);
                __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
                        l_const->name, (void *)gentry);
                return 0;
        }
        rte_rwlock_write_unlock(&l_inconst->lock);
        return 1;
}

int
mlx5_list_unregister(struct mlx5_list *list,
                     struct mlx5_list_entry *entry)
{
        return _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry);
}

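/*
 * Release every entry still held in the per-lcore caches and in the global
 * cache, then free the per-lcore cache containers themselves (the global
 * slot is part of the list allocation and is not freed here).
 */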
static void
mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
                 struct mlx5_list_const *l_const)
{
        struct mlx5_list_entry *entry;
        int i;

        MLX5_ASSERT(l_inconst);
        for (i = 0; i <= RTE_MAX_LCORE; i++) {
                if (!l_inconst->cache[i])
                        continue;
                while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
                        entry = LIST_FIRST(&l_inconst->cache[i]->h);
                        LIST_REMOVE(entry, next);
                        if (i == RTE_MAX_LCORE) {
                                l_const->cb_remove(l_const->ctx, entry);
                                DRV_LOG(DEBUG, "mlx5 list %s entry %p "
                                        "destroyed.", l_const->name,
                                        (void *)entry);
                        } else {
                                l_const->cb_clone_free(l_const->ctx, entry);
                        }
                }
                if (i != RTE_MAX_LCORE)
                        mlx5_free(l_inconst->cache[i]);
        }
}

void
mlx5_list_destroy(struct mlx5_list *list)
{
        mlx5_list_uninit(&list->l_inconst, &list->l_const);
        mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
        MLX5_ASSERT(list);
        return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
}

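/*
 * Minimal usage sketch (illustrative only, not part of the driver): the
 * callback implementations and the "my_*"/"lookup_ctx" names below are
 * hypothetical; only the mlx5_list_* calls and the callback signatures come
 * from mlx5_common_utils.h.
 *
 *     struct mlx5_list *list = mlx5_list_create("my_list", my_ctx, true,
 *                                               my_create_cb, my_match_cb,
 *                                               my_remove_cb, my_clone_cb,
 *                                               my_clone_free_cb);
 *     struct mlx5_list_entry *e = mlx5_list_register(list, lookup_ctx);
 *     ...
 *     mlx5_list_unregister(list, e);
 *     mlx5_list_destroy(list);
 */
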
/********************* Hash List **********************/

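/*
 * Allocate a hash list: a power-of-2 array of buckets, each bucket holding
 * its own mlx5 list that shares the callbacks stored in the common constant
 * part. With lcores_share, per-bucket global caches are carved out of the
 * same allocation right after the bucket array.
 */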
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
                  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
                  mlx5_list_match_cb cb_match,
                  mlx5_list_remove_cb cb_remove,
                  mlx5_list_clone_cb cb_clone,
                  mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *h;
        struct mlx5_list_cache *gc;
        uint32_t act_size;
        uint32_t alloc_size;
        uint32_t i;

        if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
            !cb_clone_free) {
                rte_errno = EINVAL;
                return NULL;
        }
        /* Align to the next power of 2; a 32-bit integer is enough for now. */
        if (!rte_is_power_of_2(size)) {
                act_size = rte_align32pow2(size);
                DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not a power of 2, "
                        "will be aligned to 0x%" PRIX32 ".", size, act_size);
        } else {
                act_size = size;
        }
        alloc_size = sizeof(struct mlx5_hlist) +
                     sizeof(struct mlx5_hlist_bucket) * act_size;
        if (lcores_share)
                alloc_size += sizeof(struct mlx5_list_cache) * act_size;
        /* Using zmalloc, so there is no need to initialize the heads. */
        h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
                        SOCKET_ID_ANY);
        if (!h) {
                DRV_LOG(ERR, "No memory for hash list %s creation",
                        name ? name : "None");
                return NULL;
        }
        if (name)
                snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
        h->l_const.ctx = ctx;
        h->l_const.lcores_share = lcores_share;
        h->l_const.cb_create = cb_create;
        h->l_const.cb_match = cb_match;
        h->l_const.cb_remove = cb_remove;
        h->l_const.cb_clone = cb_clone;
        h->l_const.cb_clone_free = cb_clone_free;
        h->mask = act_size - 1;
        h->direct_key = direct_key;
        gc = (struct mlx5_list_cache *)&h->buckets[act_size];
        for (i = 0; i < act_size; i++) {
                if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
                    lcores_share ? &gc[i] : NULL) != 0) {
                        mlx5_free(h);
                        return NULL;
                }
        }
        DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
                name ? name : "None", act_size);
        return h;
}

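/*
 * Read-only lookup: hash the key (or use it directly when direct_key is
 * set) to pick the bucket, then search that bucket's list.
 */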
struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
        uint32_t idx;

        if (h->direct_key)
                idx = (uint32_t)(key & h->mask);
        else
                idx = rte_hash_crc_8byte(key, 0) & h->mask;
        return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
}

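/*
 * Find or create an entry for @key/@ctx in the bucket selected by the key
 * hash. On success, the bucket index is remembered in the entry (or in its
 * global entry for shared lists) so that unregister can locate the bucket
 * again without the key.
 */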
struct mlx5_list_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
        uint32_t idx;
        struct mlx5_list_entry *entry;

        if (h->direct_key)
                idx = (uint32_t)(key & h->mask);
        else
                idx = rte_hash_crc_8byte(key, 0) & h->mask;
        entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx);
        if (likely(entry)) {
                if (h->l_const.lcores_share)
                        entry->gentry->bucket_idx = idx;
                else
                        entry->bucket_idx = idx;
        }
        return entry;
}

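/*
 * Drop a reference taken by mlx5_hlist_register(), using the bucket index
 * stored in the entry to locate the owning list.
 */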
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
        uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
                                                 entry->bucket_idx;

        return _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry);
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
        uint32_t i;

        for (i = 0; i <= h->mask; i++)
                mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
        mlx5_free(h);
}
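
/*
 * Minimal hash-list usage sketch (illustrative only): the callback names,
 * "my_ctx", "lookup_ctx" and the key value are hypothetical; the API calls
 * are the ones defined above.
 *
 *     struct mlx5_hlist *h = mlx5_hlist_create("my_hlist", 64, false, true,
 *                                              my_ctx, my_create_cb,
 *                                              my_match_cb, my_remove_cb,
 *                                              my_clone_cb, my_clone_free_cb);
 *     struct mlx5_list_entry *e = mlx5_hlist_register(h, key, lookup_ctx);
 *     ...
 *     mlx5_hlist_unregister(h, e);
 *     mlx5_hlist_destroy(h);
 */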