net/mlx5: reduce log level in hash list registration
drivers/net/mlx5/mlx5_utils.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"

/********************* Hash List **********************/

static struct mlx5_hlist_entry *
mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
                             void *ctx __rte_unused)
{
        return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
                             struct mlx5_hlist_entry *entry)
{
        mlx5_free(entry);
}

static int
mlx5_hlist_default_match_cb(struct mlx5_hlist *h __rte_unused,
                            struct mlx5_hlist_entry *entry,
                            uint64_t key, void *ctx __rte_unused)
{
        return entry->key != key;
}

struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
                  uint32_t flags, mlx5_hlist_create_cb cb_create,
                  mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
{
        struct mlx5_hlist *h;
        uint32_t act_size;
        uint32_t alloc_size;

        if (!size || (!cb_create ^ !cb_remove))
                return NULL;
        /* Align to the next power of 2; a 32-bit integer is enough for now. */
        if (!rte_is_power_of_2(size)) {
                act_size = rte_align32pow2(size);
                DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
                        "be aligned to 0x%" PRIX32 ".", size, act_size);
        } else {
                act_size = size;
        }
        alloc_size = sizeof(struct mlx5_hlist) +
                     sizeof(struct mlx5_hlist_head) * act_size;
        /* Zeroed allocation, so there is no need to initialize the heads. */
        h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
                        SOCKET_ID_ANY);
        if (!h) {
                DRV_LOG(ERR, "No memory for hash list %s creation",
                        name ? name : "None");
                return NULL;
        }
        if (name)
                snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
        h->table_sz = act_size;
        h->mask = act_size - 1;
        h->entry_sz = entry_size;
        h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
        h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
        h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
        h->cb_match = cb_match ? cb_match : mlx5_hlist_default_match_cb;
        h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
        rte_rwlock_init(&h->lock);
        DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
                h->name, act_size);
        return h;
}

static struct mlx5_hlist_entry *
__hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse)
{
        uint32_t idx;
        struct mlx5_hlist_head *first;
        struct mlx5_hlist_entry *node;

        MLX5_ASSERT(h);
        if (h->direct_key)
                idx = (uint32_t)(key & h->mask);
        else
                idx = rte_hash_crc_8byte(key, 0) & h->mask;
        first = &h->heads[idx];
        LIST_FOREACH(node, first, next) {
                if (!h->cb_match(h, node, key, ctx)) {
                        if (reuse) {
                                __atomic_add_fetch(&node->ref_cnt, 1,
                                                   __ATOMIC_RELAXED);
                                DRV_LOG(DEBUG, "Hash list %s entry %p "
                                        "reuse: %u.",
                                        h->name, (void *)node, node->ref_cnt);
                        }
                        break;
                }
        }
        return node;
}

static struct mlx5_hlist_entry *
hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse)
{
        struct mlx5_hlist_entry *node;

        MLX5_ASSERT(h);
        rte_rwlock_read_lock(&h->lock);
        node = __hlist_lookup(h, key, ctx, reuse);
        rte_rwlock_read_unlock(&h->lock);
        return node;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
        return hlist_lookup(h, key, ctx, false);
}

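/*
 * Insert-or-reuse with optimistic concurrency: the first lookup runs under
 * the read lock and takes a reference if the entry already exists. Only on
 * a miss is the write lock taken; if the generation counter shows that the
 * list changed in between, the lookup is repeated under the write lock
 * before a new entry is created and inserted.
 */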
struct mlx5_hlist_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
        uint32_t idx;
        struct mlx5_hlist_head *first;
        struct mlx5_hlist_entry *entry;
        uint32_t prev_gen_cnt = 0;

        MLX5_ASSERT(h);
        /* Use the write lock directly for a write-most list. */
        if (!h->write_most) {
                prev_gen_cnt = __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE);
                entry = hlist_lookup(h, key, ctx, true);
                if (entry)
                        return entry;
        }
        rte_rwlock_write_lock(&h->lock);
        /* Check if the list was changed by another thread. */
        if (h->write_most ||
            prev_gen_cnt != __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE)) {
                entry = __hlist_lookup(h, key, ctx, true);
                if (entry)
                        goto done;
        }
        if (h->direct_key)
                idx = (uint32_t)(key & h->mask);
        else
                idx = rte_hash_crc_8byte(key, 0) & h->mask;
        first = &h->heads[idx];
        entry = h->cb_create(h, key, ctx);
        if (!entry) {
                rte_errno = ENOMEM;
                DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
                goto done;
        }
        entry->key = key;
        entry->ref_cnt = 1;
        LIST_INSERT_HEAD(first, entry, next);
        __atomic_add_fetch(&h->gen_cnt, 1, __ATOMIC_ACQ_REL);
        DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
                h->name, (void *)entry, entry->ref_cnt);
done:
        rte_rwlock_write_unlock(&h->lock);
        return entry;
}

int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
        rte_rwlock_write_lock(&h->lock);
        MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
        DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
                h->name, (void *)entry, entry->ref_cnt);
        if (--entry->ref_cnt) {
                rte_rwlock_write_unlock(&h->lock);
                return 1;
        }
        LIST_REMOVE(entry, next);
        /* Set to NULL to avoid removing the entry more than once. */
        entry->next.le_prev = NULL;
        h->cb_remove(h, entry);
        rte_rwlock_write_unlock(&h->lock);
        DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
                h->name, (void *)entry);
        return 0;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
        uint32_t idx;
        struct mlx5_hlist_entry *entry;

        MLX5_ASSERT(h);
        for (idx = 0; idx < h->table_sz; ++idx) {
                /* No LIST_FOREACH_SAFE, using while instead. */
                while (!LIST_EMPTY(&h->heads[idx])) {
                        entry = LIST_FIRST(&h->heads[idx]);
                        LIST_REMOVE(entry, next);
                        /*
                         * The user owns the whole element that contains the
                         * data entry, so cleanup and freeing are the user's
                         * duty through the remove callback, because the hlist
                         * entry may not be located at the beginning of the
                         * element (although that is the suggested layout).
                         * Otherwise the default free function is used.
                         */
                        h->cb_remove(h, entry);
                }
        }
        mlx5_free(h);
}

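/*
 * Illustrative sketch, not part of the driver: typical hash list usage
 * with the default create/match/remove callbacks. The name, key and sizes
 * below are assumptions for the example only.
 */
static __rte_unused void
mlx5_hlist_usage_example(void)
{
        struct mlx5_hlist *h;
        struct mlx5_hlist_entry *e;

        /* 64 buckets, 64-byte entries, default callbacks (NULL). */
        h = mlx5_hlist_create("example", 64, 64, 0, NULL, NULL, NULL);
        if (!h)
                return;
        e = mlx5_hlist_register(h, 0x1234, NULL); /* Creates, ref_cnt = 1. */
        if (e)
                mlx5_hlist_unregister(h, e); /* ref_cnt drops to 0, freed. */
        mlx5_hlist_destroy(h);
}
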
/********************* Cache list ************************/

static struct mlx5_cache_entry *
mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
                             struct mlx5_cache_entry *entry __rte_unused,
                             void *ctx __rte_unused)
{
        return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
                             struct mlx5_cache_entry *entry)
{
        mlx5_free(entry);
}

int
mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
                     uint32_t entry_size, void *ctx,
                     mlx5_cache_create_cb cb_create,
                     mlx5_cache_match_cb cb_match,
                     mlx5_cache_remove_cb cb_remove)
{
        MLX5_ASSERT(list);
        if (!cb_match || (!cb_create ^ !cb_remove))
                return -1;
        if (name)
                snprintf(list->name, sizeof(list->name), "%s", name);
        list->entry_sz = entry_size;
        list->ctx = ctx;
        list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
        list->cb_match = cb_match;
        list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
        rte_rwlock_init(&list->lock);
        DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
        LIST_INIT(&list->head);
        return 0;
}

static struct mlx5_cache_entry *
__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
        struct mlx5_cache_entry *entry;

        LIST_FOREACH(entry, &list->head, next) {
                if (list->cb_match(list, entry, ctx))
                        continue;
                if (reuse) {
                        __atomic_add_fetch(&entry->ref_cnt, 1,
                                           __ATOMIC_RELAXED);
                        DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
                                list->name, (void *)entry, entry->ref_cnt);
                }
                break;
        }
        return entry;
}

static struct mlx5_cache_entry *
cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
        struct mlx5_cache_entry *entry;

        rte_rwlock_read_lock(&list->lock);
        entry = __cache_lookup(list, ctx, reuse);
        rte_rwlock_read_unlock(&list->lock);
        return entry;
}

struct mlx5_cache_entry *
mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
{
        return cache_lookup(list, ctx, false);
}

struct mlx5_cache_entry *
mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
{
        struct mlx5_cache_entry *entry;
        uint32_t prev_gen_cnt = 0;

        MLX5_ASSERT(list);
        prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
        /* Lookup with the read lock held, reuse if found. */
        entry = cache_lookup(list, ctx, true);
        if (entry)
                return entry;
        /* Not found, insert with the write lock held, blocking readers. */
        rte_rwlock_write_lock(&list->lock);
        /*
         * If the list was changed by another thread before taking the lock,
         * search again.
         */
        if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
                /* Lookup and reuse without taking the read lock. */
                entry = __cache_lookup(list, ctx, true);
                if (entry)
                        goto done;
        }
        entry = list->cb_create(list, entry, ctx);
        if (!entry) {
                DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
                        list->name, (void *)entry);
                goto done;
        }
        entry->ref_cnt = 1;
        LIST_INSERT_HEAD(&list->head, entry, next);
        __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
        __atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
        DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
                list->name, (void *)entry, entry->ref_cnt);
done:
        rte_rwlock_write_unlock(&list->lock);
        return entry;
}

int
mlx5_cache_unregister(struct mlx5_cache_list *list,
                      struct mlx5_cache_entry *entry)
{
        rte_rwlock_write_lock(&list->lock);
        MLX5_ASSERT(entry && entry->next.le_prev);
        DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
                list->name, (void *)entry, entry->ref_cnt);
        if (--entry->ref_cnt) {
                rte_rwlock_write_unlock(&list->lock);
                return 1;
        }
        __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
        __atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
        LIST_REMOVE(entry, next);
        list->cb_remove(list, entry);
        rte_rwlock_write_unlock(&list->lock);
        DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
                list->name, (void *)entry);
        return 0;
}

void
mlx5_cache_list_destroy(struct mlx5_cache_list *list)
{
        struct mlx5_cache_entry *entry;

        MLX5_ASSERT(list);
        /* No LIST_FOREACH_SAFE, using while instead. */
        while (!LIST_EMPTY(&list->head)) {
                entry = LIST_FIRST(&list->head);
                LIST_REMOVE(entry, next);
                list->cb_remove(list, entry);
                DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
                        list->name, (void *)entry);
        }
        memset(list, 0, sizeof(*list));
}

uint32_t
mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
{
        MLX5_ASSERT(list);
        return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

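/*
 * Illustrative sketch, not part of the driver: a minimal cache list user.
 * The match callback is mandatory and returns 0 on a match, like the hash
 * list callbacks above. The structure and names are assumptions for the
 * example only; the cache entry header is placed first so the default
 * create/remove callbacks can be used.
 */
struct example_centry {
        struct mlx5_cache_entry entry; /* Must be the first member. */
        uint32_t tag;
};

static __rte_unused int
example_cache_match_cb(struct mlx5_cache_list *list __rte_unused,
                       struct mlx5_cache_entry *entry, void *ctx)
{
        return ((struct example_centry *)entry)->tag != *(uint32_t *)ctx;
}

static __rte_unused void
mlx5_cache_usage_example(void)
{
        struct mlx5_cache_list list = { 0 };
        struct mlx5_cache_entry *e;
        uint32_t tag = 7;

        if (mlx5_cache_list_init(&list, "example",
                                 sizeof(struct example_centry), NULL, NULL,
                                 example_cache_match_cb, NULL))
                return;
        /*
         * Allocated zeroed by the default create callback; a real user
         * would fill in the tag via its own cb_create.
         */
        e = mlx5_cache_register(&list, &tag);
        if (e)
                mlx5_cache_unregister(&list, e);
        mlx5_cache_list_destroy(&list);
}
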
/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
        if (pool->cfg.need_lock)
                rte_spinlock_lock(&pool->lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
        if (pool->cfg.need_lock)
                rte_spinlock_unlock(&pool->lock);
}

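/*
 * Worked example of the trunk geometry handled by the helpers below
 * (illustrative values): with trunk_size = 16, grow_shift = 1 and
 * grow_trunk = 3, trunk sizes are 16, 32 and 64 entries, and every trunk
 * after the third holds 16 << (1 * 3) = 128 entries. grow_tbl caches the
 * cumulative totals {16, 48, 112} so an entry index can be mapped back to
 * its trunk without recomputing the series.
 */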
static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
        struct mlx5_indexed_pool_config *cfg = &pool->cfg;
        uint32_t trunk_idx = 0;
        uint32_t i;

        if (!cfg->grow_trunk)
                return entry_idx / cfg->trunk_size;
        if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
                trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
                            (cfg->trunk_size << (cfg->grow_shift *
                            cfg->grow_trunk)) + cfg->grow_trunk;
        } else {
                for (i = 0; i < cfg->grow_trunk; i++) {
                        if (entry_idx < pool->grow_tbl[i])
                                break;
                }
                trunk_idx = i;
        }
        return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
        struct mlx5_indexed_pool_config *cfg = &pool->cfg;

        return cfg->trunk_size << (cfg->grow_shift *
               (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
        struct mlx5_indexed_pool_config *cfg = &pool->cfg;
        uint32_t offset = 0;

        if (!trunk_idx)
                return 0;
        if (!cfg->grow_trunk)
                return cfg->trunk_size * trunk_idx;
        if (trunk_idx < cfg->grow_trunk)
                offset = pool->grow_tbl[trunk_idx - 1];
        else
                offset = pool->grow_tbl[cfg->grow_trunk - 1] +
                         (cfg->trunk_size << (cfg->grow_shift *
                         cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
        return offset;
}

struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
        struct mlx5_indexed_pool *pool;
        uint32_t i;

        if (!cfg || (!cfg->malloc ^ !cfg->free) ||
            (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
            ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
                return NULL;
        pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
                           sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
                           SOCKET_ID_ANY);
        if (!pool)
                return NULL;
        pool->cfg = *cfg;
        if (!pool->cfg.trunk_size)
                pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
        if (!cfg->malloc && !cfg->free) {
                pool->cfg.malloc = mlx5_malloc;
                pool->cfg.free = mlx5_free;
        }
        pool->free_list = TRUNK_INVALID;
        if (pool->cfg.need_lock)
                rte_spinlock_init(&pool->lock);
        /*
         * Initialize the dynamic grow trunk size lookup table to have a quick
         * lookup for the trunk entry index offset.
         */
        for (i = 0; i < cfg->grow_trunk; i++) {
                pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
                if (i > 0)
                        pool->grow_tbl[i] += pool->grow_tbl[i - 1];
        }
        return pool;
}

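/*
 * Illustrative sketch, not part of the driver: creating and using an
 * indexed pool with the growing trunk configuration described above. All
 * values are assumptions for the example only.
 */
static __rte_unused void
mlx5_ipool_usage_example(void)
{
        struct mlx5_indexed_pool_config cfg = {
                .size = 64,             /* Entry size in bytes. */
                .trunk_size = 16,       /* Must be a power of 2. */
                .grow_trunk = 3,        /* First three trunks grow. */
                .grow_shift = 1,        /* Double the size per trunk. */
                .need_lock = 1,
                .release_mem_en = 1,
                .malloc = mlx5_malloc,
                .free = mlx5_free,
                .type = "example_ipool",
        };
        struct mlx5_indexed_pool *pool;
        uint32_t idx;
        void *entry;

        pool = mlx5_ipool_create(&cfg);
        if (!pool)
                return;
        entry = mlx5_ipool_zmalloc(pool, &idx); /* Returned idx is 1-based. */
        if (entry) {
                MLX5_ASSERT(entry == mlx5_ipool_get(pool, idx));
                mlx5_ipool_free(pool, idx);
        }
        mlx5_ipool_destroy(pool);
}
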
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
        struct mlx5_indexed_trunk *trunk;
        struct mlx5_indexed_trunk **trunk_tmp;
        struct mlx5_indexed_trunk **p;
        size_t trunk_size = 0;
        size_t data_size;
        size_t bmp_size;
        uint32_t idx;

        if (pool->n_trunk_valid == TRUNK_MAX_IDX)
                return -ENOMEM;
        if (pool->n_trunk_valid == pool->n_trunk) {
                /* No free trunk slot, expand the trunk list. */
                int n_grow = pool->n_trunk_valid ? pool->n_trunk :
                             RTE_CACHE_LINE_SIZE / sizeof(void *);

                p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
                                     sizeof(struct mlx5_indexed_trunk *),
                                     RTE_CACHE_LINE_SIZE, rte_socket_id());
                if (!p)
                        return -ENOMEM;
                if (pool->trunks)
                        memcpy(p, pool->trunks, pool->n_trunk_valid *
                               sizeof(struct mlx5_indexed_trunk *));
                memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
                       n_grow * sizeof(void *));
                trunk_tmp = pool->trunks;
                pool->trunks = p;
                if (trunk_tmp)
                        pool->cfg.free(trunk_tmp);
                pool->n_trunk += n_grow;
        }
        if (!pool->cfg.release_mem_en) {
                idx = pool->n_trunk_valid;
        } else {
                /* Find the first available slot in the trunk list. */
                for (idx = 0; idx < pool->n_trunk; idx++)
                        if (pool->trunks[idx] == NULL)
                                break;
        }
        trunk_size += sizeof(*trunk);
        data_size = mlx5_trunk_size_get(pool, idx);
        bmp_size = rte_bitmap_get_memory_footprint(data_size);
        /* rte_bitmap requires cacheline-aligned memory. */
        trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
        trunk_size += bmp_size;
        trunk = pool->cfg.malloc(0, trunk_size,
                                 RTE_CACHE_LINE_SIZE, rte_socket_id());
        if (!trunk)
                return -ENOMEM;
        pool->trunks[idx] = trunk;
        trunk->idx = idx;
        trunk->free = data_size;
        trunk->prev = TRUNK_INVALID;
        trunk->next = TRUNK_INVALID;
        MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
        pool->free_list = idx;
        /* Mark all entries as available. */
        trunk->bmp = rte_bitmap_init_with_all_set(data_size,
                     &trunk->data[RTE_CACHE_LINE_ROUNDUP(data_size *
                     pool->cfg.size)], bmp_size);
        MLX5_ASSERT(trunk->bmp);
        pool->n_trunk_valid++;
#ifdef POOL_DEBUG
        pool->trunk_new++;
        pool->trunk_avail++;
#endif
        return 0;
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
        struct mlx5_indexed_trunk *trunk;
        uint64_t slab = 0;
        uint32_t iidx = 0;
        void *p;

        mlx5_ipool_lock(pool);
        if (pool->free_list == TRUNK_INVALID) {
                /* If no trunk is available, grow a new one. */
                if (mlx5_ipool_grow(pool)) {
                        mlx5_ipool_unlock(pool);
                        return NULL;
                }
        }
        MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
        trunk = pool->trunks[pool->free_list];
        MLX5_ASSERT(trunk->free);
        if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
                mlx5_ipool_unlock(pool);
                return NULL;
        }
        MLX5_ASSERT(slab);
        iidx += __builtin_ctzll(slab);
        MLX5_ASSERT(iidx != UINT32_MAX);
        MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
        rte_bitmap_clear(trunk->bmp, iidx);
        p = &trunk->data[iidx * pool->cfg.size];
        /*
         * The ipool index should grow continually from small to big:
         * some features, such as metering, only accept a limited number
         * of index bits, and a random index with the MSB set may be
         * rejected.
         */
        iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
        iidx += 1; /* Return a non-zero index. */
        trunk->free--;
#ifdef POOL_DEBUG
        pool->n_entry++;
#endif
        if (!trunk->free) {
                /* Remove the full trunk from the free list. */
                MLX5_ASSERT(pool->free_list == trunk->idx);
                pool->free_list = trunk->next;
                if (trunk->next != TRUNK_INVALID)
                        pool->trunks[trunk->next]->prev = TRUNK_INVALID;
                trunk->prev = TRUNK_INVALID;
                trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
                pool->trunk_empty++;
                pool->trunk_avail--;
#endif
        }
        *idx = iidx;
        mlx5_ipool_unlock(pool);
        return p;
}

void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
        void *entry = mlx5_ipool_malloc(pool, idx);

        if (entry && pool->cfg.size)
                memset(entry, 0, pool->cfg.size);
        return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
        struct mlx5_indexed_trunk *trunk;
        uint32_t trunk_idx;
        uint32_t entry_idx;

        if (!idx)
                return;
        idx -= 1;
        mlx5_ipool_lock(pool);
        trunk_idx = mlx5_trunk_idx_get(pool, idx);
        if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
            (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
                goto out;
        trunk = pool->trunks[trunk_idx];
        if (!trunk)
                goto out;
        entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
        if (trunk_idx != trunk->idx ||
            rte_bitmap_get(trunk->bmp, entry_idx))
                goto out;
        rte_bitmap_set(trunk->bmp, entry_idx);
        trunk->free++;
        if (pool->cfg.release_mem_en &&
            trunk->free == mlx5_trunk_size_get(pool, trunk->idx)) {
                if (pool->free_list == trunk->idx)
                        pool->free_list = trunk->next;
                if (trunk->next != TRUNK_INVALID)
                        pool->trunks[trunk->next]->prev = trunk->prev;
                if (trunk->prev != TRUNK_INVALID)
                        pool->trunks[trunk->prev]->next = trunk->next;
                pool->cfg.free(trunk);
                pool->trunks[trunk_idx] = NULL;
                pool->n_trunk_valid--;
#ifdef POOL_DEBUG
                pool->trunk_avail--;
                pool->trunk_free++;
#endif
                if (pool->n_trunk_valid == 0) {
                        pool->cfg.free(pool->trunks);
                        pool->trunks = NULL;
                        pool->n_trunk = 0;
                }
        } else if (trunk->free == 1) {
                /* Put the trunk at the head of the free trunk list. */
                MLX5_ASSERT(pool->free_list != trunk->idx);
                trunk->next = pool->free_list;
                trunk->prev = TRUNK_INVALID;
                if (pool->free_list != TRUNK_INVALID)
                        pool->trunks[pool->free_list]->prev = trunk->idx;
                pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
                pool->trunk_empty--;
                pool->trunk_avail++;
#endif
        }
#ifdef POOL_DEBUG
        pool->n_entry--;
#endif
out:
        mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
        struct mlx5_indexed_trunk *trunk;
        void *p = NULL;
        uint32_t trunk_idx;
        uint32_t entry_idx;

        if (!idx)
                return NULL;
        idx -= 1;
        mlx5_ipool_lock(pool);
        trunk_idx = mlx5_trunk_idx_get(pool, idx);
        if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
            (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
                goto out;
        trunk = pool->trunks[trunk_idx];
        if (!trunk)
                goto out;
        entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
        if (trunk_idx != trunk->idx ||
            rte_bitmap_get(trunk->bmp, entry_idx))
                goto out;
        p = &trunk->data[entry_idx * pool->cfg.size];
out:
        mlx5_ipool_unlock(pool);
        return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
        struct mlx5_indexed_trunk **trunks;
        uint32_t i;

        MLX5_ASSERT(pool);
        mlx5_ipool_lock(pool);
        trunks = pool->trunks;
        for (i = 0; i < pool->n_trunk; i++) {
                if (trunks[i])
                        pool->cfg.free(trunks[i]);
        }
        if (pool->trunks)
                pool->cfg.free(pool->trunks);
        mlx5_ipool_unlock(pool);
        mlx5_free(pool);
        return 0;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
        printf("Pool %s entry size %u, trunks %u, %u entries per trunk, "
               "total: %u\n",
               pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
               pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
        printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
               "available %u free %u\n",
               pool->cfg.type, pool->n_entry, pool->trunk_new,
               pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
        struct mlx5_l3t_tbl *tbl;
        struct mlx5_indexed_pool_config l3t_ip_cfg = {
                .trunk_size = 16,
                .grow_trunk = 6,
                .grow_shift = 1,
                .need_lock = 0,
                .release_mem_en = 1,
                .malloc = mlx5_malloc,
                .free = mlx5_free,
        };

        if (type >= MLX5_L3T_TYPE_MAX) {
                rte_errno = EINVAL;
                return NULL;
        }
        tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
                          SOCKET_ID_ANY);
        if (!tbl) {
                rte_errno = ENOMEM;
                return NULL;
        }
        tbl->type = type;
        switch (type) {
        case MLX5_L3T_TYPE_WORD:
                l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
                l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
                break;
        case MLX5_L3T_TYPE_DWORD:
                l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
                l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
                break;
        case MLX5_L3T_TYPE_QWORD:
                l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
                l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
                break;
        default:
                l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
                l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
                break;
        }
        rte_spinlock_init(&tbl->sl);
        tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
        if (!tbl->eip) {
                rte_errno = ENOMEM;
                mlx5_free(tbl);
                tbl = NULL;
        }
        return tbl;
}

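/*
 * Illustrative sketch, not part of the driver: storing, looking up and
 * releasing a 64-bit value in a three-level table. The index and value
 * below are assumptions for the example only; internally the 32-bit index
 * is split into global, middle and entry table offsets.
 */
static __rte_unused void
mlx5_l3t_usage_example(void)
{
        struct mlx5_l3t_tbl *tbl;
        union mlx5_l3t_data data;

        tbl = mlx5_l3t_create(MLX5_L3T_TYPE_QWORD);
        if (!tbl)
                return;
        data.qword = UINT64_C(0xdeadbeef);
        if (!mlx5_l3t_set_entry(tbl, 42, &data)) {     /* ref_cnt = 1. */
                data.qword = 0;
                mlx5_l3t_get_entry(tbl, 42, &data);    /* ref_cnt = 2. */
                mlx5_l3t_clear_entry(tbl, 42);         /* ref_cnt = 1. */
                mlx5_l3t_clear_entry(tbl, 42);         /* Freed. */
        }
        mlx5_l3t_destroy(tbl);
}
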
void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
        struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
        uint32_t i, j;

        if (!tbl)
                return;
        g_tbl = tbl->tbl;
        if (g_tbl) {
                for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
                        m_tbl = g_tbl->tbl[i];
                        if (!m_tbl)
                                continue;
                        for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
                                if (!m_tbl->tbl[j])
                                        continue;
                                MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
                                            m_tbl->tbl[j])->ref_cnt);
                                mlx5_ipool_free(tbl->eip,
                                                ((struct mlx5_l3t_entry_word *)
                                                m_tbl->tbl[j])->idx);
                                m_tbl->tbl[j] = NULL;
                                if (!(--m_tbl->ref_cnt))
                                        break;
                        }
                        MLX5_ASSERT(!m_tbl->ref_cnt);
                        mlx5_free(g_tbl->tbl[i]);
                        g_tbl->tbl[i] = NULL;
                        if (!(--g_tbl->ref_cnt))
                                break;
                }
                MLX5_ASSERT(!g_tbl->ref_cnt);
                mlx5_free(tbl->tbl);
                tbl->tbl = NULL;
        }
        mlx5_ipool_destroy(tbl->eip);
        mlx5_free(tbl);
}

static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
                union mlx5_l3t_data *data)
{
        struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
        struct mlx5_l3t_entry_word *w_e_tbl;
        struct mlx5_l3t_entry_dword *dw_e_tbl;
        struct mlx5_l3t_entry_qword *qw_e_tbl;
        struct mlx5_l3t_entry_ptr *ptr_e_tbl;
        void *e_tbl;
        uint32_t entry_idx;

        g_tbl = tbl->tbl;
        if (!g_tbl)
                return -1;
        m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
        if (!m_tbl)
                return -1;
        e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
        if (!e_tbl)
                return -1;
        entry_idx = idx & MLX5_L3T_ET_MASK;
        switch (tbl->type) {
        case MLX5_L3T_TYPE_WORD:
                w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
                data->word = w_e_tbl->entry[entry_idx].data;
                if (w_e_tbl->entry[entry_idx].data)
                        w_e_tbl->entry[entry_idx].ref_cnt++;
                break;
        case MLX5_L3T_TYPE_DWORD:
                dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
                data->dword = dw_e_tbl->entry[entry_idx].data;
                if (dw_e_tbl->entry[entry_idx].data)
                        dw_e_tbl->entry[entry_idx].ref_cnt++;
                break;
        case MLX5_L3T_TYPE_QWORD:
                qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
                data->qword = qw_e_tbl->entry[entry_idx].data;
                if (qw_e_tbl->entry[entry_idx].data)
                        qw_e_tbl->entry[entry_idx].ref_cnt++;
                break;
        default:
                ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
                data->ptr = ptr_e_tbl->entry[entry_idx].data;
                if (ptr_e_tbl->entry[entry_idx].data)
                        ptr_e_tbl->entry[entry_idx].ref_cnt++;
                break;
        }
        return 0;
}

int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
                   union mlx5_l3t_data *data)
{
        int ret;

        rte_spinlock_lock(&tbl->sl);
        ret = __l3t_get_entry(tbl, idx, data);
        rte_spinlock_unlock(&tbl->sl);
        return ret;
}

int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
        struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
        struct mlx5_l3t_entry_word *w_e_tbl;
        struct mlx5_l3t_entry_dword *dw_e_tbl;
        struct mlx5_l3t_entry_qword *qw_e_tbl;
        struct mlx5_l3t_entry_ptr *ptr_e_tbl;
        void *e_tbl;
        uint32_t entry_idx;
        uint64_t ref_cnt;
        int32_t ret = -1;

        rte_spinlock_lock(&tbl->sl);
        g_tbl = tbl->tbl;
        if (!g_tbl)
                goto out;
        m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
        if (!m_tbl)
                goto out;
        e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
        if (!e_tbl)
                goto out;
        entry_idx = idx & MLX5_L3T_ET_MASK;
        switch (tbl->type) {
        case MLX5_L3T_TYPE_WORD:
                w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
                MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
                ret = --w_e_tbl->entry[entry_idx].ref_cnt;
                if (ret)
                        goto out;
                w_e_tbl->entry[entry_idx].data = 0;
                ref_cnt = --w_e_tbl->ref_cnt;
                break;
        case MLX5_L3T_TYPE_DWORD:
                dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
                MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
                ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
                if (ret)
                        goto out;
                dw_e_tbl->entry[entry_idx].data = 0;
                ref_cnt = --dw_e_tbl->ref_cnt;
                break;
        case MLX5_L3T_TYPE_QWORD:
                qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
                MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
                ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
                if (ret)
                        goto out;
                qw_e_tbl->entry[entry_idx].data = 0;
                ref_cnt = --qw_e_tbl->ref_cnt;
                break;
        default:
                ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
                MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
                ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
                if (ret)
                        goto out;
                ptr_e_tbl->entry[entry_idx].data = NULL;
                ref_cnt = --ptr_e_tbl->ref_cnt;
                break;
        }
        if (!ref_cnt) {
                mlx5_ipool_free(tbl->eip,
                                ((struct mlx5_l3t_entry_word *)e_tbl)->idx);
                m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
                        NULL;
                if (!(--m_tbl->ref_cnt)) {
                        mlx5_free(m_tbl);
                        g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) &
                                   MLX5_L3T_GT_MASK] = NULL;
                        if (!(--g_tbl->ref_cnt)) {
                                mlx5_free(g_tbl);
                                tbl->tbl = NULL;
                        }
                }
        }
out:
        rte_spinlock_unlock(&tbl->sl);
        return ret;
}

static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
                union mlx5_l3t_data *data)
{
        struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
        struct mlx5_l3t_entry_word *w_e_tbl;
        struct mlx5_l3t_entry_dword *dw_e_tbl;
        struct mlx5_l3t_entry_qword *qw_e_tbl;
        struct mlx5_l3t_entry_ptr *ptr_e_tbl;
        void *e_tbl;
        uint32_t entry_idx, tbl_idx = 0;

        /* Check the global table, create it if empty. */
        g_tbl = tbl->tbl;
        if (!g_tbl) {
                g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
                                    sizeof(struct mlx5_l3t_level_tbl) +
                                    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
                                    SOCKET_ID_ANY);
                if (!g_tbl) {
                        rte_errno = ENOMEM;
                        return -1;
                }
                tbl->tbl = g_tbl;
        }
        /*
         * Check the middle table, create it if empty. The reference counter
         * is increased if a new sub-table is created.
         */
        m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
        if (!m_tbl) {
                m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
                                    sizeof(struct mlx5_l3t_level_tbl) +
                                    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
                                    SOCKET_ID_ANY);
                if (!m_tbl) {
                        rte_errno = ENOMEM;
                        return -1;
                }
                g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
                                                                        m_tbl;
                g_tbl->ref_cnt++;
        }
        /*
         * Check the entry table, create it if empty. The reference counter
         * is increased if a new sub-entry table is created.
         */
        e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
        if (!e_tbl) {
                e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
                if (!e_tbl) {
                        rte_errno = ENOMEM;
                        return -1;
                }
                ((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
                m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
                                                                        e_tbl;
                m_tbl->ref_cnt++;
        }
        entry_idx = idx & MLX5_L3T_ET_MASK;
        switch (tbl->type) {
        case MLX5_L3T_TYPE_WORD:
                w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
                if (w_e_tbl->entry[entry_idx].data) {
                        data->word = w_e_tbl->entry[entry_idx].data;
                        w_e_tbl->entry[entry_idx].ref_cnt++;
                        rte_errno = EEXIST;
                        return -1;
                }
                w_e_tbl->entry[entry_idx].data = data->word;
                w_e_tbl->entry[entry_idx].ref_cnt = 1;
                w_e_tbl->ref_cnt++;
                break;
        case MLX5_L3T_TYPE_DWORD:
                dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
                if (dw_e_tbl->entry[entry_idx].data) {
                        data->dword = dw_e_tbl->entry[entry_idx].data;
                        dw_e_tbl->entry[entry_idx].ref_cnt++;
                        rte_errno = EEXIST;
                        return -1;
                }
                dw_e_tbl->entry[entry_idx].data = data->dword;
                dw_e_tbl->entry[entry_idx].ref_cnt = 1;
                dw_e_tbl->ref_cnt++;
                break;
        case MLX5_L3T_TYPE_QWORD:
                qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
                if (qw_e_tbl->entry[entry_idx].data) {
                        data->qword = qw_e_tbl->entry[entry_idx].data;
                        qw_e_tbl->entry[entry_idx].ref_cnt++;
                        rte_errno = EEXIST;
                        return -1;
                }
                qw_e_tbl->entry[entry_idx].data = data->qword;
                qw_e_tbl->entry[entry_idx].ref_cnt = 1;
                qw_e_tbl->ref_cnt++;
                break;
        default:
                ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
                if (ptr_e_tbl->entry[entry_idx].data) {
                        data->ptr = ptr_e_tbl->entry[entry_idx].data;
                        ptr_e_tbl->entry[entry_idx].ref_cnt++;
                        rte_errno = EEXIST;
                        return -1;
                }
                ptr_e_tbl->entry[entry_idx].data = data->ptr;
                ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
                ptr_e_tbl->ref_cnt++;
                break;
        }
        return 0;
}

int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
                   union mlx5_l3t_data *data)
{
        int ret;

        rte_spinlock_lock(&tbl->sl);
        ret = __l3t_set_entry(tbl, idx, data);
        rte_spinlock_unlock(&tbl->sl);
        return ret;
}

int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
                       union mlx5_l3t_data *data,
                       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
        int32_t ret;

        rte_spinlock_lock(&tbl->sl);
        /* Check if the entry data is ready. */
        ret = __l3t_get_entry(tbl, idx, data);
        if (!ret) {
                switch (tbl->type) {
                case MLX5_L3T_TYPE_WORD:
                        if (data->word)
                                goto out;
                        break;
                case MLX5_L3T_TYPE_DWORD:
                        if (data->dword)
                                goto out;
                        break;
                case MLX5_L3T_TYPE_QWORD:
                        if (data->qword)
                                goto out;
                        break;
                default:
                        if (data->ptr)
                                goto out;
                        break;
                }
        }
        /* The entry data is not ready, use the user callback to create it. */
        ret = cb(ctx, data);
        if (ret)
                goto out;
        /* Save the newly allocated data to the entry. */
        ret = __l3t_set_entry(tbl, idx, data);
out:
        rte_spinlock_unlock(&tbl->sl);
        return ret;
}