/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"

/********************* mlx5 list ************************/

struct mlx5_list *
mlx5_list_create(const char *name, void *ctx,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	int i;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free)
		return NULL;
	list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
	if (!list)
		return NULL;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->ctx = ctx;
	list->cb_create = cb_create;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove;
	list->cb_clone = cb_clone;
	list->cb_clone_free = cb_clone_free;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
	for (i = 0; i <= RTE_MAX_LCORE; i++)
		LIST_INIT(&list->cache[i].h);
	return list;
}
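
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * provides all five callbacks. The "tag" entry type and the callback
 * bodies below are hypothetical; cb_match must return 0 on a match.
 *
 *	struct tag_entry {
 *		struct mlx5_list_entry e;	// must be the first member
 *		uint32_t tag;
 *	};
 *
 *	static int
 *	tag_match(struct mlx5_list *l, struct mlx5_list_entry *e, void *ctx)
 *	{
 *		return ((struct tag_entry *)e)->tag != *(uint32_t *)ctx;
 *	}
 *
 *	list = mlx5_list_create("tags", sh, tag_create, tag_match,
 *				tag_remove, tag_clone, tag_clone_free);
 *	entry = mlx5_list_register(list, &tag);	// create or reuse
 *	...
 *	mlx5_list_unregister(list, entry);	// drop the reference
 */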

static struct mlx5_list_entry *
__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
	uint32_t ret;

	while (entry != NULL) {
		if (list->cb_match(list, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_RELAXED) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					list->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < RTE_MAX_LCORE) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_RELAXED);
			}
			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}
77 mlx5_list_lookup(struct mlx5_list *list, void *ctx)
79 struct mlx5_list_entry *entry = NULL;
82 rte_rwlock_read_lock(&list->lock);
83 for (i = 0; i < RTE_MAX_LCORE; i++) {
84 entry = __list_lookup(list, i, ctx, false);
88 rte_rwlock_read_unlock(&list->lock);

static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
	return lentry;
}

static void
__list_cache_clean(struct mlx5_list *list, int lcore_index)
{
	struct mlx5_list_cache *c = &list->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			list->cb_clone_free(list, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}

struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(list);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(list, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(list, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	/* 2. Lookup with read lock on global list, reuse if found. */
	rte_rwlock_read_lock(&list->lock);
	entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
	if (likely(entry)) {
		rte_rwlock_read_unlock(&list->lock);
		return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
	}
	prev_gen_cnt = list->gen_cnt;
	rte_rwlock_read_unlock(&list->lock);
	/* 3. Prepare new entry for global list and for cache. */
	entry = list->cb_create(list, entry, ctx);
	if (unlikely(!entry))
		return NULL;
	local_entry = list->cb_clone(list, entry, ctx);
	if (unlikely(!local_entry)) {
		list->cb_remove(list, entry);
		return NULL;
	}
	entry->ref_cnt = 1u;
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&list->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(list,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found a real race; reuse the old entry. */
			rte_rwlock_write_unlock(&list->lock);
			list->cb_remove(list, entry);
			list->cb_clone_free(list, local_entry);
			return mlx5_list_cache_insert(list, lcore_index, oentry,
						      ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
	list->gen_cnt++;
	rte_rwlock_write_unlock(&list->lock);
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}
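
/*
 * Note on step 4 above: gen_cnt is only incremented under the write lock
 * when a new entry is linked into the global list. Example of the race
 * this catches: lcores A and B both miss in step 2 while gen_cnt == 5;
 * A takes the write lock first, links its entry and bumps gen_cnt to 6;
 * when B later takes the write lock it sees 6 != 5, repeats the global
 * lookup, finds A's entry and discards the one it created in step 3.
 */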

int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		list->cb_clone_free(list, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
				   __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&list->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&list->lock);
		list->cb_remove(list, gentry);
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&list->lock);
	return 1;
}
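
/*
 * A clone released on a different lcore than the one that allocated it is
 * not unlinked here, since the owner's cache list is walked without a lock;
 * instead the owner's inv_cnt is bumped and the dead clone is reaped by
 * __list_cache_clean() the next time the owner calls mlx5_list_register().
 */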

void
mlx5_list_destroy(struct mlx5_list *list)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(list);
	for (i = 0; i <= RTE_MAX_LCORE; i++) {
		while (!LIST_EMPTY(&list->cache[i].h)) {
			entry = LIST_FIRST(&list->cache[i].h);
			LIST_REMOVE(entry, next);
			if (i == RTE_MAX_LCORE) {
				list->cb_remove(list, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", list->name,
					(void *)entry);
			} else {
				list->cb_clone_free(list, entry);
			}
		}
	}
	mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->rsz_lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->rsz_lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}
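
/*
 * Worked example (illustrative): with trunk_size = 64, grow_shift = 1 and
 * grow_trunk = 3, mlx5_ipool_create() builds grow_tbl = {64, 192, 448}, so
 * trunks 0..2 hold 64/128/256 entries and every later trunk holds
 * 64 << (1 * 3) = 512 entries. Trunk 4 therefore starts at entry offset
 * 448 + 512 * (4 - 3) = 960.
 */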

struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->per_core_cache && cfg->release_mem_en) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->rsz_lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	if (!cfg->per_core_cache)
		pool->free_list = TRUNK_INVALID;
	rte_spinlock_init(&pool->lcore_lock);
	return pool;
}
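
/*
 * Usage sketch (illustrative only): a small pool of fixed-size objects with
 * locking enabled. The element struct and names are hypothetical; malloc
 * and free default to mlx5_malloc/mlx5_free when both are left NULL.
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(struct my_obj),	// entry payload size
 *		.trunk_size = 64,		// must be a power of two
 *		.need_lock = 1,
 *		.type = "my_obj_pool",
 *	};
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	struct my_obj *obj = mlx5_ipool_zmalloc(pool, &idx);
 *	...	// store idx rather than the pointer
 *	mlx5_ipool_free(pool, idx);
 *	mlx5_ipool_destroy(pool);
 */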

static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk flags, expand trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires cache-line aligned memory. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits in the last trunk, if any. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}
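
/*
 * Resulting trunk memory layout (single allocation):
 *
 *	+---------------+----------------------------+-----------------+
 *	| struct header | data[entries * cfg.size]   | rte_bitmap area |
 *	+---------------+----------------------------+-----------------+
 *
 * The data area is rounded up to a cache line so that the bitmap placed
 * after it is cache-line aligned, as rte_bitmap requires.
 */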

static inline struct mlx5_indexed_cache *
mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;

	lc = pool->cache[cidx]->lc;
	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
	if (gc && lc != gc) {
		mlx5_ipool_lock(pool);
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = gc;
		lc->ref_cnt++;
		pool->cache[cidx]->lc = lc;
		mlx5_ipool_unlock(pool);
		if (olc)
			pool->cfg.free(olc);
	}
	return lc;
}

static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Fetch new indexes from the global cache if possible. On the first
	 * round the local cache will be NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		/* Use the updated local cache to fetch index. */
		fetch_size = pool->cfg.per_core_cache >> 2;
		if (lc->len < fetch_size)
			fetch_size = lc->len;
		lc->len -= fetch_size;
		memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
		       sizeof(uint32_t) * fetch_size);
	}
	mlx5_ipool_unlock(pool);
	if (olc) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
			 __ATOMIC_ACQUIRE) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check if the index reached the maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* Not enough space in the trunk array; resize it. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			 RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double check whether the trunks have been updated or indexes became
	 * available: while the new trunk was being allocated, indexes may
	 * have been flushed to the global cache, so pool->gc->len must be
	 * checked as well.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update local cache first. */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
	}
	/* Add trunk to trunks array. */
	lc->trunks[trunk_idx] = trunk;
	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
	/* Enqueue half of the indexes to the global cache. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/* Copy the remaining half minus one to the local cache index array. */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	return ts_idx + i;
}
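
/*
 * Example of the split above (illustrative): for a fresh trunk of 64
 * entries starting at 1-based index ts_idx, the first 32 indexes go to the
 * global cache, the next 31 fill the local cache, and the last one is
 * returned to the caller directly, so a successful allocation never needs
 * a second fetch.
 */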

static void *
_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *lc;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed", cidx);
			return NULL;
		}
	}
	lc = mlx5_ipool_update_global_cache(pool, cidx);
	idx -= 1;
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	trunk = lc->trunks[trunk_idx];
	MLX5_ASSERT(trunk);
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
	return &trunk->data[entry_idx * pool->cfg.size];
}

static void *
mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_get_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}

static void *
_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
			 uint32_t *idx)
{
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed", cidx);
			return NULL;
		}
	} else if (pool->cache[cidx]->len) {
		pool->cache[cidx]->len--;
		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
		return _mlx5_ipool_get_cache(pool, cidx, *idx);
	}
	/* Local cache is empty, fetch more indexes from the global cache. */
	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
	if (unlikely(!(*idx)))
		return NULL;
	return _mlx5_ipool_get_cache(pool, cidx, *idx);
}

static void *
mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}

static void
_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_ipool_per_lcore *ilc;
	struct mlx5_indexed_cache *gc, *olc = NULL;
	uint32_t reclaim_num = 0;

	/*
	 * The index may have been allocated on core A but freed on core B;
	 * make sure core B's local cache exists before enqueueing.
	 */
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed", cidx);
			return;
		}
	}
	/* Try to enqueue to local index cache. */
	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
		pool->cache[cidx]->len++;
		return;
	}
	ilc = pool->cache[cidx];
	reclaim_num = pool->cfg.per_core_cache >> 2;
	ilc->len -= reclaim_num;
	/* Local index cache full, try with global index cache. */
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	if (ilc->lc != gc) {
		if (!(--ilc->lc->ref_cnt))
			olc = ilc->lc;
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}

static void
mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	_mlx5_ipool_free_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no available trunks, grow new. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continuously from small to big: some
	 * features, such as metering, accept only a limited number of index
	 * bits, and a random index with the MSB set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* A full trunk is removed from the free trunk list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}
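
/*
 * Example of the returned index (illustrative): entry 5 of trunk 2 in a
 * pool with plain 64-entry trunks yields iidx = 5 + 2 * 64 + 1 = 134. The
 * "+ 1" keeps index 0 free to mean "invalid", which is why callers must
 * treat 0 as the null index.
 */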

void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	if (pool->cfg.per_core_cache) {
		mlx5_ipool_free_cache(pool, idx);
		return;
	}
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	   (pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put into free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_cache(pool, idx);
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks = NULL;
	struct mlx5_indexed_cache *gc = pool->gc;
	uint32_t i, n_trunk_valid = 0;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	if (pool->cfg.per_core_cache) {
		for (i = 0; i <= RTE_MAX_LCORE; i++) {
			/*
			 * Free only stale global caches here; pool->gc
			 * itself is freed last.
			 */
			if (pool->cache[i]) {
				if (pool->cache[i]->lc &&
				    pool->cache[i]->lc != pool->gc &&
				    (!(--pool->cache[i]->lc->ref_cnt)))
					pool->cfg.free(pool->cache[i]->lc);
				pool->cfg.free(pool->cache[i]);
			}
		}
		if (gc) {
			trunks = gc->trunks;
			n_trunk_valid = gc->n_trunk_valid;
		}
	} else {
		trunks = pool->trunks;
		n_trunk_valid = pool->n_trunk_valid;
	}
	for (i = 0; i < n_trunk_valid; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (!gc && trunks)
		pool->cfg.free(trunks);
	if (gc)
		pool->cfg.free(gc);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

void
mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
{
	uint32_t i, j;
	struct mlx5_indexed_cache *gc;
	struct rte_bitmap *ibmp;
	uint32_t bmp_num, mem_size;

	if (!pool->cfg.per_core_cache)
		return;
	gc = pool->gc;
	if (!gc)
		return;
	/* Rebuild the allocation bitmap covering all valid trunks. */
	bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
	mem_size = rte_bitmap_get_memory_footprint(bmp_num);
	pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
					 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!pool->bmp_mem) {
		DRV_LOG(ERR, "Ipool bitmap mem allocate failed.");
		return;
	}
	ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
	if (!ibmp) {
		pool->cfg.free(pool->bmp_mem);
		pool->bmp_mem = NULL;
		DRV_LOG(ERR, "Ipool bitmap create failed.");
		return;
	}
	pool->ibmp = ibmp;
	/* Clear global cache. */
	for (i = 0; i < gc->len; i++)
		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
	/* Clear core cache. */
	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];

		if (!ilc)
			continue;
		for (j = 0; j < ilc->len; j++)
			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
	}
}
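
/*
 * Usage sketch (illustrative only): iterating a per-core-cache pool. The
 * flush builds a snapshot bitmap of allocated indexes which the iterator
 * below consumes, so the pool must stay quiescent meanwhile.
 *
 *	uint32_t pos = 0;
 *	void *entry;
 *
 *	mlx5_ipool_flush_cache(pool);
 *	for (entry = mlx5_ipool_get_next(pool, &pos); entry != NULL;
 *	     pos++, entry = mlx5_ipool_get_next(pool, &pos))
 *		handle(entry, pos);	// handle() is hypothetical
 */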

static void *
mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	struct rte_bitmap *ibmp;
	uint64_t slab = 0;
	uint32_t iidx = *pos;

	ibmp = pool->ibmp;
	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
		if (pool->bmp_mem) {
			pool->cfg.free(pool->bmp_mem);
			pool->bmp_mem = NULL;
			pool->ibmp = NULL;
		}
		return NULL;
	}
	iidx += __builtin_ctzll(slab);
	rte_bitmap_clear(ibmp, iidx);
	iidx++;
	*pos = iidx;
	return mlx5_ipool_get_cache(pool, iidx);
}

void *
mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	uint32_t idx = *pos;
	void *entry;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_next_cache(pool, pos);
	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
		entry = mlx5_ipool_get(pool, idx);
		if (entry) {
			*pos = idx;
			return entry;
		}
		idx++;
	}
	return NULL;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %u entries per trunk, "
	       "total: %u\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}
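
/*
 * Usage sketch (illustrative only): a 32-bit key to 64-bit value table.
 *
 *	union mlx5_l3t_data data = { .qword = val };
 *	struct mlx5_l3t_tbl *t = mlx5_l3t_create(MLX5_L3T_TYPE_QWORD);
 *
 *	if (mlx5_l3t_set_entry(t, key, &data))	// takes the first reference
 *		goto error;
 *	...
 *	if (!mlx5_l3t_get_entry(t, key, &data))	// fills data, +1 reference
 *		use(data.qword);	// use() is hypothetical
 *	mlx5_l3t_clear_entry(t, key);	// drop one reference
 */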

void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}
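
/*
 * The 32-bit index is consumed level by level, using the masks and offsets
 * from mlx5_utils.h:
 *
 *	idx bits:  | GT bits | MT bits | ET bits |
 *	             global    middle    entry
 *
 * so a lookup costs two pointer hops plus an array access in the entry
 * table, and unused subtrees are simply NULL pointers.
 */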

int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table; create it if empty. The global table
	 * ref_cnt is increased when a new subtable is created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table; create it if empty. The middle table
	 * ref_cnt is increased when a new entry table is created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* Entry data is not ready; use the user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the newly allocated data to the entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}