/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"
/********************* Cache list ************************/
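/*
 * The cache list is a rwlock-protected linked list of reference-counted
 * entries.  Lookups take the read lock; insertions and removals take the
 * write lock.  The list generation counter (gen_cnt) is bumped on every
 * change so that a writer can detect insertions that raced with its
 * earlier lock-free lookup.
 */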
static struct mlx5_cache_entry *
mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
			     struct mlx5_cache_entry *entry __rte_unused,
			     void *ctx __rte_unused)
{
	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
}
static void
mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
			     struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}
int
mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
		     uint32_t entry_size, void *ctx,
		     mlx5_cache_create_cb cb_create,
		     mlx5_cache_match_cb cb_match,
		     mlx5_cache_remove_cb cb_remove)
{
	MLX5_ASSERT(list);
	if (!cb_match || (!cb_create ^ !cb_remove))
		return -1;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->entry_sz = entry_size;
	list->ctx = ctx;
	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
	LIST_INIT(&list->head);
	return 0;
}
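/*
 * Typical usage (illustrative sketch only; the entry type and callback
 * names below are hypothetical, not part of this file):
 *
 *	struct my_entry {
 *		struct mlx5_cache_entry entry;	(embedded as first member)
 *		uint32_t key;
 *	};
 *
 *	static int
 *	my_match_cb(struct mlx5_cache_list *list,
 *		    struct mlx5_cache_entry *e, void *ctx)
 *	{
 *		return ((struct my_entry *)e)->key != *(uint32_t *)ctx;
 *	}
 *
 *	mlx5_cache_list_init(&list, "my_list", sizeof(struct my_entry),
 *			     NULL, NULL, my_match_cb, NULL);
 *	e = mlx5_cache_register(&list, &key);	(lookup or create)
 *	...
 *	mlx5_cache_unregister(&list, e);	(drop the reference)
 *
 * Note the match callback returns 0 on a match and non-zero otherwise.
 */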
static struct mlx5_cache_entry *
__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	LIST_FOREACH(entry, &list->head, next) {
		if (list->cb_match(list, entry, ctx))
			continue;
		if (reuse) {
			__atomic_add_fetch(&entry->ref_cnt, 1,
					   __ATOMIC_RELAXED);
			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
				list->name, (void *)entry, entry->ref_cnt);
		}
		break;
	}
	return entry;
}
static struct mlx5_cache_entry *
cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	rte_rwlock_read_lock(&list->lock);
	entry = __cache_lookup(list, ctx, reuse);
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}
struct mlx5_cache_entry *
mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
{
	return cache_lookup(list, ctx, false);
}
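/*
 * Register (lookup or create) an entry matching the given context.  The
 * fast path is a lookup under the read lock; on a miss the write lock is
 * taken, the lookup is retried if the list generation changed in between,
 * and only then is a new entry created and inserted.  The returned entry
 * holds one reference.
 */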
struct mlx5_cache_entry *
mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
{
	struct mlx5_cache_entry *entry;
	uint32_t prev_gen_cnt = 0;

	MLX5_ASSERT(list);
	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
	/* Lookup with read lock, reuse if found. */
	entry = cache_lookup(list, ctx, true);
	if (entry)
		return entry;
	/* Not found, append with write lock - block read from other threads. */
	rte_rwlock_write_lock(&list->lock);
	/* If list changed by other threads before lock, search again. */
	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
		/* Lookup and reuse w/o read lock. */
		entry = __cache_lookup(list, ctx, true);
		if (entry)
			goto done;
	}
	entry = list->cb_create(list, entry, ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
			list->name, (void *)entry);
		goto done;
	}
	entry->ref_cnt = 1;
	LIST_INSERT_HEAD(&list->head, entry, next);
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
		list->name, (void *)entry, entry->ref_cnt);
done:
	rte_rwlock_write_unlock(&list->lock);
	return entry;
}
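/*
 * Drop one reference from the entry.  Returns 1 if the entry is still in
 * use, or 0 if the last reference was released and the entry was removed
 * from the list through the remove callback.
 */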
int
mlx5_cache_unregister(struct mlx5_cache_list *list,
		      struct mlx5_cache_entry *entry)
{
	rte_rwlock_write_lock(&list->lock);
	MLX5_ASSERT(entry && entry->next.le_prev);
	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
		list->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		rte_rwlock_write_unlock(&list->lock);
		return 1;
	}
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	LIST_REMOVE(entry, next);
	list->cb_remove(list, entry);
	rte_rwlock_write_unlock(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
		list->name, (void *)entry);
	return 0;
}
void
mlx5_cache_list_destroy(struct mlx5_cache_list *list)
{
	struct mlx5_cache_entry *entry;

	MLX5_ASSERT(list);
	/* No LIST_FOREACH_SAFE, using while instead. */
	while (!LIST_EMPTY(&list->head)) {
		entry = LIST_FIRST(&list->head);
		LIST_REMOVE(entry, next);
		list->cb_remove(list, entry);
		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
			list->name, (void *)entry);
	}
	memset(list, 0, sizeof(*list));
}
uint32_t
mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}
/********************* Indexed pool **********************/
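/*
 * The indexed pool hands out small fixed-size entries addressed by a
 * 32-bit index instead of a pointer.  Entries live in "trunks": arrays of
 * pool->cfg.size bytes with a free-entry bitmap attached.  Trunk sizes are
 * powers of two and may optionally grow geometrically (grow_trunk and
 * grow_shift) so that the first allocations stay small.  Index 0 is
 * reserved as the invalid index; valid indices are 1-based.
 */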
static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->lock);
}
static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->lock);
}
static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}
static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}
static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return offset;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}
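/*
 * Example of the grow table math with illustrative values (not the
 * defaults of any particular pool): trunk_size = 16, grow_shift = 1,
 * grow_trunk = 3.
 *
 *	trunk #       0    1    2    3    4   ...
 *	entries      16   32   64  128  128   ... (mlx5_trunk_size_get)
 *	1st offset    0   16   48  112  240   ... (mlx5_trunk_idx_offset_get)
 *
 * grow_tbl[] caches the cumulative sizes {16, 48, 112} so that both the
 * entry-index to trunk-index and trunk-index to first-entry-offset
 * conversions need at most a bounded scan of grow_trunk values.
 */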
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	pool->free_list = TRUNK_INVALID;
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	return pool;
}
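/*
 * Typical usage (illustrative sketch; the struct and variable names are
 * hypothetical):
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(struct my_obj),
 *		.trunk_size = 64,
 *		.need_lock = 1,
 *		.type = "my_obj_pool",
 *	};
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	struct my_obj *obj = mlx5_ipool_zmalloc(pool, &idx);
 *
 *	... store the 32-bit idx instead of the pointer ...
 *
 *	obj = mlx5_ipool_get(pool, idx);
 *	mlx5_ipool_free(pool, idx);
 *	mlx5_ipool_destroy(pool);
 *
 * Leaving cfg.malloc and cfg.free unset selects mlx5_malloc/mlx5_free.
 */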
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk slots left, expand the trunk pointer array. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires memory to be cacheline aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits in the trunk, if any. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
	return 0;
}
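/*
 * Allocation returns both a pointer to the entry and its 1-based pool
 * index through *idx.  Index 0 is never produced, which lets callers use
 * 0 as the "no entry" marker and lets mlx5_ipool_free()/mlx5_ipool_get()
 * reject it cheaply.
 */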
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no trunk is available, grow a new one. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big,
	 * some features such as metering only accept limited bits of index.
	 * A random index with the MSB set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
	if (!trunk->free) {
		/* A full trunk is removed from the free list here. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}
void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}
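/*
 * Freeing converts the public 1-based index back to the internal 0-based
 * one, marks the entry as available in the trunk bitmap again and, when
 * release_mem_en is set and the trunk becomes completely free, unlinks
 * and releases the whole trunk back to the configured allocator.
 */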
void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	   (pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put the trunk at the head of the free trunk list. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
	}
out:
	mlx5_ipool_unlock(pool);
}
void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx, entry_idx;

	if (!idx)
		return NULL;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}
int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks;
	uint32_t i;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	trunks = pool->trunks;
	for (i = 0; i < pool->n_trunk; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (pool->trunks)
		pool->cfg.free(pool->trunks);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}
void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}
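/*
 * The L3 table (l3t) is a three-level radix table indexed by a 32-bit key:
 * a global table points to middle tables, middle tables point to entry
 * tables, and the entry tables hold the word/dword/qword/pointer payloads
 * together with per-entry reference counts.  Entry tables are allocated
 * from an indexed pool, and each level is freed again once its reference
 * count drops to zero.
 */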
struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}
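/*
 * Typical usage (illustrative sketch; key/value names are hypothetical and
 * error handling is omitted):
 *
 *	struct mlx5_l3t_tbl *t = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
 *	union mlx5_l3t_data d = { .dword = value };
 *
 *	mlx5_l3t_set_entry(t, key, &d);	   (store, entry holds one reference)
 *	mlx5_l3t_get_entry(t, key, &d);	   (read back, takes another reference)
 *	mlx5_l3t_clear_entry(t, key);	   (drop one reference)
 *	mlx5_l3t_clear_entry(t, key);	   (last reference: data cleared)
 *	mlx5_l3t_destroy(t);
 */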
void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}
static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}
int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. Ref_cnt will be
	 * increased if a new sub table is created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. Ref_cnt will be
	 * increased if a new sub entry table is created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}
int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
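/*
 * mlx5_l3t_prepare_entry() combines lookup and insertion under one lock:
 * it reads the entry first and, only if no data is stored yet, invokes the
 * user-provided allocation callback and stores the result, so concurrent
 * users of the same key end up sharing a single allocation.
 */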
int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* Entry data is not ready, use the user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the newly allocated data to the entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}