/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"

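/*
 * Hash list: an array of singly linked bucket heads indexed by the CRC
 * hash of a 64-bit entry key. The table size is rounded up to a power
 * of two so a bucket can be selected with a simple mask.
 */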
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size)
{
	struct mlx5_hlist *h;
	uint32_t act_size;
	uint32_t alloc_size;

	if (!size)
		return NULL;
	/* Align to the next power of 2, 32bits integer is enough now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_head) * act_size;
	/* Using zmalloc, then no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
	h->table_sz = act_size;
	h->mask = act_size - 1;
	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
		h->name, act_size);
	return h;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	LIST_FOREACH(node, first, next) {
		if (node->key == key)
			return node;
	}
	return NULL;
}

int
mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && entry);
	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
	first = &h->heads[idx];
	/* No need to reuse the lookup function. */
	LIST_FOREACH(node, first, next) {
		if (node->key == entry->key)
			return -EEXIST;
	}
	LIST_INSERT_HEAD(first, entry, next);
	return 0;
}

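/*
 * Extended lookup/insert use a match callback instead of the plain key
 * comparison; the callback is expected to return 0 for a matching node.
 */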
struct mlx5_hlist_entry *
mlx5_hlist_lookup_ex(struct mlx5_hlist *h, uint64_t key,
		     mlx5_hlist_match_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && cb && ctx);
	idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	LIST_FOREACH(node, first, next) {
		if (!cb(node, ctx))
			return node;
	}
	return NULL;
}

int
mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry,
		     mlx5_hlist_match_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && entry && cb && ctx);
	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
	first = &h->heads[idx];
	/* No need to reuse the lookup function. */
	LIST_FOREACH(node, first, next) {
		if (!cb(node, ctx))
			return -EEXIST;
	}
	LIST_INSERT_HEAD(first, entry, next);
	return 0;
}

void
mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
		  struct mlx5_hlist_entry *entry)
{
	MLX5_ASSERT(entry && entry->next.le_prev);
	LIST_REMOVE(entry, next);
	/* Set to NULL so that removing the entry twice is harmless. */
	entry->next.le_prev = NULL;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h,
		   mlx5_hlist_destroy_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_entry *entry;

	MLX5_ASSERT(h);
	for (idx = 0; idx < h->table_sz; ++idx) {
		/* No LIST_FOREACH_SAFE, use while instead. */
		while (!LIST_EMPTY(&h->heads[idx])) {
			entry = LIST_FIRST(&h->heads[idx]);
			LIST_REMOVE(entry, next);
			/*
			 * The user owns the whole element which contains the
			 * data entry, so the user is in charge of the clean-up
			 * and free work, since the hlist entry may not be
			 * placed at the beginning of the element (although
			 * that is the suggested layout). Otherwise the
			 * default free function is used.
			 */
			if (cb)
				cb(entry, ctx);
			else
				mlx5_free(entry);
		}
	}
	mlx5_free(h);
}

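/*
 * Indexed pool: entries are carved from "trunks". Each trunk bundles a
 * header, a cacheline-aligned data array and an rte_bitmap of free
 * slots in a single allocation. Entries are addressed by a 32-bit
 * index rather than a pointer, and trunks with free entries are kept
 * on a doubly linked free list headed by pool->free_list.
 */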
static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->lock);
}

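/*
 * With grow_trunk enabled, the first grow_trunk trunks grow
 * geometrically and all later trunks keep the largest size. For
 * example, trunk_size = 16, grow_shift = 1, grow_trunk = 6 gives
 * trunks of 16, 32, 64, 128, 256 and 512 entries (cumulative
 * grow_tbl = 16, 48, 112, 240, 496, 1008), and every trunk from
 * index 6 on holds 16 << 6 = 1024 entries.
 */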
static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}

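/*
 * A configuration is rejected unless malloc/free are both set or both
 * NULL (the mlx5 wrappers are used then), and unless trunk_size is a
 * power of two small enough that the in-trunk entry index and the
 * TRUNK_IDX_BITS trunk index still fit in 32 bits together.
 */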
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	pool->free_list = TRUNK_INVALID;
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	return pool;
}

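/*
 * Allocate one more trunk, expanding the trunk pointer array first when
 * no slot is left. Trunk header, entry data and free-slot bitmap are
 * carved out of a single allocation; the new trunk becomes the head of
 * the free list with all of its entries marked available.
 */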
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx;

	if (pool->n_trunk_valid == TRUNK_MAX_IDX)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk flags, expand trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires memory cacheline aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}

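/*
 * Allocate one entry: scan the free-slot bitmap of the first trunk on
 * the free list and return a pointer to the slot. The index written to
 * *idx is global and 1-based, so 0 can serve as the invalid index.
 */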
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no available trunks, grow new. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big;
	 * some features, e.g. metering, accept only a limited number
	 * of index bits, so a random index with the MSB set may be
	 * rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* The full trunk is removed from the free list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

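/*
 * Return an entry by its 1-based index. A trunk that gains its first
 * free entry is linked back to the head of the free list; with
 * release_mem_en set, a trunk whose entries are all free is handed
 * back to the allocator instead.
 */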
void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en &&
	    trunk->free == mlx5_trunk_size_get(pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put into free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

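/*
 * Translate a 1-based entry index back into a pointer. NULL is
 * returned for index 0, for out-of-range trunk indexes and for entries
 * currently marked free in the bitmap.
 */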
void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks;
	uint32_t i;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	trunks = pool->trunks;
	for (i = 0; i < pool->n_trunk; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (pool->trunks)
		pool->cfg.free(pool->trunks);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

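/*
 * Three-level table (L3T): a 32-bit index is split into a global-, a
 * middle- and an entry-table part. The two upper levels are pointer
 * arrays allocated on demand; the lowest level entry tables are
 * allocated from an indexed pool with release_mem_en set, so a fully
 * cleared table gives its memory back. Every level counts its live
 * children in ref_cnt.
 */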
struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}

void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

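/*
 * Walk the three levels for the given index and copy the entry data
 * out. A hit on non-empty data also takes a reference on the entry;
 * -1 is returned while any of the levels is still unallocated.
 */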
static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

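/*
 * Store data at the given index, creating any missing level on the way
 * down. If the entry is already occupied, the existing data is copied
 * back to the caller, its reference count is increased and the call
 * fails with rte_errno set to EEXIST.
 */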
static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. Ref_cnt will be
	 * increased if new sub table created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. Ref_cnt will be
	 * increased if new sub entry table created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

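/*
 * Get-or-create helper: return the entry data when it is already set;
 * otherwise let the caller's callback allocate the data and store it
 * in the table, all under the table lock.
 */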
int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* Entry data is not ready, use user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the new allocated data to entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}