1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4
5 #include <rte_malloc.h>
6
7 #include <mlx5_malloc.h>
8
9 #include "mlx5_utils.h"
10
11 /********************* Indexed pool **********************/
12
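/*
 * The indexed pool stores fixed-size entries in dynamically allocated trunks
 * and hands out small, non-zero integer indexes instead of pointers (index 0
 * means "invalid"). Two modes exist: the classic mode tracks free entries
 * with one bitmap per trunk under an optional spinlock, while the per-core
 * cache mode (cfg.per_core_cache != 0) keeps free indexes in per-lcore
 * caches backed by a shared global cache; the latter cannot be combined
 * with cfg.release_mem_en.
 *
 * A minimal usage sketch of the classic mode (not compiled here, and
 * "struct my_obj" is only a hypothetical entry type):
 *
 *   struct mlx5_indexed_pool_config cfg = {
 *           .size = sizeof(struct my_obj),
 *           .trunk_size = 64,
 *           .need_lock = 1,
 *           .release_mem_en = 1,
 *           .type = "my_obj_ipool",
 *   };
 *   struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *   uint32_t idx;
 *   struct my_obj *obj = mlx5_ipool_zmalloc(pool, &idx);
 *
 *   ... the object stays reachable via mlx5_ipool_get(pool, idx) ...
 *
 *   mlx5_ipool_free(pool, idx);
 *   mlx5_ipool_destroy(pool);
 */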
13 static inline void
14 mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
15 {
16         if (pool->cfg.need_lock)
17                 rte_spinlock_lock(&pool->rsz_lock);
18 }
19
20 static inline void
21 mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
22 {
23         if (pool->cfg.need_lock)
24                 rte_spinlock_unlock(&pool->rsz_lock);
25 }
26
27 static inline uint32_t
28 mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
29 {
30         struct mlx5_indexed_pool_config *cfg = &pool->cfg;
31         uint32_t trunk_idx = 0;
32         uint32_t i;
33
34         if (!cfg->grow_trunk)
35                 return entry_idx / cfg->trunk_size;
36         if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
37                 trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
38                             (cfg->trunk_size << (cfg->grow_shift *
39                             cfg->grow_trunk)) + cfg->grow_trunk;
40         } else {
41                 for (i = 0; i < cfg->grow_trunk; i++) {
42                         if (entry_idx < pool->grow_tbl[i])
43                                 break;
44                 }
45                 trunk_idx = i;
46         }
47         return trunk_idx;
48 }
49
50 static inline uint32_t
51 mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
52 {
53         struct mlx5_indexed_pool_config *cfg = &pool->cfg;
54
55         return cfg->trunk_size << (cfg->grow_shift *
56                (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
57 }
58
59 static inline uint32_t
60 mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
61 {
62         struct mlx5_indexed_pool_config *cfg = &pool->cfg;
63         uint32_t offset = 0;
64
65         if (!trunk_idx)
66                 return 0;
67         if (!cfg->grow_trunk)
68                 return cfg->trunk_size * trunk_idx;
69         if (trunk_idx < cfg->grow_trunk)
70                 offset = pool->grow_tbl[trunk_idx - 1];
71         else
72                 offset = pool->grow_tbl[cfg->grow_trunk - 1] +
73                          (cfg->trunk_size << (cfg->grow_shift *
74                          cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
75         return offset;
76 }
77
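/*
 * The three helpers above convert between a flat entry index and the
 * (trunk, offset) pair, using the cumulative grow table built in
 * mlx5_ipool_create(). For example, with the configuration used for the
 * L3 table entries below (trunk_size = 16, grow_shift = 1, grow_trunk = 6)
 * the trunk sizes are 16, 32, 64, 128, 256, 512 and then 1024 for every
 * later trunk, so grow_tbl[] = {16, 48, 112, 240, 496, 1008}. Entry index
 * 100 therefore falls into trunk 2 (48 <= 100 < 112), which starts at
 * offset 48 and holds 64 entries, i.e. position 52 inside that trunk.
 */
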
78 struct mlx5_indexed_pool *
79 mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
80 {
81         struct mlx5_indexed_pool *pool;
82         uint32_t i;
83
84         if (!cfg || (!cfg->malloc ^ !cfg->free) ||
85             (cfg->per_core_cache && cfg->release_mem_en) ||
86             (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
87             ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
88                 return NULL;
89         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
90                            sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
91                            SOCKET_ID_ANY);
92         if (!pool)
93                 return NULL;
94         pool->cfg = *cfg;
95         if (!pool->cfg.trunk_size)
96                 pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
97         if (!cfg->malloc && !cfg->free) {
98                 pool->cfg.malloc = mlx5_malloc;
99                 pool->cfg.free = mlx5_free;
100         }
101         if (pool->cfg.need_lock)
102                 rte_spinlock_init(&pool->rsz_lock);
103         /*
104          * Initialize the dynamic grow trunk size lookup table for a quick
105          * lookup of the trunk entry index offset.
106          */
107         for (i = 0; i < cfg->grow_trunk; i++) {
108                 pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
109                 if (i > 0)
110                         pool->grow_tbl[i] += pool->grow_tbl[i - 1];
111         }
112         if (!pool->cfg.max_idx)
113                 pool->cfg.max_idx =
114                         mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
115         if (!cfg->per_core_cache)
116                 pool->free_list = TRUNK_INVALID;
117         rte_spinlock_init(&pool->lcore_lock);
118         return pool;
119 }
120
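/*
 * Grow the classic-mode pool by one trunk. Each trunk is a single
 * allocation laid out as: the struct mlx5_indexed_trunk header, then
 * RTE_CACHE_LINE_ROUNDUP(entries * cfg.size) bytes of entry data, then an
 * rte_bitmap with one bit per entry, initialized with all bits set (all
 * entries free). Bits that would map beyond cfg.max_idx are cleared so
 * those entries can never be handed out. Without release_mem_en the new
 * trunk is appended at slot n_trunk_valid; with it, the first NULL slot
 * in the trunk array is reused.
 */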
121 static int
122 mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
123 {
124         struct mlx5_indexed_trunk *trunk;
125         struct mlx5_indexed_trunk **trunk_tmp;
126         struct mlx5_indexed_trunk **p;
127         size_t trunk_size = 0;
128         size_t data_size;
129         size_t bmp_size;
130         uint32_t idx, cur_max_idx, i;
131
132         cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
133         if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
134             cur_max_idx >= pool->cfg.max_idx)
135                 return -ENOMEM;
136         if (pool->n_trunk_valid == pool->n_trunk) {
137                 /* No free trunk slot left, expand the trunk list. */
138                 int n_grow = pool->n_trunk_valid ? pool->n_trunk :
139                              RTE_CACHE_LINE_SIZE / sizeof(void *);
140
141                 p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
142                                      sizeof(struct mlx5_indexed_trunk *),
143                                      RTE_CACHE_LINE_SIZE, rte_socket_id());
144                 if (!p)
145                         return -ENOMEM;
146                 if (pool->trunks)
147                         memcpy(p, pool->trunks, pool->n_trunk_valid *
148                                sizeof(struct mlx5_indexed_trunk *));
149                 memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
150                        n_grow * sizeof(void *));
151                 trunk_tmp = pool->trunks;
152                 pool->trunks = p;
153                 if (trunk_tmp)
154                         pool->cfg.free(trunk_tmp);
155                 pool->n_trunk += n_grow;
156         }
157         if (!pool->cfg.release_mem_en) {
158                 idx = pool->n_trunk_valid;
159         } else {
160                 /* Find the first available slot in the trunk list. */
161                 for (idx = 0; idx < pool->n_trunk; idx++)
162                         if (pool->trunks[idx] == NULL)
163                                 break;
164         }
165         trunk_size += sizeof(*trunk);
166         data_size = mlx5_trunk_size_get(pool, idx);
167         bmp_size = rte_bitmap_get_memory_footprint(data_size);
168         /* rte_bitmap requires cacheline-aligned memory. */
169         trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
170         trunk_size += bmp_size;
171         trunk = pool->cfg.malloc(0, trunk_size,
172                                  RTE_CACHE_LINE_SIZE, rte_socket_id());
173         if (!trunk)
174                 return -ENOMEM;
175         pool->trunks[idx] = trunk;
176         trunk->idx = idx;
177         trunk->free = data_size;
178         trunk->prev = TRUNK_INVALID;
179         trunk->next = TRUNK_INVALID;
180         MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
181         pool->free_list = idx;
182         /* Mark all entries as available. */
183         trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
184                      [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
185                      bmp_size);
186         /* Clear any excess bits beyond the pool's max index. */
187         if (cur_max_idx + data_size > pool->cfg.max_idx) {
188                 for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
189                         rte_bitmap_clear(trunk->bmp, i);
190         }
191         MLX5_ASSERT(trunk->bmp);
192         pool->n_trunk_valid++;
193 #ifdef POOL_DEBUG
194         pool->trunk_new++;
195         pool->trunk_avail++;
196 #endif
197         return 0;
198 }
199
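/*
 * Per-core cache mode keeps two levels of free-index caches: pool->gc is
 * the shared global cache (trunk array, trunk counters and an overflow
 * array of free indexes), and each lcore keeps a reference-counted
 * snapshot of it in pool->cache[cidx]->lc. This helper re-points the
 * local snapshot at the current global cache and frees the superseded
 * one once its reference count drops to zero.
 */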
200 static inline struct mlx5_indexed_cache *
201 mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
202 {
203         struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
204
205         lc = pool->cache[cidx]->lc;
206         gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
207         if (gc && lc != gc) {
208                 mlx5_ipool_lock(pool);
209                 if (lc && !(--lc->ref_cnt))
210                         olc = lc;
211                 lc = pool->gc;
212                 lc->ref_cnt++;
213                 pool->cache[cidx]->lc = lc;
214                 mlx5_ipool_unlock(pool);
215                 if (olc)
216                         pool->cfg.free(olc);
217         }
218         return lc;
219 }
220
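/*
 * Refill the per-lcore cache from the global level. First try to grab a
 * quarter of per_core_cache indexes from the global cache. If none are
 * available, grow: resize the trunk array when needed, allocate a new
 * trunk, publish it, push about half of the fresh indexes to the global
 * cache and the rest (minus the one returned) to the local cache. If
 * another lcore published a trunk or flushed indexes in the meantime,
 * the work is discarded and the whole sequence restarts at check_again.
 */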
221 static uint32_t
222 mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
223 {
224         struct mlx5_indexed_trunk *trunk;
225         struct mlx5_indexed_cache *p, *lc, *olc = NULL;
226         size_t trunk_size = 0;
227         size_t data_size;
228         uint32_t cur_max_idx, trunk_idx, trunk_n;
229         uint32_t fetch_size, ts_idx, i;
230         int n_grow;
231
232 check_again:
233         p = NULL;
234         fetch_size = 0;
235         /*
236          * Fetch new indexes from the global cache if possible. On the first
237          * round the local cache will be NULL.
238          */
239         lc = pool->cache[cidx]->lc;
240         mlx5_ipool_lock(pool);
241         /* Try to update local cache first. */
242         if (likely(pool->gc)) {
243                 if (lc != pool->gc) {
244                         if (lc && !(--lc->ref_cnt))
245                                 olc = lc;
246                         lc = pool->gc;
247                         lc->ref_cnt++;
248                         pool->cache[cidx]->lc = lc;
249                 }
250                 if (lc->len) {
251                         /* Use the updated local cache to fetch index. */
252                         fetch_size = pool->cfg.per_core_cache >> 2;
253                         if (lc->len < fetch_size)
254                                 fetch_size = lc->len;
255                         lc->len -= fetch_size;
256                         memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
257                                sizeof(uint32_t) * fetch_size);
258                 }
259         }
260         mlx5_ipool_unlock(pool);
261         if (unlikely(olc)) {
262                 pool->cfg.free(olc);
263                 olc = NULL;
264         }
265         if (fetch_size) {
266                 pool->cache[cidx]->len = fetch_size - 1;
267                 return pool->cache[cidx]->idx[pool->cache[cidx]->len];
268         }
269         trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
270                          __ATOMIC_ACQUIRE) : 0;
271         trunk_n = lc ? lc->n_trunk : 0;
272         cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
273         /* Check if the index reached the maximum. */
274         if (trunk_idx == TRUNK_MAX_IDX ||
275             cur_max_idx >= pool->cfg.max_idx)
276                 return 0;
277         /* Not enough space in the trunk array, resize it. */
278         if (trunk_idx == trunk_n) {
279                 n_grow = trunk_idx ? trunk_idx :
280                              RTE_CACHE_LINE_SIZE / sizeof(void *);
281                 cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
282                 /* Resize the trunk array. */
283                 p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
284                         sizeof(struct mlx5_indexed_trunk *)) +
285                         (cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
286                         RTE_CACHE_LINE_SIZE, rte_socket_id());
287                 if (!p)
288                         return 0;
289                 p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
290                 if (lc)
291                         memcpy(p->trunks, lc->trunks, trunk_idx *
292                        sizeof(struct mlx5_indexed_trunk *));
293 #ifdef RTE_LIBRTE_MLX5_DEBUG
294                 memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
295                         n_grow * sizeof(void *));
296 #endif
297                 p->n_trunk_valid = trunk_idx;
298                 p->n_trunk = trunk_n + n_grow;
299                 p->len = 0;
300         }
301         /* Prepare the new trunk. */
302         trunk_size = sizeof(*trunk);
303         data_size = mlx5_trunk_size_get(pool, trunk_idx);
304         trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
305         trunk = pool->cfg.malloc(0, trunk_size,
306                                  RTE_CACHE_LINE_SIZE, rte_socket_id());
307         if (unlikely(!trunk)) {
308                 pool->cfg.free(p);
309                 return 0;
310         }
311         trunk->idx = trunk_idx;
312         trunk->free = data_size;
313         mlx5_ipool_lock(pool);
314         /*
315          * Double check whether the trunks have been updated or indexes became
316          * available. While the new trunk was being allocated, indexes may still
317          * have been flushed to the global cache, so check pool->gc->len too.
318          */
319         if (pool->gc && (lc != pool->gc ||
320             lc->n_trunk_valid != trunk_idx ||
321             pool->gc->len)) {
322                 mlx5_ipool_unlock(pool);
323                 if (p)
324                         pool->cfg.free(p);
325                 pool->cfg.free(trunk);
326                 goto check_again;
327         }
328         /* Resize the trunk array and update the local cache first. */
329         if (p) {
330                 if (lc && !(--lc->ref_cnt))
331                         olc = lc;
332                 lc = p;
333                 lc->ref_cnt = 1;
334                 pool->cache[cidx]->lc = lc;
335                 __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
336         }
337         /* Add trunk to trunks array. */
338         lc->trunks[trunk_idx] = trunk;
339         __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
340         /* Enqueue half of the new indexes to the global cache. */
341         ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
342         fetch_size = trunk->free >> 1;
343         if (fetch_size > pool->cfg.per_core_cache)
344                 fetch_size = trunk->free - pool->cfg.per_core_cache;
345         for (i = 0; i < fetch_size; i++)
346                 lc->idx[i] = ts_idx + i;
347         lc->len = fetch_size;
348         mlx5_ipool_unlock(pool);
349         /* Copy the remaining indexes, minus the one returned, to the local cache. */
350         pool->cache[cidx]->len = trunk->free - fetch_size - 1;
351         ts_idx += fetch_size;
352         for (i = 0; i < pool->cache[cidx]->len; i++)
353                 pool->cache[cidx]->idx[i] = ts_idx + i;
354         if (olc)
355                 pool->cfg.free(olc);
356         return ts_idx + i;
357 }
358
359 static void *
360 _mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
361 {
362         struct mlx5_indexed_trunk *trunk;
363         struct mlx5_indexed_cache *lc;
364         uint32_t trunk_idx;
365         uint32_t entry_idx;
366
367         MLX5_ASSERT(idx);
368         if (unlikely(!pool->cache[cidx])) {
369                 pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
370                         sizeof(struct mlx5_ipool_per_lcore) +
371                         (pool->cfg.per_core_cache * sizeof(uint32_t)),
372                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
373                 if (!pool->cache[cidx]) {
374                         DRV_LOG(ERR, "Ipool cache%d allocation failed", cidx);
375                         return NULL;
376                 }
377         }
378         lc = mlx5_ipool_update_global_cache(pool, cidx);
379         idx -= 1;
380         trunk_idx = mlx5_trunk_idx_get(pool, idx);
381         trunk = lc->trunks[trunk_idx];
382         MLX5_ASSERT(trunk);
383         entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
384         return &trunk->data[entry_idx * pool->cfg.size];
385 }
386
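/*
 * The wrappers below resolve the per-lcore cache slot. Threads outside the
 * EAL (rte_lcore_index() == -1) share the spare slot RTE_MAX_LCORE, which
 * is why the cache array has RTE_MAX_LCORE + 1 entries and why that slot
 * is protected by pool->lcore_lock.
 */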
387 static void *
388 mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
389 {
390         void *entry;
391         int cidx;
392
393         cidx = rte_lcore_index(rte_lcore_id());
394         if (unlikely(cidx == -1)) {
395                 cidx = RTE_MAX_LCORE;
396                 rte_spinlock_lock(&pool->lcore_lock);
397         }
398         entry = _mlx5_ipool_get_cache(pool, cidx, idx);
399         if (unlikely(cidx == RTE_MAX_LCORE))
400                 rte_spinlock_unlock(&pool->lcore_lock);
401         return entry;
402 }
403
404
405 static void *
406 _mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
407                          uint32_t *idx)
408 {
409         if (unlikely(!pool->cache[cidx])) {
410                 pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
411                         sizeof(struct mlx5_ipool_per_lcore) +
412                         (pool->cfg.per_core_cache * sizeof(uint32_t)),
413                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
414                 if (!pool->cache[cidx]) {
415                         DRV_LOG(ERR, "Ipool cache%d allocation failed", cidx);
416                         return NULL;
417                 }
418         } else if (pool->cache[cidx]->len) {
419                 pool->cache[cidx]->len--;
420                 *idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
421                 return _mlx5_ipool_get_cache(pool, cidx, *idx);
422         }
423         /* Local cache is empty, fetch more indexes from the global cache. */
424         *idx = mlx5_ipool_allocate_from_global(pool, cidx);
425         if (unlikely(!(*idx)))
426                 return NULL;
427         return _mlx5_ipool_get_cache(pool, cidx, *idx);
428 }
429
430 static void *
431 mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
432 {
433         void *entry;
434         int cidx;
435
436         cidx = rte_lcore_index(rte_lcore_id());
437         if (unlikely(cidx == -1)) {
438                 cidx = RTE_MAX_LCORE;
439                 rte_spinlock_lock(&pool->lcore_lock);
440         }
441         entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
442         if (unlikely(cidx == RTE_MAX_LCORE))
443                 rte_spinlock_unlock(&pool->lcore_lock);
444         return entry;
445 }
446
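/*
 * Return an index to the caches. The index always goes to the local cache
 * of the freeing lcore first (allocating that cache on demand, since the
 * index may have been allocated on another core). Once the local cache is
 * full, a quarter of per_core_cache indexes is flushed to the global cache
 * under the pool lock before the new index is stored locally.
 */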
447 static void
448 _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
449 {
450         struct mlx5_ipool_per_lcore *ilc;
451         struct mlx5_indexed_cache *gc, *olc = NULL;
452         uint32_t reclaim_num = 0;
453
454         MLX5_ASSERT(idx);
455         /*
456          * An index may be allocated on core A but freed on core B. In that
457          * case, check whether the local cache on core B has been allocated.
458          */
459         if (unlikely(!pool->cache[cidx])) {
460                 pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
461                         sizeof(struct mlx5_ipool_per_lcore) +
462                         (pool->cfg.per_core_cache * sizeof(uint32_t)),
463                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
464                 if (!pool->cache[cidx]) {
465                         DRV_LOG(ERR, "Ipool cache%d allocation failed", cidx);
466                         return;
467                 }
468         }
469         /* Try to enqueue to local index cache. */
470         if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
471                 pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
472                 pool->cache[cidx]->len++;
473                 return;
474         }
475         ilc = pool->cache[cidx];
476         reclaim_num = pool->cfg.per_core_cache >> 2;
477         ilc->len -= reclaim_num;
478         /* Local index cache full, try with global index cache. */
479         mlx5_ipool_lock(pool);
480         gc = pool->gc;
481         if (ilc->lc != gc) {
482                 if (!(--ilc->lc->ref_cnt))
483                         olc = ilc->lc;
484                 gc->ref_cnt++;
485                 ilc->lc = gc;
486         }
487         memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
488                reclaim_num * sizeof(uint32_t));
489         gc->len += reclaim_num;
490         mlx5_ipool_unlock(pool);
491         if (olc)
492                 pool->cfg.free(olc);
493         pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
494         pool->cache[cidx]->len++;
495 }
496
497 static void
498 mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
499 {
500         int cidx;
501
502         cidx = rte_lcore_index(rte_lcore_id());
503         if (unlikely(cidx == -1)) {
504                 cidx = RTE_MAX_LCORE;
505                 rte_spinlock_lock(&pool->lcore_lock);
506         }
507         _mlx5_ipool_free_cache(pool, cidx, idx);
508         if (unlikely(cidx == RTE_MAX_LCORE))
509                 rte_spinlock_unlock(&pool->lcore_lock);
510 }
511
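/*
 * Classic-mode allocation: take the trunk at the head of the free list
 * (growing the pool when the list is empty), scan its bitmap for a free
 * entry, clear the bit and translate the position into a global 1-based
 * index. A trunk that becomes completely used is unlinked from the free
 * list until one of its entries is freed again.
 */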
512 void *
513 mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
514 {
515         struct mlx5_indexed_trunk *trunk;
516         uint64_t slab = 0;
517         uint32_t iidx = 0;
518         void *p;
519
520         if (pool->cfg.per_core_cache)
521                 return mlx5_ipool_malloc_cache(pool, idx);
522         mlx5_ipool_lock(pool);
523         if (pool->free_list == TRUNK_INVALID) {
524                 /* If no trunk is available, grow a new one. */
525                 if (mlx5_ipool_grow(pool)) {
526                         mlx5_ipool_unlock(pool);
527                         return NULL;
528                 }
529         }
530         MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
531         trunk = pool->trunks[pool->free_list];
532         MLX5_ASSERT(trunk->free);
533         if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
534                 mlx5_ipool_unlock(pool);
535                 return NULL;
536         }
537         MLX5_ASSERT(slab);
538         iidx += __builtin_ctzll(slab);
539         MLX5_ASSERT(iidx != UINT32_MAX);
540         MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
541         rte_bitmap_clear(trunk->bmp, iidx);
542         p = &trunk->data[iidx * pool->cfg.size];
543         /*
544          * The ipool index should grow continuously from small to big: some
545          * features such as metering only accept a limited number of index
546          * bits, and a random index with the MSB set may be rejected.
547          */
548         iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
549         iidx += 1; /* non-zero index. */
550         trunk->free--;
551 #ifdef POOL_DEBUG
552         pool->n_entry++;
553 #endif
554         if (!trunk->free) {
555                 /* The trunk is full, remove it from the free list. */
556                 MLX5_ASSERT(pool->free_list == trunk->idx);
557                 pool->free_list = trunk->next;
558                 if (trunk->next != TRUNK_INVALID)
559                         pool->trunks[trunk->next]->prev = TRUNK_INVALID;
560                 trunk->prev = TRUNK_INVALID;
561                 trunk->next = TRUNK_INVALID;
562 #ifdef POOL_DEBUG
563                 pool->trunk_empty++;
564                 pool->trunk_avail--;
565 #endif
566         }
567         *idx = iidx;
568         mlx5_ipool_unlock(pool);
569         return p;
570 }
571
572 void *
573 mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
574 {
575         void *entry = mlx5_ipool_malloc(pool, idx);
576
577         if (entry && pool->cfg.size)
578                 memset(entry, 0, pool->cfg.size);
579         return entry;
580 }
581
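/*
 * Classic-mode release: set the entry's bit back in the trunk bitmap.
 * With release_mem_en, a trunk whose entries are all free is given back
 * to the configured free() callback and its slot in the trunk array is
 * cleared; otherwise a trunk that just regained its first free entry is
 * pushed onto the head of the free list.
 */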
582 void
583 mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
584 {
585         struct mlx5_indexed_trunk *trunk;
586         uint32_t trunk_idx;
587         uint32_t entry_idx;
588
589         if (!idx)
590                 return;
591         if (pool->cfg.per_core_cache) {
592                 mlx5_ipool_free_cache(pool, idx);
593                 return;
594         }
595         idx -= 1;
596         mlx5_ipool_lock(pool);
597         trunk_idx = mlx5_trunk_idx_get(pool, idx);
598         if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
599             (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
600                 goto out;
601         trunk = pool->trunks[trunk_idx];
602         if (!trunk)
603                 goto out;
604         entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
605         if (trunk_idx != trunk->idx ||
606             rte_bitmap_get(trunk->bmp, entry_idx))
607                 goto out;
608         rte_bitmap_set(trunk->bmp, entry_idx);
609         trunk->free++;
610         if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
611            (pool, trunk->idx)) {
612                 if (pool->free_list == trunk->idx)
613                         pool->free_list = trunk->next;
614                 if (trunk->next != TRUNK_INVALID)
615                         pool->trunks[trunk->next]->prev = trunk->prev;
616                 if (trunk->prev != TRUNK_INVALID)
617                         pool->trunks[trunk->prev]->next = trunk->next;
618                 pool->cfg.free(trunk);
619                 pool->trunks[trunk_idx] = NULL;
620                 pool->n_trunk_valid--;
621 #ifdef POOL_DEBUG
622                 pool->trunk_avail--;
623                 pool->trunk_free++;
624 #endif
625                 if (pool->n_trunk_valid == 0) {
626                         pool->cfg.free(pool->trunks);
627                         pool->trunks = NULL;
628                         pool->n_trunk = 0;
629                 }
630         } else if (trunk->free == 1) {
631                 /* Put the trunk at the head of the free list. */
632                 MLX5_ASSERT(pool->free_list != trunk->idx);
633                 trunk->next = pool->free_list;
634                 trunk->prev = TRUNK_INVALID;
635                 if (pool->free_list != TRUNK_INVALID)
636                         pool->trunks[pool->free_list]->prev = trunk->idx;
637                 pool->free_list = trunk->idx;
638 #ifdef POOL_DEBUG
639                 pool->trunk_empty--;
640                 pool->trunk_avail++;
641 #endif
642         }
643 #ifdef POOL_DEBUG
644         pool->n_entry--;
645 #endif
646 out:
647         mlx5_ipool_unlock(pool);
648 }
649
650 void *
651 mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
652 {
653         struct mlx5_indexed_trunk *trunk;
654         void *p = NULL;
655         uint32_t trunk_idx;
656         uint32_t entry_idx;
657
658         if (!idx)
659                 return NULL;
660         if (pool->cfg.per_core_cache)
661                 return mlx5_ipool_get_cache(pool, idx);
662         idx -= 1;
663         mlx5_ipool_lock(pool);
664         trunk_idx = mlx5_trunk_idx_get(pool, idx);
665         if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
666             (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
667                 goto out;
668         trunk = pool->trunks[trunk_idx];
669         if (!trunk)
670                 goto out;
671         entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
672         if (trunk_idx != trunk->idx ||
673             rte_bitmap_get(trunk->bmp, entry_idx))
674                 goto out;
675         p = &trunk->data[entry_idx * pool->cfg.size];
676 out:
677         mlx5_ipool_unlock(pool);
678         return p;
679 }
680
681 int
682 mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
683 {
684         struct mlx5_indexed_trunk **trunks = NULL;
685         struct mlx5_indexed_cache *gc = pool->gc;
686         uint32_t i, n_trunk_valid = 0;
687
688         MLX5_ASSERT(pool);
689         mlx5_ipool_lock(pool);
690         if (pool->cfg.per_core_cache) {
691                 for (i = 0; i <= RTE_MAX_LCORE; i++) {
692                         /*
693                          * Free only stale global cache references here;
694                          * pool->gc itself is freed last.
695                          */
696                         if (pool->cache[i]) {
697                                 if (pool->cache[i]->lc &&
698                                     pool->cache[i]->lc != pool->gc &&
699                                     (!(--pool->cache[i]->lc->ref_cnt)))
700                                         pool->cfg.free(pool->cache[i]->lc);
701                                 pool->cfg.free(pool->cache[i]);
702                         }
703                 }
704                 if (gc) {
705                         trunks = gc->trunks;
706                         n_trunk_valid = gc->n_trunk_valid;
707                 }
708         } else {
709                 gc = NULL;
710                 trunks = pool->trunks;
711                 n_trunk_valid = pool->n_trunk_valid;
712         }
713         for (i = 0; i < n_trunk_valid; i++) {
714                 if (trunks[i])
715                         pool->cfg.free(trunks[i]);
716         }
717         if (!gc && trunks)
718                 pool->cfg.free(trunks);
719         if (gc)
720                 pool->cfg.free(gc);
721         mlx5_ipool_unlock(pool);
722         mlx5_free(pool);
723         return 0;
724 }
725
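/*
 * Iteration over a per-core-cache pool is a two-step process: flushing
 * builds pool->ibmp with one bit set per allocated entry (all indexes up
 * to the current maximum, minus those sitting free in the global and
 * per-lcore caches), and mlx5_ipool_get_next() then walks the set bits.
 * A minimal sketch, assuming "pool" was created with per_core_cache set:
 *
 *   uint32_t pos = 0;
 *   void *entry;
 *
 *   mlx5_ipool_flush_cache(pool);
 *   while ((entry = mlx5_ipool_get_next(pool, &pos)) != NULL) {
 *           ... entry is the object at (1-based) index pos ...
 *   }
 */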
726 void
727 mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
728 {
729         uint32_t i, j;
730         struct mlx5_indexed_cache *gc;
731         struct rte_bitmap *ibmp;
732         uint32_t bmp_num, mem_size;
733
734         if (!pool->cfg.per_core_cache)
735                 return;
736         gc = pool->gc;
737         if (!gc)
738                 return;
739         /* Reset bmp. */
740         bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
741         mem_size = rte_bitmap_get_memory_footprint(bmp_num);
742         pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
743                                          RTE_CACHE_LINE_SIZE, rte_socket_id());
744         if (!pool->bmp_mem) {
745                 DRV_LOG(ERR, "Ipool bitmap memory allocation failed.");
746                 return;
747         }
748         ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
749         if (!ibmp) {
750                 pool->cfg.free(pool->bmp_mem);
751                 pool->bmp_mem = NULL;
752                 DRV_LOG(ERR, "Ipool bitmap creation failed.");
753                 return;
754         }
755         pool->ibmp = ibmp;
756         /* Clear global cache. */
757         for (i = 0; i < gc->len; i++)
758                 rte_bitmap_clear(ibmp, gc->idx[i] - 1);
759         /* Clear core cache. */
760         for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
761                 struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
762
763                 if (!ilc)
764                         continue;
765                 for (j = 0; j < ilc->len; j++)
766                         rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
767         }
768 }
769
770 static void *
771 mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
772 {
773         struct rte_bitmap *ibmp;
774         uint64_t slab = 0;
775         uint32_t iidx = *pos;
776
777         ibmp = pool->ibmp;
778         if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
779                 if (pool->bmp_mem) {
780                         pool->cfg.free(pool->bmp_mem);
781                         pool->bmp_mem = NULL;
782                         pool->ibmp = NULL;
783                 }
784                 return NULL;
785         }
786         iidx += __builtin_ctzll(slab);
787         rte_bitmap_clear(ibmp, iidx);
788         iidx++;
789         *pos = iidx;
790         return mlx5_ipool_get_cache(pool, iidx);
791 }
792
793 void *
794 mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
795 {
796         uint32_t idx = *pos;
797         void *entry;
798
799         if (pool->cfg.per_core_cache)
800                 return mlx5_ipool_get_next_cache(pool, pos);
801         while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
802                 entry = mlx5_ipool_get(pool, idx);
803                 if (entry) {
804                         *pos = idx;
805                         return entry;
806                 }
807                 idx++;
808         }
809         return NULL;
810 }
811
812 void
813 mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
814 {
815         printf("Pool %s entry size %u, trunks %u, %u entries per trunk, "
816                "total: %u\n",
817                pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
818                pool->cfg.trunk_size, pool->n_trunk_valid);
819 #ifdef POOL_DEBUG
820         printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
821                "available %u free %u\n",
822                pool->cfg.type, pool->n_entry, pool->trunk_new,
823                pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
824 #endif
825 }
826
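/*
 * The L3 table (L3T) is a three-level lookup table indexed by a 32-bit
 * key: the top bits ((idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK)
 * select a slot in the global table, the middle bits select a slot in a
 * middle table, and idx & MLX5_L3T_ET_MASK selects the entry inside an
 * entry table allocated from the tbl->eip indexed pool. Each entry keeps
 * a word/dword/qword/pointer payload plus a reference count, and every
 * level is released once its reference count reaches zero. The get, set
 * and clear operations are serialized by the tbl->sl spinlock.
 */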
827 struct mlx5_l3t_tbl *
828 mlx5_l3t_create(enum mlx5_l3t_type type)
829 {
830         struct mlx5_l3t_tbl *tbl;
831         struct mlx5_indexed_pool_config l3t_ip_cfg = {
832                 .trunk_size = 16,
833                 .grow_trunk = 6,
834                 .grow_shift = 1,
835                 .need_lock = 0,
836                 .release_mem_en = 1,
837                 .malloc = mlx5_malloc,
838                 .free = mlx5_free,
839         };
840
841         if (type >= MLX5_L3T_TYPE_MAX) {
842                 rte_errno = EINVAL;
843                 return NULL;
844         }
845         tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
846                           SOCKET_ID_ANY);
847         if (!tbl) {
848                 rte_errno = ENOMEM;
849                 return NULL;
850         }
851         tbl->type = type;
852         switch (type) {
853         case MLX5_L3T_TYPE_WORD:
854                 l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
855                 l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
856                 break;
857         case MLX5_L3T_TYPE_DWORD:
858                 l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
859                 l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
860                 break;
861         case MLX5_L3T_TYPE_QWORD:
862                 l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
863                 l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
864                 break;
865         default:
866                 l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
867                 l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
868                 break;
869         }
870         rte_spinlock_init(&tbl->sl);
871         tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
872         if (!tbl->eip) {
873                 rte_errno = ENOMEM;
874                 mlx5_free(tbl);
875                 tbl = NULL;
876         }
877         return tbl;
878 }
879
880 void
881 mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
882 {
883         struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
884         uint32_t i, j;
885
886         if (!tbl)
887                 return;
888         g_tbl = tbl->tbl;
889         if (g_tbl) {
890                 for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
891                         m_tbl = g_tbl->tbl[i];
892                         if (!m_tbl)
893                                 continue;
894                         for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
895                                 if (!m_tbl->tbl[j])
896                                         continue;
897                                 MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
898                                             m_tbl->tbl[j])->ref_cnt);
899                                 mlx5_ipool_free(tbl->eip,
900                                                 ((struct mlx5_l3t_entry_word *)
901                                                 m_tbl->tbl[j])->idx);
902                                 m_tbl->tbl[j] = 0;
903                                 if (!(--m_tbl->ref_cnt))
904                                         break;
905                         }
906                         MLX5_ASSERT(!m_tbl->ref_cnt);
907                         mlx5_free(g_tbl->tbl[i]);
908                         g_tbl->tbl[i] = 0;
909                         if (!(--g_tbl->ref_cnt))
910                                 break;
911                 }
912                 MLX5_ASSERT(!g_tbl->ref_cnt);
913                 mlx5_free(tbl->tbl);
914                 tbl->tbl = 0;
915         }
916         mlx5_ipool_destroy(tbl->eip);
917         mlx5_free(tbl);
918 }
919
920 static int32_t
921 __l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
922                 union mlx5_l3t_data *data)
923 {
924         struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
925         struct mlx5_l3t_entry_word *w_e_tbl;
926         struct mlx5_l3t_entry_dword *dw_e_tbl;
927         struct mlx5_l3t_entry_qword *qw_e_tbl;
928         struct mlx5_l3t_entry_ptr *ptr_e_tbl;
929         void *e_tbl;
930         uint32_t entry_idx;
931
932         g_tbl = tbl->tbl;
933         if (!g_tbl)
934                 return -1;
935         m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
936         if (!m_tbl)
937                 return -1;
938         e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
939         if (!e_tbl)
940                 return -1;
941         entry_idx = idx & MLX5_L3T_ET_MASK;
942         switch (tbl->type) {
943         case MLX5_L3T_TYPE_WORD:
944                 w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
945                 data->word = w_e_tbl->entry[entry_idx].data;
946                 if (w_e_tbl->entry[entry_idx].data)
947                         w_e_tbl->entry[entry_idx].ref_cnt++;
948                 break;
949         case MLX5_L3T_TYPE_DWORD:
950                 dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
951                 data->dword = dw_e_tbl->entry[entry_idx].data;
952                 if (dw_e_tbl->entry[entry_idx].data)
953                         dw_e_tbl->entry[entry_idx].ref_cnt++;
954                 break;
955         case MLX5_L3T_TYPE_QWORD:
956                 qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
957                 data->qword = qw_e_tbl->entry[entry_idx].data;
958                 if (qw_e_tbl->entry[entry_idx].data)
959                         qw_e_tbl->entry[entry_idx].ref_cnt++;
960                 break;
961         default:
962                 ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
963                 data->ptr = ptr_e_tbl->entry[entry_idx].data;
964                 if (ptr_e_tbl->entry[entry_idx].data)
965                         ptr_e_tbl->entry[entry_idx].ref_cnt++;
966                 break;
967         }
968         return 0;
969 }
970
971 int32_t
972 mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
973                    union mlx5_l3t_data *data)
974 {
975         int ret;
976
977         rte_spinlock_lock(&tbl->sl);
978         ret = __l3t_get_entry(tbl, idx, data);
979         rte_spinlock_unlock(&tbl->sl);
980         return ret;
981 }
982
983 int32_t
984 mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
985 {
986         struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
987         struct mlx5_l3t_entry_word *w_e_tbl;
988         struct mlx5_l3t_entry_dword *dw_e_tbl;
989         struct mlx5_l3t_entry_qword *qw_e_tbl;
990         struct mlx5_l3t_entry_ptr *ptr_e_tbl;
991         void *e_tbl;
992         uint32_t entry_idx;
993         uint64_t ref_cnt;
994         int32_t ret = -1;
995
996         rte_spinlock_lock(&tbl->sl);
997         g_tbl = tbl->tbl;
998         if (!g_tbl)
999                 goto out;
1000         m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
1001         if (!m_tbl)
1002                 goto out;
1003         e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
1004         if (!e_tbl)
1005                 goto out;
1006         entry_idx = idx & MLX5_L3T_ET_MASK;
1007         switch (tbl->type) {
1008         case MLX5_L3T_TYPE_WORD:
1009                 w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
1010                 MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
1011                 ret = --w_e_tbl->entry[entry_idx].ref_cnt;
1012                 if (ret)
1013                         goto out;
1014                 w_e_tbl->entry[entry_idx].data = 0;
1015                 ref_cnt = --w_e_tbl->ref_cnt;
1016                 break;
1017         case MLX5_L3T_TYPE_DWORD:
1018                 dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
1019                 MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
1020                 ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
1021                 if (ret)
1022                         goto out;
1023                 dw_e_tbl->entry[entry_idx].data = 0;
1024                 ref_cnt = --dw_e_tbl->ref_cnt;
1025                 break;
1026         case MLX5_L3T_TYPE_QWORD:
1027                 qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
1028                 MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
1029                 ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
1030                 if (ret)
1031                         goto out;
1032                 qw_e_tbl->entry[entry_idx].data = 0;
1033                 ref_cnt = --qw_e_tbl->ref_cnt;
1034                 break;
1035         default:
1036                 ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
1037                 MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
1038                 ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
1039                 if (ret)
1040                         goto out;
1041                 ptr_e_tbl->entry[entry_idx].data = NULL;
1042                 ref_cnt = --ptr_e_tbl->ref_cnt;
1043                 break;
1044         }
1045         if (!ref_cnt) {
1046                 mlx5_ipool_free(tbl->eip,
1047                                 ((struct mlx5_l3t_entry_word *)e_tbl)->idx);
1048                 m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
1049                                                                         NULL;
1050                 if (!(--m_tbl->ref_cnt)) {
1051                         mlx5_free(m_tbl);
1052                         g_tbl->tbl
1053                         [(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
1054                         if (!(--g_tbl->ref_cnt)) {
1055                                 mlx5_free(g_tbl);
1056                                 tbl->tbl = 0;
1057                         }
1058                 }
1059         }
1060 out:
1061         rte_spinlock_unlock(&tbl->sl);
1062         return ret;
1063 }
1064
1065 static int32_t
1066 __l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
1067                 union mlx5_l3t_data *data)
1068 {
1069         struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
1070         struct mlx5_l3t_entry_word *w_e_tbl;
1071         struct mlx5_l3t_entry_dword *dw_e_tbl;
1072         struct mlx5_l3t_entry_qword *qw_e_tbl;
1073         struct mlx5_l3t_entry_ptr *ptr_e_tbl;
1074         void *e_tbl;
1075         uint32_t entry_idx, tbl_idx = 0;
1076
1077         /* Check the global table, create it if empty. */
1078         g_tbl = tbl->tbl;
1079         if (!g_tbl) {
1080                 g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
1081                                     sizeof(struct mlx5_l3t_level_tbl) +
1082                                     sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
1083                                     SOCKET_ID_ANY);
1084                 if (!g_tbl) {
1085                         rte_errno = ENOMEM;
1086                         return -1;
1087                 }
1088                 tbl->tbl = g_tbl;
1089         }
1090         /*
1091          * Check the middle table, create it if empty. The ref_cnt is
1092          * increased if a new sub-table is created.
1093          */
1094         m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
1095         if (!m_tbl) {
1096                 m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
1097                                     sizeof(struct mlx5_l3t_level_tbl) +
1098                                     sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
1099                                     SOCKET_ID_ANY);
1100                 if (!m_tbl) {
1101                         rte_errno = ENOMEM;
1102                         return -1;
1103                 }
1104                 g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
1105                                                                         m_tbl;
1106                 g_tbl->ref_cnt++;
1107         }
1108         /*
1109          * Check the entry table, create it if empty. The ref_cnt is
1110          * increased if a new entry table is created.
1111          */
1112         e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
1113         if (!e_tbl) {
1114                 e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
1115                 if (!e_tbl) {
1116                         rte_errno = ENOMEM;
1117                         return -1;
1118                 }
1119                 ((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
1120                 m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
1121                                                                         e_tbl;
1122                 m_tbl->ref_cnt++;
1123         }
1124         entry_idx = idx & MLX5_L3T_ET_MASK;
1125         switch (tbl->type) {
1126         case MLX5_L3T_TYPE_WORD:
1127                 w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
1128                 if (w_e_tbl->entry[entry_idx].data) {
1129                         data->word = w_e_tbl->entry[entry_idx].data;
1130                         w_e_tbl->entry[entry_idx].ref_cnt++;
1131                         rte_errno = EEXIST;
1132                         return -1;
1133                 }
1134                 w_e_tbl->entry[entry_idx].data = data->word;
1135                 w_e_tbl->entry[entry_idx].ref_cnt = 1;
1136                 w_e_tbl->ref_cnt++;
1137                 break;
1138         case MLX5_L3T_TYPE_DWORD:
1139                 dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
1140                 if (dw_e_tbl->entry[entry_idx].data) {
1141                         data->dword = dw_e_tbl->entry[entry_idx].data;
1142                         dw_e_tbl->entry[entry_idx].ref_cnt++;
1143                         rte_errno = EEXIST;
1144                         return -1;
1145                 }
1146                 dw_e_tbl->entry[entry_idx].data = data->dword;
1147                 dw_e_tbl->entry[entry_idx].ref_cnt = 1;
1148                 dw_e_tbl->ref_cnt++;
1149                 break;
1150         case MLX5_L3T_TYPE_QWORD:
1151                 qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
1152                 if (qw_e_tbl->entry[entry_idx].data) {
1153                         data->qword = qw_e_tbl->entry[entry_idx].data;
1154                         qw_e_tbl->entry[entry_idx].ref_cnt++;
1155                         rte_errno = EEXIST;
1156                         return -1;
1157                 }
1158                 qw_e_tbl->entry[entry_idx].data = data->qword;
1159                 qw_e_tbl->entry[entry_idx].ref_cnt = 1;
1160                 qw_e_tbl->ref_cnt++;
1161                 break;
1162         default:
1163                 ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
1164                 if (ptr_e_tbl->entry[entry_idx].data) {
1165                         data->ptr = ptr_e_tbl->entry[entry_idx].data;
1166                         ptr_e_tbl->entry[entry_idx].ref_cnt++;
1167                         rte_errno = EEXIST;
1168                         return -1;
1169                 }
1170                 ptr_e_tbl->entry[entry_idx].data = data->ptr;
1171                 ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
1172                 ptr_e_tbl->ref_cnt++;
1173                 break;
1174         }
1175         return 0;
1176 }
1177
1178 int32_t
1179 mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
1180                    union mlx5_l3t_data *data)
1181 {
1182         int ret;
1183
1184         rte_spinlock_lock(&tbl->sl);
1185         ret = __l3t_set_entry(tbl, idx, data);
1186         rte_spinlock_unlock(&tbl->sl);
1187         return ret;
1188 }
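
/*
 * A minimal L3T usage sketch (not compiled here, index and value are only
 * example numbers), assuming a MLX5_L3T_TYPE_DWORD table:
 *
 *   union mlx5_l3t_data data = { .dword = 0xcafe };
 *   struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
 *
 *   mlx5_l3t_set_entry(tbl, 5, &data);   entry 5 ref_cnt becomes 1
 *   mlx5_l3t_get_entry(tbl, 5, &data);   data.dword read back, ref_cnt 2
 *   mlx5_l3t_clear_entry(tbl, 5);        ref_cnt back to 1
 *   mlx5_l3t_clear_entry(tbl, 5);        ref_cnt 0, entry storage released
 *   mlx5_l3t_destroy(tbl);
 */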