/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"

/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->rsz_lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->rsz_lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}

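/*
 * Worked example of the grow-table math above (illustrative values, not a
 * recommended configuration): with trunk_size = 16, grow_shift = 1 and
 * grow_trunk = 3, the trunk sizes are 16, 32, 64 and then stay at 128, and
 * the cumulative grow_tbl built in mlx5_ipool_create() is {16, 48, 112}.
 * Entry index 100 is below grow_tbl[2] = 112, so the linear scan in
 * mlx5_trunk_idx_get() maps it to trunk 2, whose first-entry offset from
 * mlx5_trunk_idx_offset_get() is grow_tbl[1] = 48.
 */
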
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->per_core_cache && cfg->release_mem_en) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->rsz_lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table for quick
	 * lookup of a trunk's entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	if (!cfg->per_core_cache)
		pool->free_list = TRUNK_INVALID;
	rte_spinlock_init(&pool->lcore_lock);
	return pool;
}

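/*
 * Minimal usage sketch for the create path, compiled only when the
 * (hypothetical) MLX5_UTILS_EXAMPLES guard is defined. The configuration
 * values below are illustrative assumptions, not tuned defaults.
 */
#ifdef MLX5_UTILS_EXAMPLES
static struct mlx5_indexed_pool *
mlx5_utils_example_pool_create(void)
{
	struct mlx5_indexed_pool_config cfg = {
		.size = 64,		/* Entry size in bytes. */
		.trunk_size = 64,	/* Power of two, or 0 for default. */
		.grow_trunk = 3,	/* Number of growing trunk steps. */
		.grow_shift = 1,	/* Trunk size doubles on each step. */
		.need_lock = 1,		/* Serialize resize operations. */
		.release_mem_en = 1,	/* Free fully-empty trunks eagerly. */
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "example_ipool",
	};

	/* Returns NULL on invalid configuration or allocation failure. */
	return mlx5_ipool_create(&cfg);
}
#endif /* MLX5_UTILS_EXAMPLES */
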
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk slots left, expand the trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires the memory to be cache-line aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits in the trunk if any exist. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}

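/*
 * For reference, a trunk allocated by mlx5_ipool_grow() is one contiguous
 * buffer laid out as: the struct mlx5_indexed_trunk header, then the entry
 * data area (data_size * cfg.size bytes, rounded up to a cache line so
 * what follows stays aligned), and finally the rte_bitmap that tracks
 * which entries are free.
 */
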
static inline struct mlx5_indexed_cache *
mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;

	lc = pool->cache[cidx]->lc;
	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
	if (gc && lc != gc) {
		mlx5_ipool_lock(pool);
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = pool->gc;
		lc->ref_cnt++;
		pool->cache[cidx]->lc = lc;
		mlx5_ipool_unlock(pool);
		if (olc)
			pool->cfg.free(olc);
	}
	return lc;
}

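/*
 * A note on the reference counting above: each per-lcore cache holds one
 * counted reference on the trunk-list snapshot (mlx5_indexed_cache) it
 * last adopted. When a core observes that pool->gc has moved on, it drops
 * its reference on the stale snapshot and frees it once the count reaches
 * zero; the current pool->gc itself is only released in
 * mlx5_ipool_destroy().
 */
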
static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Fetch new indices from the global cache if possible. On the first
	 * round the local cache will be NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update the local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		if (lc->len) {
			/* Use the updated local cache to fetch indices. */
			fetch_size = pool->cfg.per_core_cache >> 2;
			if (lc->len < fetch_size)
				fetch_size = lc->len;
			lc->len -= fetch_size;
			memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
			       sizeof(uint32_t) * fetch_size);
		}
	}
	mlx5_ipool_unlock(pool);
	if (unlikely(olc)) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
			 __ATOMIC_ACQUIRE) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check whether the index has reached the maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* Not enough space in the trunk array; resize it. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
			n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double-check whether the trunk list has been updated or indices
	 * have become available. While the new trunk was being allocated,
	 * indices may have been flushed to the global cache, so
	 * pool->gc->len needs to be checked as well.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update the local cache first. */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
	}
	/* Add the trunk to the trunks array. */
	lc->trunks[trunk_idx] = trunk;
	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
	/* Enqueue half of the indices to the global cache. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/* Copy the remaining half minus one to the local cache index array. */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	return ts_idx + i;
}

static void *
_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *lc;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	MLX5_ASSERT(idx);
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed", cidx);
			return NULL;
		}
	}
	lc = mlx5_ipool_update_global_cache(pool, cidx);
	idx -= 1;
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	trunk = lc->trunks[trunk_idx];
	MLX5_ASSERT(trunk);
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
	return &trunk->data[entry_idx * pool->cfg.size];
}

static void *
mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_get_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}

static void *
_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
			 uint32_t *idx)
{
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed", cidx);
			return NULL;
		}
	} else if (pool->cache[cidx]->len) {
		pool->cache[cidx]->len--;
		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
		return _mlx5_ipool_get_cache(pool, cidx, *idx);
	}
	/* No index left in the local cache; fetch from the global pool. */
	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
	if (unlikely(!(*idx)))
		return NULL;
	return _mlx5_ipool_get_cache(pool, cidx, *idx);
}

static void *
mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}

static void
_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_ipool_per_lcore *ilc;
	struct mlx5_indexed_cache *gc, *olc = NULL;
	uint32_t reclaim_num = 0;

	MLX5_ASSERT(idx);
	/*
	 * An index may be allocated on core A but freed on core B. In that
	 * case, make sure the local cache on core B has been allocated.
	 */
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed", cidx);
			return;
		}
	}
	/* Try to enqueue to the local index cache. */
	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
		pool->cache[cidx]->len++;
		return;
	}
	ilc = pool->cache[cidx];
	reclaim_num = pool->cfg.per_core_cache >> 2;
	ilc->len -= reclaim_num;
	/* Local index cache is full; reclaim to the global index cache. */
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	if (ilc->lc != gc) {
		if (!(--ilc->lc->ref_cnt))
			olc = ilc->lc;
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}

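/*
 * The reclaim policy above flushes a quarter of the local cache at a time:
 * with a (hypothetical) per_core_cache of 2048 indices, a free on a full
 * local cache moves 512 indices back to the global cache before the freed
 * index itself is enqueued locally, so a burst of frees does not bounce
 * the global lock on every call.
 */
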
static void
mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	_mlx5_ipool_free_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no trunk is available, grow a new one. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to large:
	 * some features, such as metering, accept only a limited number of
	 * index bits, and a random index with the MSB set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* A full trunk is removed from the free list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

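/*
 * Allocation sketch (again under the hypothetical MLX5_UTILS_EXAMPLES
 * guard): indices returned by mlx5_ipool_malloc() are 1-based, 0 means
 * failure, and the same index is later passed to mlx5_ipool_get() and
 * mlx5_ipool_free().
 */
#ifdef MLX5_UTILS_EXAMPLES
static int
mlx5_utils_example_pool_use(struct mlx5_indexed_pool *pool)
{
	uint32_t idx = 0;
	void *entry = mlx5_ipool_zmalloc(pool, &idx);

	if (!entry)
		return -ENOMEM;
	MLX5_ASSERT(idx); /* Valid indices are never zero. */
	/* ... fill the entry; it can be looked up again by index ... */
	MLX5_ASSERT(mlx5_ipool_get(pool, idx) == entry);
	mlx5_ipool_free(pool, idx);
	return 0;
}
#endif /* MLX5_UTILS_EXAMPLES */
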
void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	if (pool->cfg.per_core_cache) {
		mlx5_ipool_free_cache(pool, idx);
		return;
	}
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en &&
	    trunk->free == mlx5_trunk_size_get(pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put the trunk at the head of the free list. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_cache(pool, idx);
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks = NULL;
	struct mlx5_indexed_cache *gc = pool->gc;
	uint32_t i, n_trunk_valid = 0;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	if (pool->cfg.per_core_cache) {
		for (i = 0; i <= RTE_MAX_LCORE; i++) {
			/*
			 * Free only the stale global caches still referenced
			 * by the per-core caches. pool->gc itself is freed
			 * last.
			 */
			if (pool->cache[i]) {
				if (pool->cache[i]->lc &&
				    pool->cache[i]->lc != pool->gc &&
				    (!(--pool->cache[i]->lc->ref_cnt)))
					pool->cfg.free(pool->cache[i]->lc);
				pool->cfg.free(pool->cache[i]);
			}
		}
		if (gc) {
			trunks = gc->trunks;
			n_trunk_valid = gc->n_trunk_valid;
		}
	} else {
		gc = NULL;
		trunks = pool->trunks;
		n_trunk_valid = pool->n_trunk_valid;
	}
	for (i = 0; i < n_trunk_valid; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (!gc && trunks)
		pool->cfg.free(trunks);
	if (gc)
		pool->cfg.free(gc);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

void
mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
{
	uint32_t i, j;
	struct mlx5_indexed_cache *gc;
	struct rte_bitmap *ibmp;
	uint32_t bmp_num, mem_size;

	if (!pool->cfg.per_core_cache)
		return;
	gc = pool->gc;
	if (!gc)
		return;
	/* Rebuild the bitmap of allocated indices. */
	bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
	mem_size = rte_bitmap_get_memory_footprint(bmp_num);
	pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
					 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!pool->bmp_mem) {
		DRV_LOG(ERR, "Ipool bitmap mem allocate failed.");
		return;
	}
	ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
	if (!ibmp) {
		pool->cfg.free(pool->bmp_mem);
		pool->bmp_mem = NULL;
		DRV_LOG(ERR, "Ipool bitmap create failed.");
		return;
	}
	pool->ibmp = ibmp;
	/* Clear the bits of indices sitting in the global cache. */
	for (i = 0; i < gc->len; i++)
		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
	/* Clear the bits of indices sitting in the per-core caches. */
	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];

		if (!ilc)
			continue;
		for (j = 0; j < ilc->len; j++)
			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
	}
}

static void *
mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	struct rte_bitmap *ibmp;
	uint64_t slab = 0;
	uint32_t iidx = *pos;

	ibmp = pool->ibmp;
	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
		if (pool->bmp_mem) {
			pool->cfg.free(pool->bmp_mem);
			pool->bmp_mem = NULL;
			pool->ibmp = NULL;
		}
		return NULL;
	}
	iidx += __builtin_ctzll(slab);
	rte_bitmap_clear(ibmp, iidx);
	iidx++;
	*pos = iidx;
	return mlx5_ipool_get_cache(pool, iidx);
}

void *
mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	uint32_t idx = *pos;
	void *entry;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_next_cache(pool, pos);
	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
		entry = mlx5_ipool_get(pool, idx);
		if (entry) {
			*pos = idx;
			return entry;
		}
		idx++;
	}
	return NULL;
}

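/*
 * Iteration sketch: walk every allocated entry. For a pool with
 * per_core_cache enabled, mlx5_ipool_flush_cache() must run first so the
 * walk bitmap exists (it is a no-op otherwise). The loop shape below
 * mirrors how the driver iterates ipools.
 */
#ifdef MLX5_UTILS_EXAMPLES
static void
mlx5_utils_example_pool_walk(struct mlx5_indexed_pool *pool)
{
	uint32_t pos = 0;
	void *entry;

	mlx5_ipool_flush_cache(pool);
	for (entry = mlx5_ipool_get_next(pool, &pos); entry;
	     pos++, entry = mlx5_ipool_get_next(pool, &pos)) {
		/* ... inspect the entry at index "pos" ... */
	}
}
#endif /* MLX5_UTILS_EXAMPLES */
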
void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %u entries per base trunk, "
	       "total entries: %u\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size,
	       mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid));
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}

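/*
 * Level-table background for the functions below: the 32-bit key is split
 * into three fields by the MLX5_L3T_GT/MT/ET offsets and masks, selecting
 * a slot in the global table, then a middle table, then an entry table.
 * The two upper levels are plain pointer arrays allocated on demand; only
 * the leaf entry tables come from the indexed pool created above, and each
 * level keeps a ref_cnt of the populated slots below it so empty tables
 * can be released.
 */
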
void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) &
				   MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. The reference count
	 * is increased when a new sub-table is created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. The reference count
	 * is increased when a new sub-entry table is created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

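/*
 * Set/get/clear sketch for a DWORD level table, under the hypothetical
 * MLX5_UTILS_EXAMPLES guard. Note the EEXIST convention of the setter: on
 * an already-populated slot it returns -1, takes an extra reference and
 * hands back the existing value in "data".
 */
#ifdef MLX5_UTILS_EXAMPLES
static int
mlx5_utils_example_l3t_use(struct mlx5_l3t_tbl *tbl, uint32_t key)
{
	union mlx5_l3t_data data = { .dword = 0xdeadbeef };

	if (mlx5_l3t_set_entry(tbl, key, &data) && rte_errno != EEXIST)
		return -rte_errno;
	/* Each successful get takes a reference that clear releases. */
	if (mlx5_l3t_get_entry(tbl, key, &data))
		return -ENOENT;
	mlx5_l3t_clear_entry(tbl, key); /* Drop the get reference. */
	mlx5_l3t_clear_entry(tbl, key); /* Drop the set reference. */
	return 0;
}
#endif /* MLX5_UTILS_EXAMPLES */
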
int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if the entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* The entry data is not ready; use the user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the newly allocated data to the entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
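
/*
 * Finally, a prepare_entry sketch: the callback is invoked only when the
 * slot is still empty, which makes the call an atomic lookup-or-create
 * under the table spinlock. The callback body below is an illustrative
 * assumption.
 */
#ifdef MLX5_UTILS_EXAMPLES
static int32_t
mlx5_utils_example_alloc_cb(void *ctx, union mlx5_l3t_data *data)
{
	/* Pretend "ctx" carries the value to install. */
	data->dword = *(uint32_t *)ctx;
	return 0;
}

static int32_t
mlx5_utils_example_l3t_prepare(struct mlx5_l3t_tbl *tbl, uint32_t key)
{
	union mlx5_l3t_data data = { .dword = 0 };
	uint32_t val = 42;

	return mlx5_l3t_prepare_entry(tbl, key, &data,
				      mlx5_utils_example_alloc_cb, &val);
}
#endif /* MLX5_UTILS_EXAMPLES */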