1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_UTILS_H_
7 #define RTE_PMD_MLX5_UTILS_H_
15 #include <rte_spinlock.h>
16 #include <rte_rwlock.h>
17 #include <rte_memory.h>
18 #include <rte_bitmap.h>
20 #include <mlx5_common.h>
22 #include "mlx5_defs.h"
24 /* Convert a bit number to the corresponding 64-bit mask */
25 #define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
27 /* Save and restore errno around argument evaluation. */
28 #define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
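/*
 * A minimal usage sketch (hypothetical error path): the errno set by a failed
 * call survives the cleanup performed while the macro argument is evaluated.
 *
 *	if (write(fd, buf, len) < 0) {
 *		ERRNO_SAFE(close(fd)); // close() cannot clobber errno here
 *		return -errno;         // still the errno set by write()
 *	}
 */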
30 extern int mlx5_logtype;
32 #define MLX5_NET_LOG_PREFIX "mlx5_net"
34 /* Generic printf()-like logging macro with automatic line feed. */
35 #define DRV_LOG(level, ...) \
36 PMD_DRV_LOG_(level, mlx5_logtype, MLX5_NET_LOG_PREFIX, \
37 __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
40 /* Convenience macros for accessing mbuf fields. */
41 #define NEXT(m) ((m)->next)
42 #define DATA_LEN(m) ((m)->data_len)
43 #define PKT_LEN(m) ((m)->pkt_len)
44 #define DATA_OFF(m) ((m)->data_off)
45 #define SET_DATA_OFF(m, o) ((m)->data_off = (o))
46 #define NB_SEGS(m) ((m)->nb_segs)
47 #define PORT(m) ((m)->port)
49 /* Transpose flags. Useful to convert IBV to DPDK flags. */
50 #define TRANSPOSE(val, from, to) \
52 (((val) & (from)) / ((from) / (to))) : \
53 (((val) & (from)) * ((to) / (from))))
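/*
 * A worked example with made-up flag values: moving a flag from bit 0x10 in
 * the source bitfield to bit 0x4 in the destination bitfield.
 *
 *	TRANSPOSE(0x10, 0x10, 0x4) == 0x4
 *	TRANSPOSE(0x00, 0x10, 0x4) == 0x0
 *	TRANSPOSE(0x01, 0x04, 0x10) == 0x0  // unrelated bits are masked out
 */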
 * For data that is linked to a sequentially increasing index, an array
 * table is more efficient than a hash table when one data entry has to be
 * searched among a large number of entries. A traditional hash table has a
 * fixed size, so saving a huge number of entries to it also causes a lot of
 * hash collisions.
 * A simple array table also has a fixed size, and allocating all the needed
 * memory at once wastes a lot of it. When the exact number of entries is
 * not known in advance, it is impossible to size the array at all.
 * A multi-level table balances these two disadvantages. A global high-level
 * table holding sub-table entries is allocated first; a sub-table is then
 * allocated only once an index entry belonging to it needs to be saved.
 * E.g. for a 32-bit index and a three-level table with a 10-10-12 split,
 * with sequentially increasing indices the memory grows by one 4K-entry
 * sub-table at a time.
 * The current implementation introduces a Three-Level table with a 10-10-12
 * split of the 32-bit index to help cases that have millions of entries to
 * save. An index entry can be addressed directly by its index, so no
 * search is needed.
79 /* L3 table global table define. */
80 #define MLX5_L3T_GT_OFFSET 22
81 #define MLX5_L3T_GT_SIZE (1 << 10)
82 #define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)
84 /* L3 table middle table define. */
85 #define MLX5_L3T_MT_OFFSET 12
86 #define MLX5_L3T_MT_SIZE (1 << 10)
87 #define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)
89 /* L3 table entry table define. */
90 #define MLX5_L3T_ET_OFFSET 0
91 #define MLX5_L3T_ET_SIZE (1 << 12)
92 #define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)
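/*
 * How a 32-bit index decomposes under the 10-10-12 split, using the
 * offsets and masks above:
 *
 *	gt_idx = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK; // bits 22..31
 *	mt_idx = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK; // bits 12..21
 *	et_idx = (idx >> MLX5_L3T_ET_OFFSET) & MLX5_L3T_ET_MASK; // bits 0..11
 *
 * E.g. idx = 0x00ABCDEF gives gt_idx = 0x2, mt_idx = 0x2BC, et_idx = 0xDEF.
 */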
96 MLX5_L3T_TYPE_WORD = 0,
103 struct mlx5_indexed_pool;
105 /* Generic data struct. */
106 union mlx5_l3t_data {
113 /* L3 level table data structure. */
114 struct mlx5_l3t_level_tbl {
115 uint64_t ref_cnt; /* Table ref_cnt. */
116 void *tbl[]; /* Table array. */
119 /* L3 word entry table data structure. */
120 struct mlx5_l3t_entry_word {
121 uint32_t idx; /* Table index. */
122 uint64_t ref_cnt; /* Table ref_cnt. */
126 } entry[MLX5_L3T_ET_SIZE]; /* Entry array */
129 /* L3 double word entry table data structure. */
130 struct mlx5_l3t_entry_dword {
131 uint32_t idx; /* Table index. */
132 uint64_t ref_cnt; /* Table ref_cnt. */
136 } entry[MLX5_L3T_ET_SIZE]; /* Entry array */
139 /* L3 quad word entry table data structure. */
140 struct mlx5_l3t_entry_qword {
141 uint32_t idx; /* Table index. */
142 uint64_t ref_cnt; /* Table ref_cnt. */
146 } entry[MLX5_L3T_ET_SIZE]; /* Entry array */
149 /* L3 pointer entry table data structure. */
150 struct mlx5_l3t_entry_ptr {
151 uint32_t idx; /* Table index. */
152 uint64_t ref_cnt; /* Table ref_cnt. */
156 } entry[MLX5_L3T_ET_SIZE]; /* Entry array */
159 /* L3 table data structure. */
160 struct mlx5_l3t_tbl {
161 enum mlx5_l3t_type type; /* Table type. */
162 struct mlx5_indexed_pool *eip;
163 /* Table index pool handles. */
164 struct mlx5_l3t_level_tbl *tbl; /* Global table index. */
165 rte_spinlock_t sl; /* The table lock. */
168 /** Type of function that is used to handle the data before freeing. */
169 typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
170 union mlx5_l3t_data *data);
 * The index of an indexed memory entry is made up of the trunk index and the
 * offset of the entry within the trunk. Since the entry index is 32 bits, a
 * user who prefers small trunks can change the macro below to a bigger
 * number, which lets the pool contain more trunks while still holding lots
 * of entries.
179 #define TRUNK_IDX_BITS 16
180 #define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
181 #define TRUNK_INVALID TRUNK_MAX_IDX
182 #define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
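/*
 * With the defaults above, TRUNK_MAX_IDX is 65535 and a trunk holds
 * MLX5_IPOOL_DEFAULT_TRUNK_SIZE = 1 << (28 - 16) = 4096 entries.
 */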
183 #ifdef RTE_LIBRTE_MLX5_DEBUG
187 struct mlx5_indexed_pool_config {
188 uint32_t size; /* Pool entry size. */
189 uint32_t trunk_size:22;
	 * Trunk entry number. Must be a power of 2. It can be increased
	 * if trunk growing is enabled: the trunk entry number grows by
	 * left-shifting it by grow_shift. Trunks whose index is beyond
	 * grow_trunk keep the same entry number as the last grown trunk.
196 uint32_t grow_trunk:4;
	 * Number of growing trunks in the pool. Set it to 0 to make the
	 * pool work as a fixed trunk-size pool. It takes effect only if
	 * grow_shift is not 0.
202 uint32_t grow_shift:4;
	 * Shift value by which the trunk entry number grows; growing stops
	 * after grow_trunk trunks. It takes effect only if grow_trunk is not 0.
207 uint32_t need_lock:1;
208 /* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
210 const char *type; /* Memory allocate type name. */
211 void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
213 /* User defined memory allocator. */
214 void (*free)(void *addr); /* User defined memory release. */
217 struct mlx5_indexed_trunk {
218 uint32_t idx; /* Trunk id. */
219 uint32_t prev; /* Previous free trunk in free list. */
220 uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
222 struct rte_bitmap *bmp;
223 uint8_t data[] __rte_cache_aligned; /* Entry data start. */
226 struct mlx5_indexed_pool {
227 struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
228 rte_spinlock_t lock; /* Pool lock for multiple thread usage. */
229 uint32_t n_trunk_valid; /* Trunks allocated. */
230 uint32_t n_trunk; /* Trunk pointer array size. */
231 /* Dim of trunk pointer array. */
232 struct mlx5_indexed_trunk **trunks;
233 uint32_t free_list; /* Index to first free trunk. */
237 uint32_t trunk_avail;
238 uint32_t trunk_empty;
241 uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
245 * Return logarithm of the nearest power of two above input value.
251 * Logarithm of the nearest power of two above input value.
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}
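/*
 * For instance, log2above(1) == 0, log2above(8) == 3 and log2above(9) == 4,
 * i.e. the ceiling of log2 of the input.
 */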
264 #define MLX5_HLIST_DIRECT_KEY 0x0001 /* Use the key directly as hash index. */
265 #define MLX5_HLIST_WRITE_MOST 0x0002 /* List mostly used for append new. */
267 /** Maximum size of string for naming the hlist table. */
268 #define MLX5_HLIST_NAMESIZE 32
 * Structure of an entry in the hash list. The user should define their own
 * struct that contains this one in order to store the data. The 'key' is
 * 64 bits for now and it is the user's responsibility to guarantee there is
 * no collision.
277 struct mlx5_hlist_entry {
278 LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */
279 uint32_t idx; /* Bucket index the entry belongs to. */
280 uint32_t ref_cnt; /* Reference count. */
283 /** Structure for hash head. */
284 LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry);
287 * Type of callback function for entry removal.
292 * The entry in the list.
294 typedef void (*mlx5_hlist_remove_cb)(struct mlx5_hlist *list,
295 struct mlx5_hlist_entry *entry);
298 * Type of function for user defined matching.
303 * The entry in the list.
307 * The pointer to new entry context.
310 * 0 if matching, non-zero number otherwise.
312 typedef int (*mlx5_hlist_match_cb)(struct mlx5_hlist *list,
313 struct mlx5_hlist_entry *entry,
314 uint64_t key, void *ctx);
317 * Type of function for user defined hash list entry creation.
322 * The key of the new entry.
324 * The pointer to new entry context.
327 * Pointer to allocated entry on success, NULL otherwise.
329 typedef struct mlx5_hlist_entry *(*mlx5_hlist_create_cb)
330 (struct mlx5_hlist *list,
331 uint64_t key, void *ctx);
333 /* Hash list bucket head. */
334 struct mlx5_hlist_bucket {
335 struct mlx5_hlist_head head; /* List head. */
336 rte_rwlock_t lock; /* Bucket lock. */
337 uint32_t gen_cnt; /* List modification will update generation count. */
338 } __rte_cache_aligned;
341 * Hash list table structure
 * An entry in the hash list is reused if it already exists: the reference
 * count is increased and the existing entry is returned.
 * When an entry is destroyed, its reference count is decreased and the entry
 * is freed only when no reference to it remains.
350 char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */
	/**< Number of heads, must be a power of 2. */
353 uint32_t entry_sz; /**< Size of entry, used to allocate entry. */
354 /**< mask to get the index of the list heads. */
356 bool direct_key; /* Use the new entry key directly as hash index. */
357 bool write_most; /* List mostly used for append new or destroy. */
359 mlx5_hlist_create_cb cb_create; /**< entry create callback. */
360 mlx5_hlist_match_cb cb_match; /**< entry match callback. */
361 mlx5_hlist_remove_cb cb_remove; /**< entry remove callback. */
362 struct mlx5_hlist_bucket buckets[] __rte_cache_aligned;
363 /**< list bucket arrays. */
 * Create a hash list table. The user can specify the size of the list-heads
 * array; for now the size should be a power of 2 in order to get a better
 * distribution for the entries. Each entry is a part of the whole data element
 * and the caller is responsible for the data element's allocation and
 * cleanup / free. The key of each entry is hashed with CRC in order to
 * generate a slightly fairer distribution.
 *   Name of the hash list (optional).
377 * Heads array size of the hash list.
379 * Entry size to allocate if cb_create not specified.
381 * The hash list attribute flags.
383 * Callback function for entry create.
385 * Callback function for entry match.
387 * Callback function for entry destroy.
389 * Pointer of the hash list table created, NULL on failure.
391 struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size,
392 uint32_t entry_size, uint32_t flags,
393 mlx5_hlist_create_cb cb_create,
394 mlx5_hlist_match_cb cb_match,
395 mlx5_hlist_remove_cb cb_destroy);
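/*
 * A minimal usage sketch with hypothetical names: the user embeds a
 * struct mlx5_hlist_entry at the start of their own structure (so the entry
 * pointer can be cast back) and provides the three callbacks.
 *
 *	struct my_obj {
 *		struct mlx5_hlist_entry entry; // list linkage, placed first
 *		uint64_t key;
 *		// ... user data ...
 *	};
 *
 *	struct mlx5_hlist *h = mlx5_hlist_create("my_objs", 64, 0, 0,
 *						 my_obj_create, my_obj_match,
 *						 my_obj_remove);
 *	struct mlx5_hlist_entry *e = mlx5_hlist_register(h, key, ctx);
 *	...
 *	mlx5_hlist_unregister(h, e);
 *	mlx5_hlist_destroy(h);
 */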
398 * Search an entry matching the key.
 * The returned entry might be destroyed by another thread, so use
 * this function only in the main thread.
 *   Pointer to the hash list table.
406 * Key for the searching entry.
408 * Common context parameter used by entry callback function.
411 * Pointer of the hlist entry if found, NULL otherwise.
413 struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key,
 * Insert an entry into the hash list table. The entry is only part of the whole
 * data element and a 64-bit key is used for matching. The user should construct
 * the key or give a calculated hash signature and guarantee there is no
 * collision.
 *   Pointer to the hash list table.
424 * Entry to be inserted into the hash list table.
426 * Common context parameter used by callback function.
 *   Registered entry on success, NULL otherwise.
431 struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
 * Remove an entry from the hash list table. User should guarantee the validity of the entry.
 *   Pointer to the hash list table. (not used)
441 * Entry to be removed from the hash list table.
443 * 0 on entry removed, 1 on entry still referenced.
445 int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);
448 * Destroy the hash list table, all the entries already inserted into the lists
449 * will be handled by the callback function provided by the user (including
450 * free if needed) before the table is freed.
 *   Pointer to the hash list table.
455 void mlx5_hlist_destroy(struct mlx5_hlist *h);
457 /************************ cache list *****************************/
459 /** Maximum size of string for naming. */
460 #define MLX5_NAME_SIZE 32
462 struct mlx5_cache_list;
 * Structure of an entry in the cache list. The user should define their own
 * struct that contains this one in order to store the data.
468 struct mlx5_cache_entry {
469 LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
470 uint32_t ref_cnt; /* Reference count. */
474 * Type of callback function for entry removal.
479 * The entry in the list.
481 typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
482 struct mlx5_cache_entry *entry);
485 * Type of function for user defined matching.
490 * The entry in the list.
492 * The pointer to new entry context.
495 * 0 if matching, non-zero number otherwise.
497 typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
498 struct mlx5_cache_entry *entry, void *ctx);
501 * Type of function for user defined cache list entry creation.
 *   The newly allocated entry, or NULL if the list entry size is unspecified;
 *   in that case the new entry has to be allocated in the callback and
 *   returned.
509 * The pointer to new entry context.
512 * Pointer of entry on success, NULL otherwise.
514 typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
515 (struct mlx5_cache_list *list,
516 struct mlx5_cache_entry *entry,
520 * Linked cache list structure.
 * An entry in the cache list is reused if it already exists:
 * its reference count is increased and the existing entry is returned.
 * When an entry is destroyed, its reference count is decreased and the
 * entry is freed only when no reference to it remains.
 * The linked-list cache is designed for caching a limited number of
 * entries that are mostly read and rarely modified.
 * For caching a huge number of entries, please consider the hash list.
534 struct mlx5_cache_list {
535 char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
536 uint32_t entry_sz; /**< Entry size, 0: use create callback. */
537 rte_rwlock_t lock; /* read/write lock. */
538 uint32_t gen_cnt; /* List modification will update generation count. */
	uint32_t count; /* Number of entries in the list. */
	void *ctx; /* User context passed to the callbacks. */
541 mlx5_cache_create_cb cb_create; /**< entry create callback. */
542 mlx5_cache_match_cb cb_match; /**< entry match callback. */
543 mlx5_cache_remove_cb cb_remove; /**< entry remove callback. */
544 LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
548 * Initialize a cache list.
 *   Pointer to the cache list.
553 * Name of the cache list.
555 * Entry size to allocate, 0 to allocate by creation callback.
557 * Pointer to the list context data.
559 * Callback function for entry create.
561 * Callback function for entry match.
563 * Callback function for entry remove.
565 * 0 on success, otherwise failure.
567 int mlx5_cache_list_init(struct mlx5_cache_list *list,
568 const char *name, uint32_t entry_size, void *ctx,
569 mlx5_cache_create_cb cb_create,
570 mlx5_cache_match_cb cb_match,
571 mlx5_cache_remove_cb cb_remove);
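/*
 * A minimal usage sketch with hypothetical names, mirroring the hash list
 * usage but for a small, read-mostly set of entries:
 *
 *	struct my_cache_obj {
 *		struct mlx5_cache_entry entry; // list linkage, placed first
 *		// ... user data ...
 *	};
 *
 *	struct mlx5_cache_list list;
 *
 *	mlx5_cache_list_init(&list, "my_cache", sizeof(struct my_cache_obj),
 *			     my_ctx, my_cache_create, my_cache_match,
 *			     my_cache_remove);
 *	struct mlx5_cache_entry *e = mlx5_cache_register(&list, lookup_ctx);
 *	...
 *	mlx5_cache_unregister(&list, e);
 *	mlx5_cache_list_destroy(&list);
 */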
574 * Search an entry matching the key.
 * The returned entry might be destroyed by another thread, so use
 * this function only in the main thread.
580 * Pointer to the cache list.
582 * Common context parameter used by entry callback function.
585 * Pointer of the cache entry if found, NULL otherwise.
587 struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
591 * Reuse or create an entry to the cache list.
 *   Pointer to the cache list.
596 * Common context parameter used by callback function.
 *   Registered entry on success, NULL otherwise.
601 struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
605 * Remove an entry from the cache list.
607 * User should guarantee the validity of the entry.
 *   Pointer to the cache list.
612 * Entry to be removed from the cache list table.
614 * 0 on entry removed, 1 on entry still referenced.
616 int mlx5_cache_unregister(struct mlx5_cache_list *list,
617 struct mlx5_cache_entry *entry);
620 * Destroy the cache list.
623 * Pointer to the cache list.
625 void mlx5_cache_list_destroy(struct mlx5_cache_list *list);
628 * Get entry number from the cache list.
 *   Pointer to the cache list.
633 * Cache list entry number.
636 mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list);
638 /********************************* indexed pool *************************/
641 * This function allocates non-initialized memory entry from pool.
642 * In NUMA systems, the memory entry allocated resides on the same
643 * NUMA socket as the core that calls this function.
645 * Memory entry is allocated from memory trunk, no alignment.
648 * Pointer to indexed memory entry pool.
649 * No initialization required.
651 * Pointer to memory to save allocated index.
 *   The memory index is always a positive value.
654 * - Pointer to the allocated memory entry.
655 * - NULL on error. Not enough memory, or invalid arguments.
657 void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);
660 * This function allocates zero initialized memory entry from pool.
661 * In NUMA systems, the memory entry allocated resides on the same
662 * NUMA socket as the core that calls this function.
664 * Memory entry is allocated from memory trunk, no alignment.
667 * Pointer to indexed memory pool.
668 * No initialization required.
670 * Pointer to memory to save allocated index.
 *   The memory index is always a positive value.
 *   - Pointer to the allocated memory entry.
674 * - NULL on error. Not enough memory, or invalid arguments.
676 void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);
679 * This function frees indexed memory entry to pool.
680 * Caller has to make sure that the index is allocated from same pool.
683 * Pointer to indexed memory pool.
685 * Allocated memory entry index.
687 void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);
690 * This function returns pointer of indexed memory entry from index.
 * Caller has to make sure that the index is valid, and allocated from the
 * same pool.
695 * Pointer to indexed memory pool.
697 * Allocated memory index.
699 * - Pointer to indexed memory entry.
701 void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);
704 * This function creates indexed memory pool.
 * Caller has to fill in the pool configuration accordingly.
708 * Pointer to indexed memory pool.
 *   Pointer to the indexed memory pool configuration.
712 struct mlx5_indexed_pool *
713 mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);
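/*
 * A minimal usage sketch (hypothetical entry type and sizes; the malloc/free
 * hooks are left unset here on the assumption that the implementation falls
 * back to its default allocator):
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(struct my_entry),
 *		.trunk_size = 64, // must be a power of 2
 *		.need_lock = 1,
 *		.type = "my_entry_ipool",
 *	};
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	struct my_entry *e = mlx5_ipool_zmalloc(pool, &idx);
 *	...
 *	e = mlx5_ipool_get(pool, idx); // retrieve by index later
 *	mlx5_ipool_free(pool, idx);
 *	mlx5_ipool_destroy(pool);
 */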
716 * This function releases all resources of pool.
 * Caller has to make sure that all indexes and memory allocated
 * from this pool are not referenced anymore.
721 * Pointer to indexed memory pool.
723 * - non-zero value on error.
726 int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);
729 * This function dumps debug info of pool.
732 * Pointer to indexed memory pool.
734 void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);
737 * This function allocates new empty Three-level table.
 *   The l3t can be set as word, double word, quad word or pointer with index.
743 * - Pointer to the allocated l3t.
744 * - NULL on error. Not enough memory, or invalid arguments.
746 struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type);
749 * This function destroys Three-level table.
752 * Pointer to the l3t.
754 void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);
757 * This function gets the index entry from Three-level table.
760 * Pointer to the l3t.
762 * Index to the entry.
764 * Pointer to the memory which saves the entry data.
 *   When the function call returns 0, data contains the entry data retrieved
 *   from the table.
 *   When the function call returns -1, data is not modified.
770 * 0 if success, -1 on error.
773 int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
774 union mlx5_l3t_data *data);
777 * This function gets the index entry from Three-level table.
 * If the index entry is not available, a new one is allocated via the
 * callback function and filled into the entry.
783 * Pointer to the l3t.
785 * Index to the entry.
787 * Pointer to the memory which saves the entry data.
 *   When the function call returns 0, data contains the entry data retrieved
 *   from the table.
 *   When the function call returns -1, data is not modified.
792 * Callback function to allocate new data.
794 * Context for callback function.
797 * 0 if success, -1 on error.
800 int32_t mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
801 union mlx5_l3t_data *data,
802 mlx5_l3t_alloc_callback_fn cb, void *ctx);
 * This function decreases the reference counter of an index entry in the
 * Three-level table and clears the entry once the counter reaches 0.
809 * Pointer to the l3t.
811 * Index to the entry.
 *   The remaining reference count; 0 means the entry was cleared, -1 on error.
816 int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx);
 * This function sets the index entry in the Three-level table.
 * If the entry is already set, errno is set to EEXIST and the already
 * stored data is returned in data.
824 * Pointer to the l3t.
826 * Index to the entry.
827 * @param data[in/out]
 *   Pointer to the memory which contains the entry data to save to the l3t.
 *   If the entry is already set, the stored data is filled in instead.
832 * 0 if success, -1 on error.
834 int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
835 union mlx5_l3t_data *data);
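/*
 * A minimal usage sketch (hypothetical index 'id'; assumes the 32-bit member
 * of union mlx5_l3t_data is named 'word'):
 *
 *	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_WORD);
 *	union mlx5_l3t_data data;
 *
 *	data.word = 0xCAFE;                 // value to store
 *	mlx5_l3t_set_entry(tbl, id, &data); // save it under index 'id'
 *	mlx5_l3t_get_entry(tbl, id, &data); // read it back
 *	mlx5_l3t_clear_entry(tbl, id);      // drop the reference
 *	mlx5_l3t_destroy(tbl);
 */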
838 * Macros for linked list based on indexed memory.
839 * Example data structure:
841 * ILIST_ENTRY(uint16_t) next;
846 #define ILIST_ENTRY(type) \
848 type prev; /* Index of previous element. */ \
849 type next; /* Index of next element. */ \
852 #define ILIST_INSERT(pool, head, idx, elem, field) \
855 MLX5_ASSERT((elem) && (idx)); \
856 (elem)->field.next = *(head); \
857 (elem)->field.prev = 0; \
859 (peer) = mlx5_ipool_get(pool, *(head)); \
861 (peer)->field.prev = (idx); \
866 #define ILIST_REMOVE(pool, head, idx, elem, field) \
871 if ((elem)->field.prev) { \
872 (peer) = mlx5_ipool_get \
873 (pool, (elem)->field.prev); \
875 (peer)->field.next = (elem)->field.next;\
877 if ((elem)->field.next) { \
878 (peer) = mlx5_ipool_get \
879 (pool, (elem)->field.next); \
881 (peer)->field.prev = (elem)->field.prev;\
883 if (*(head) == (idx)) \
884 *(head) = (elem)->field.next; \
887 #define ILIST_FOREACH(pool, head, idx, elem, field) \
888 for ((idx) = (head), (elem) = \
889 (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
890 idx = (elem)->field.next, (elem) = \
891 (idx) ? mlx5_ipool_get(pool, idx) : NULL)
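/*
 * A minimal usage sketch (hypothetical element type kept in an indexed pool;
 * 'pool', 'new_idx' and 'new_node' are assumed to come from
 * mlx5_ipool_zmalloc()):
 *
 *	struct my_node {
 *		ILIST_ENTRY(uint32_t) next; // embedded prev/next indexes
 *		// ... user data ...
 *	};
 *
 *	uint32_t head = 0; // index 0 means "no entry"
 *	uint32_t idx;
 *	struct my_node *node;
 *
 *	ILIST_INSERT(pool, &head, new_idx, new_node, next);
 *	ILIST_FOREACH(pool, head, idx, node, next) {
 *		// ... visit node ...
 *	}
 *	ILIST_REMOVE(pool, &head, new_idx, new_node, next);
 */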
893 /* Single index list. */
894 #define SILIST_ENTRY(type) \
896 type next; /* Index of next element. */ \
899 #define SILIST_INSERT(head, idx, elem, field) \
901 MLX5_ASSERT((elem) && (idx)); \
902 (elem)->field.next = *(head); \
906 #define SILIST_FOREACH(pool, head, idx, elem, field) \
907 for ((idx) = (head), (elem) = \
908 (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
909 idx = (elem)->field.next, (elem) = \
910 (idx) ? mlx5_ipool_get(pool, idx) : NULL)
912 #endif /* RTE_PMD_MLX5_UTILS_H_ */