/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */
6 #ifndef RTE_PMD_MLX5_UTILS_H_
7 #define RTE_PMD_MLX5_UTILS_H_
15 #include <rte_spinlock.h>
16 #include <rte_memory.h>
17 #include <rte_bitmap.h>
19 #include <mlx5_common.h>
21 #include "mlx5_defs.h"
/*
 * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
 * Otherwise there would be a type conflict between stdbool and altivec.
 */
#if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
#undef bool
/* redefine as in stdbool.h */
#define bool _Bool
#endif
/* Convert a bit number to the corresponding 64-bit mask */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
/* Save and restore errno around argument evaluation. */
/* The compound literal captures the current errno, evaluates (x) purely for
 * its side effects (the ", 0" discards its value), then assigns the saved
 * errno back — so a library call inside (x) cannot clobber errno.
 */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
/* Driver log type id; the definition lives in a .c file of this PMD. */
extern int mlx5_logtype;

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)
/* Convenience wrappers around DRV_LOG() for the common log levels. */
#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
/* Convenience macros for accessing mbuf fields. */
/* Each expands to a plain field access on an rte_mbuf pointer; (m) is
 * evaluated exactly once, so side-effecting arguments are safe.
 */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)
/* Transpose flags. Useful to convert IBV to DPDK flags. */
/* Moves the bits selected by (from) to the position of (to): divides when
 * shifting right (from >= to), multiplies when shifting left. Both masks
 * must be single-bit (or aligned multi-bit) values for the ratio to be a
 * power of two.
 */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	(((val) & (from)) / ((from) / (to))) : \
	(((val) & (from)) * ((to) / (from))))
/*
 * The indexed memory entry index is made up of trunk index and offset of
 * the entry in the trunk. Since the entry index is 32 bits, in case user
 * prefers to have small trunks, user can change the macro below to a big
 * number which helps the pool contains more trunks with lots of entries
 * allocated.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif
/* User-supplied configuration of an indexed memory pool. */
struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be power of 2. It can be increased
	 * if trunk_grow enable. The trunk entry number increases with
	 * left shift grow_shift. Trunks with index are after grow_trunk
	 * will keep the entry number same with the last grow trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Trunks with entry number increase in the pool. Set it to 0
	 * to make the pool works as trunk entry fixed pool. It works
	 * only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Trunk entry number increase shift value, stop after grow_trunk.
	 * It works only if grow_trunk is not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	const char *type; /* Memory allocate type name. */
	void *(*malloc)(const char *type, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};
111 struct mlx5_indexed_trunk {
112 uint32_t idx; /* Trunk id. */
113 uint32_t prev; /* Previous free trunk in free list. */
114 uint32_t next; /* Next free trunk in free list. */
115 uint32_t free; /* Free entries available */
116 struct rte_bitmap *bmp;
117 uint8_t data[] __rte_cache_min_aligned; /* Entry data start. */
120 struct mlx5_indexed_pool {
121 struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
122 rte_spinlock_t lock; /* Pool lock for multiple thread usage. */
123 uint32_t n_trunk_valid; /* Trunks allocated. */
124 uint32_t n_trunk; /* Trunk pointer array size. */
125 /* Dim of trunk pointer array. */
126 struct mlx5_indexed_trunk **trunks;
127 uint32_t free_list; /* Index to first free trunk. */
131 uint32_t trunk_avail;
132 uint32_t trunk_empty;
135 uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
/**
 * Return logarithm of the nearest power of two above input value.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Logarithm of the nearest power of two above input value.
 */
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	/* l counts the position of the highest set bit; r becomes 1 when any
	 * lower bit is set, i.e. when v is not an exact power of two.
	 */
	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}
/** Maximum size of string for naming the hlist table. */
#define MLX5_HLIST_NAMESIZE 32

/**
 * Structure of the entry in the hash list, user should define its own struct
 * that contains this in order to store the data. The 'key' is 64-bits right
 * now and its user's responsibility to guarantee there is no collision.
 */
struct mlx5_hlist_entry {
	LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */
	uint64_t key; /* user defined 'key', could be the hash signature. */
};
/** Structure for hash head. */
LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry);
/** Type of function that is used to handle the data before freeing. */
/* Invoked for each entry still in the table at destroy time; p is the entry,
 * ctx is the caller-supplied context.
 */
typedef void (*mlx5_hlist_destroy_callback_fn)(void *p, void *ctx);
177 /** hash list table structure */
179 char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */
180 /**< number of heads, need to be power of 2. */
182 /**< mask to get the index of the list heads. */
184 struct mlx5_hlist_head heads[]; /**< list head arrays. */
/**
 * Create a hash list table, the user can specify the list heads array size
 * of the table, now the size should be a power of 2 in order to get better
 * distribution for the entries. Each entry is a part of the whole data element
 * and the caller should be responsible for the data element's allocation and
 * cleanup / free. Key of each entry will be calculated with CRC in order to
 * generate a little fairer distribution.
 *
 * @param name
 *   Name of the hash list(optional).
 * @param size
 *   Heads array size of the hash list.
 *
 * @return
 *   Pointer of the hash list table created, NULL on failure.
 */
struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size);
/**
 * Search an entry matching the key.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key for the searching entry.
 *
 * @return
 *   Pointer of the hlist entry if found, NULL otherwise.
 */
struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key);
/**
 * Insert an entry to the hash list table, the entry is only part of whole data
 * element and a 64B key is used for matching. User should construct the key or
 * give a calculated hash signature and guarantee there is no collision.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param entry
 *   Entry to be inserted into the hash list table.
 *
 * @return
 *   - zero for success.
 *   - -EEXIST if the entry is already inserted.
 */
int mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);
235 * Remove an entry from the hash list table. User should guarantee the validity
239 * Pointer to the hast list table. (not used)
241 * Entry to be removed from the hash list table.
243 void mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
244 struct mlx5_hlist_entry *entry);
247 * Destroy the hash list table, all the entries already inserted into the lists
248 * will be handled by the callback function provided by the user (including
249 * free if needed) before the table is freed.
252 * Pointer to the hast list table.
254 * Callback function for each inserted entry when destroying the hash list.
256 * Common context parameter used by callback function for each entry.
258 void mlx5_hlist_destroy(struct mlx5_hlist *h,
259 mlx5_hlist_destroy_callback_fn cb, void *ctx);
/**
 * This function allocates non-initialized memory entry from pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * Memory entry is allocated from memory trunk, no alignment.
 *
 * @param pool
 *   Pointer to indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save allocated index.
 *   Memory index always positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);
/**
 * This function allocates zero initialized memory entry from pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * Memory entry is allocated from memory trunk, no alignment.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save allocated index.
 *   Memory index always positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);
/**
 * This function frees indexed memory entry to pool.
 * Caller has to make sure that the index is allocated from same pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);
/**
 * This function returns pointer of indexed memory entry from index.
 * Caller has to make sure that the index is valid, and allocated
 * from same pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @param idx
 *   Allocated memory index.
 * @return
 *   - Pointer to indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);
/**
 * This function creates indexed memory pool.
 * Caller has to configure the configuration accordingly.
 *
 * @param cfg
 *   Pointer to indexed memory pool configuration.
 * @return
 *   - Pointer to the created pool, NULL on failure.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);
/**
 * This function releases all resources of pool.
 * Caller has to make sure that all indexes and memories allocated
 * from this pool not referenced anymore.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);
/**
 * This function dumps debug info of pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);
/*
 * Macros for linked list based on indexed memory.
 * Example data structure:
 * struct Foo {
 *	ILIST_ENTRY(uint16_t) next;
 *	...
 * };
 */
#define ILIST_ENTRY(type)						\
struct {								\
	type prev; /* Index of previous element. */			\
	type next; /* Index of next element. */				\
}
/* Push (elem) with index (idx) at the front of the list rooted at *(head).
 * Index 0 is the list terminator, so valid element indexes must be non-zero.
 */
#define ILIST_INSERT(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		(elem)->field.prev = 0;					\
		if (*(head)) {						\
			(peer) = mlx5_ipool_get(pool, *(head));		\
			if (peer)					\
				(peer)->field.prev = (idx);		\
		}							\
		*(head) = (idx);					\
	} while (0)
/* Unlink (elem) with index (idx) from the list rooted at *(head), fixing up
 * the prev/next indexes of its neighbors (looked up through the pool).
 */
#define ILIST_REMOVE(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT(elem);					\
		MLX5_ASSERT(head);					\
		if ((elem)->field.prev) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.prev);		\
			if (peer)					\
				(peer)->field.next = (elem)->field.next;\
		}							\
		if ((elem)->field.next) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.next);		\
			if (peer)					\
				(peer)->field.prev = (elem)->field.prev;\
		}							\
		if (*(head) == (idx))					\
			*(head) = (elem)->field.next;			\
	} while (0)
/* Iterate over the indexed list rooted at (head); (idx)/(elem) are the
 * cursor pair, resolved through mlx5_ipool_get() on each step. Terminates
 * when the next index is 0 or the lookup returns NULL.
 */
#define ILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	(idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	idx = (elem)->field.next, (elem) = \
	(idx) ? mlx5_ipool_get(pool, idx) : NULL)
/* Single index list. */
#define SILIST_ENTRY(type)						\
struct {								\
	type next; /* Index of next element. */				\
}
/* Push (elem) with index (idx) at the front of the singly-indexed list
 * rooted at *(head); index 0 terminates the list.
 */
#define SILIST_INSERT(head, idx, elem, field)				\
	do {								\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		*(head) = (idx);					\
	} while (0)
/* Iterate over the singly-indexed list rooted at (head); (idx)/(elem) are
 * the cursor pair, resolved through mlx5_ipool_get() on each step.
 */
#define SILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	(idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	idx = (elem)->field.next, (elem) = \
	(idx) ? mlx5_ipool_get(pool, idx) : NULL)
432 #endif /* RTE_PMD_MLX5_UTILS_H_ */