1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_UTILS_H_
7 #define RTE_PMD_MLX5_UTILS_H_
15 #include <rte_spinlock.h>
16 #include <rte_memory.h>
17 #include <rte_bitmap.h>
19 #include <mlx5_common.h>
21 #include "mlx5_defs.h"
25 * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
26 * Otherwise there would be a type conflict between stdbool and altivec.
28 #if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
30 /* redefine as in stdbool.h */
/*
 * Build the 64-bit mask that has only bit number (v) set.
 * The 64-bit constant guarantees the shift happens in 64-bit arithmetic,
 * so bit positions 32..63 are handled correctly (undefined for v >= 64,
 * as with any C shift).
 */
#define MLX5_BITSHIFT(v) ((uint64_t)1 << (v))
/*
 * Save and restore errno around argument evaluation.
 * The compound-literal array captures the entry value of errno in [0] and
 * evaluates (x) purely for its side effects while initializing [1]; errno
 * is then reset from the saved [0]. The macro expands to an expression
 * whose value is the restored errno.
 * NOTE(review): C11 makes initializer-list evaluations indeterminately
 * sequenced; this relies on the common left-to-right order — confirm for
 * the supported compilers.
 */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
40 extern int mlx5_logtype;
42 /* Generic printf()-like logging macro with automatic line feed. */
43 #define DRV_LOG(level, ...) \
44 PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \
45 __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
/* Shorthand wrappers around DRV_LOG() with a fixed log level. */
#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)		/* Next segment of the packet. */
#define DATA_LEN(m) ((m)->data_len)	/* Data length in this segment. */
#define PKT_LEN(m) ((m)->pkt_len)	/* Total packet length, all segments. */
#define DATA_OFF(m) ((m)->data_off)	/* Offset of data start in the buffer. */
#define SET_DATA_OFF(m, o) ((m)->data_off = (o)) /* Set the data offset. */
#define NB_SEGS(m) ((m)->nb_segs)	/* Number of segments in the packet. */
#define PORT(m) ((m)->port)		/* Input port of the packet. */
61 /* Transpose flags. Useful to convert IBV to DPDK flags. */
62 #define TRANSPOSE(val, from, to) \
64 (((val) & (from)) / ((from) / (to))) : \
65 (((val) & (from)) * ((to) / (from))))
68 * The indexed memory entry index is made up of trunk index and offset of
69 * the entry in the trunk. Since the entry index is 32 bits, in case user
70 * prefers to have small trunks, user can change the macro below to a big
 * number which helps the pool contain more trunks with lots of entries
/* Width, in bits, of the trunk index within a 32-bit entry index. */
#define TRUNK_IDX_BITS 16
/* Largest encodable trunk index. */
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
/* The maximum index value doubles as the "no trunk" sentinel. */
#define TRUNK_INVALID TRUNK_MAX_IDX
/* Default entries per trunk: 1 << 12 = 4096 when TRUNK_IDX_BITS is 16. */
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
78 #ifdef RTE_LIBRTE_MLX5_DEBUG
82 struct mlx5_indexed_pool_config {
83 uint32_t size; /* Pool entry size. */
84 uint32_t trunk_size:22;
86 * Trunk entry number. Must be power of 2. It can be increased
87 * if trunk_grow enable. The trunk entry number increases with
88 * left shift grow_shift. Trunks with index are after grow_trunk
89 * will keep the entry number same with the last grow trunk.
91 uint32_t grow_trunk:4;
93 * Trunks with entry number increase in the pool. Set it to 0
 * to make the pool work as a trunk-entry-fixed pool. It works
95 * only if grow_shift is not 0.
97 uint32_t grow_shift:4;
99 * Trunk entry number increase shift value, stop after grow_trunk.
100 * It works only if grow_trunk is not 0.
102 uint32_t need_lock:1;
103 /* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
105 const char *type; /* Memory allocate type name. */
106 void *(*malloc)(const char *type, size_t size, unsigned int align,
108 /* User defined memory allocator. */
109 void (*free)(void *addr); /* User defined memory release. */
112 struct mlx5_indexed_trunk {
113 uint32_t idx; /* Trunk id. */
114 uint32_t prev; /* Previous free trunk in free list. */
115 uint32_t next; /* Next free trunk in free list. */
116 uint32_t free; /* Free entries available */
117 struct rte_bitmap *bmp;
118 uint8_t data[] __rte_cache_min_aligned; /* Entry data start. */
121 struct mlx5_indexed_pool {
122 struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
123 rte_spinlock_t lock; /* Pool lock for multiple thread usage. */
124 uint32_t n_trunk_valid; /* Trunks allocated. */
125 uint32_t n_trunk; /* Trunk pointer array size. */
126 /* Dim of trunk pointer array. */
127 struct mlx5_indexed_trunk **trunks;
128 uint32_t free_list; /* Index to first free trunk. */
132 uint32_t trunk_avail;
133 uint32_t trunk_empty;
136 uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
140 * Return logarithm of the nearest power of two above input value.
146 * Logarithm of the nearest power of two above input value.
148 static inline unsigned int
149 log2above(unsigned int v)
154 for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
/** Maximum size, in bytes, of the string naming the hlist table. */
#define MLX5_HLIST_NAMESIZE 32
163 * Structure of the entry in the hash list, user should define its own struct
164 * that contains this in order to store the data. The 'key' is 64-bits right
 * now and it is the user's responsibility to guarantee there is no collision.
167 struct mlx5_hlist_entry {
168 LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */
169 uint64_t key; /* user defined 'key', could be the hash signature. */
/** Hash bucket head: a BSD sys/queue.h LIST of mlx5_hlist_entry. */
LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry);
/**
 * Type of callback invoked on each entry when a hash list is destroyed,
 * to handle the data (including freeing it) before the table is released.
 *
 * @param p
 *   Pointer to the entry being handled.
 * @param ctx
 *   Common user context passed through mlx5_hlist_destroy().
 */
typedef void (*mlx5_hlist_destroy_callback_fn)(void *p, void *ctx);
178 /** hash list table structure */
180 char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */
181 /**< number of heads, need to be power of 2. */
183 /**< mask to get the index of the list heads. */
185 struct mlx5_hlist_head heads[]; /**< list head arrays. */
/**
 * Create a hash list table, the user can specify the list heads array size
 * of the table, now the size should be a power of 2 in order to get better
 * distribution for the entries. Each entry is a part of the whole data element
 * and the caller should be responsible for the data element's allocation and
 * cleanup / free. Key of each entry will be calculated with CRC in order to
 * generate a little fairer distribution.
 *
 * @param name
 *   Name of the hash list (optional).
 * @param size
 *   Heads array size of the hash list.
 *
 * @return
 *   Pointer of the hash list table created, NULL on failure.
 */
struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size);
/**
 * Search an entry matching the key.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key for the searching entry.
 *
 * @return
 *   Pointer of the hlist entry if found, NULL otherwise.
 */
struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key);
/**
 * Insert an entry to the hash list table, the entry is only part of whole data
 * element and a 64-bit key is used for matching. User should construct the
 * key or give a calculated hash signature and guarantee there is no collision.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param entry
 *   Entry to be inserted into the hash list table.
 *
 * @return
 *   - zero for success.
 *   - -EEXIST if the entry is already inserted.
 */
int mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);
/**
 * Remove an entry from the hash list table. User should guarantee the
 * validity of the entry.
 *
 * @param h
 *   Pointer to the hash list table. (not used)
 * @param entry
 *   Entry to be removed from the hash list table.
 */
void mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
		       struct mlx5_hlist_entry *entry);
/**
 * Destroy the hash list table, all the entries already inserted into the lists
 * will be handled by the callback function provided by the user (including
 * free if needed) before the table is freed.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param cb
 *   Callback function for each inserted entry when destroying the hash list.
 * @param ctx
 *   Common context parameter used by callback function for each entry.
 */
void mlx5_hlist_destroy(struct mlx5_hlist *h,
			mlx5_hlist_destroy_callback_fn cb, void *ctx);
/**
 * This function allocates non-initialized memory entry from pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * Memory entry is allocated from memory trunk, no alignment.
 *
 * @param pool
 *   Pointer to indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save allocated index.
 *   Memory index always positive value.
 *
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);
/**
 * This function allocates zero initialized memory entry from pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * Memory entry is allocated from memory trunk, no alignment.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save allocated index.
 *   Memory index always positive value.
 *
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);
/**
 * This function frees indexed memory entry to pool.
 * Caller has to make sure that the index is allocated from same pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);
/**
 * This function returns pointer of indexed memory entry from index.
 * Caller has to make sure that the index is valid, and allocated
 * from the same pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @param idx
 *   Allocated memory index.
 *
 * @return
 *   - Pointer to indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);
/**
 * This function creates indexed memory pool.
 * Caller has to configure the configuration accordingly.
 *
 * @param cfg
 *   Pointer to indexed memory pool configuration.
 *
 * @return
 *   - Pointer to the created indexed memory pool.
 *   - NULL on error — presumably allocation failure or invalid
 *     configuration; confirm in the implementation.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);
/**
 * This function releases all resources of pool.
 * Caller has to make sure that all indexes and memories allocated
 * from this pool not referenced anymore.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 *
 * @return
 *   - non-zero value on error.
 *   - zero on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);
/**
 * This function dumps debug info of pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);
359 * Macros for linked list based on indexed memory.
360 * Example data structure:
362 * ILIST_ENTRY(uint16_t) next;
367 #define ILIST_ENTRY(type) \
369 type prev; /* Index of previous element. */ \
370 type next; /* Index of next element. */ \
373 #define ILIST_INSERT(pool, head, idx, elem, field) \
376 MLX5_ASSERT((elem) && (idx)); \
377 (elem)->field.next = *(head); \
378 (elem)->field.prev = 0; \
380 (peer) = mlx5_ipool_get(pool, *(head)); \
382 (peer)->field.prev = (idx); \
387 #define ILIST_REMOVE(pool, head, idx, elem, field) \
392 if ((elem)->field.prev) { \
393 (peer) = mlx5_ipool_get \
394 (pool, (elem)->field.prev); \
396 (peer)->field.next = (elem)->field.next;\
398 if ((elem)->field.next) { \
399 (peer) = mlx5_ipool_get \
400 (pool, (elem)->field.next); \
402 (peer)->field.prev = (elem)->field.prev;\
404 if (*(head) == (idx)) \
405 *(head) = (elem)->field.next; \
/* Iterate over an index-linked list: each step resolves the current index
 * into a pointer through the pool. Index 0 is the end-of-list sentinel.
 */
#define ILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     idx = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)
414 /* Single index list. */
415 #define SILIST_ENTRY(type) \
417 type next; /* Index of next element. */ \
420 #define SILIST_INSERT(head, idx, elem, field) \
422 MLX5_ASSERT((elem) && (idx)); \
423 (elem)->field.next = *(head); \
/* Iterate over a singly-linked index list; same traversal as ILIST_FOREACH
 * but for entries that only store a 'next' index. Index 0 ends the walk.
 */
#define SILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     idx = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)
433 #endif /* RTE_PMD_MLX5_UTILS_H_ */