/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_spinlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>

#include "mlx5_defs.h"

/* Convert a bit number to the corresponding 64-bit mask. */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))

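/*
 * Usage sketch (illustrative, not part of the original header): ERRNO_SAFE()
 * evaluates its argument while restoring the previous errno afterwards, so a
 * cleanup call cannot clobber the error code being reported. The fd variable
 * and setup_device() call below are hypothetical.
 *
 *   if (setup_device() < 0) {
 *           ERRNO_SAFE(close(fd)); // close() may overwrite errno
 *           return -errno;         // errno still reflects setup_device()
 *   }
 */
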
extern int mlx5_logtype;

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)

#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)

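/*
 * Usage sketch (illustrative): the macros forward to the PMD logging helpers
 * with the mlx5 dynamic log type; port_id and rxqs_n below are hypothetical
 * variables.
 *
 *   DRV_LOG(DEBUG, "port %u: %u Rx queues configured", port_id, rxqs_n);
 *   WARN("port %u: MAC address is not set", port_id);
 */
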
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/* Transpose flags. Useful to convert IBV to DPDK flags. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

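/*
 * Worked example (illustrative, flag values are assumptions): with
 * FROM == 0x10 and TO == 0x02, FROM >= TO, so TRANSPOSE(val, FROM, TO)
 * becomes ((val & 0x10) / 0x08): a set 0x10 bit maps to 0x02, a clear bit
 * maps to 0. When TO > FROM, the multiplication branch shifts the bit up
 * instead.
 */
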
/*
 * The indexed memory entry index is made up of the trunk index and the
 * offset of the entry within the trunk. Since the entry index is 32 bits,
 * a user who prefers small trunks can increase the macro below so that the
 * pool can hold more trunks with lots of entries allocated.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))

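/*
 * Worked example: with the default TRUNK_IDX_BITS of 16, the pool can
 * reference up to TRUNK_MAX_IDX == 65535 trunks and the default trunk size
 * is 1 << (28 - 16) == 4096 entries, so an entry index conceptually splits
 * into a trunk index part and an in-trunk offset part (the exact split also
 * depends on the grow_trunk/grow_shift configuration below).
 */
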
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif

struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be a power of 2. It can be increased
	 * if trunk growing is enabled: the entry number then grows by
	 * left-shifting with grow_shift. Trunks with an index beyond
	 * grow_trunk keep the same entry number as the last growing trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Number of trunks with a growing entry number in the pool. Set it
	 * to 0 to make the pool work as a fixed trunk entry pool. It works
	 * only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Shift value by which the trunk entry number grows, stops after
	 * grow_trunk trunks. It works only if grow_trunk is not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
	const char *type; /* Memory allocate type name. */
	void *(*malloc)(const char *type, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};

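/*
 * Configuration sketch (illustrative; the entry type, the allocator
 * callbacks and all field values below are assumptions, not taken from the
 * driver):
 *
 *   struct mlx5_indexed_pool_config cfg = {
 *           .size = sizeof(struct my_entry), // hypothetical entry type
 *           .trunk_size = 64,                // entries per trunk, power of 2
 *           .grow_trunk = 3,                 // number of growing trunks
 *           .grow_shift = 2,                 // entry number <<= 2 per grow
 *           .need_lock = 1,                  // pool is shared among threads
 *           .release_mem_en = 1,             // free a trunk once it is empty
 *           .type = "my_entry_ipool",        // hypothetical type name
 *           .malloc = my_malloc_cb,          // hypothetical allocator
 *           .free = my_free_cb,              // hypothetical release callback
 *   };
 */
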
struct mlx5_indexed_trunk {
	uint32_t idx; /* Trunk id. */
	uint32_t prev; /* Previous free trunk in free list. */
	uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
	struct rte_bitmap *bmp; /* Bitmap tracking the free entries. */
	uint8_t data[] __rte_cache_aligned; /* Entry data start. */
};

struct mlx5_indexed_pool {
	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
	rte_spinlock_t lock; /* Pool lock for multiple thread usage. */
	uint32_t n_trunk_valid; /* Trunks allocated. */
	uint32_t n_trunk; /* Trunk pointer array size (dim of trunks). */
	struct mlx5_indexed_trunk **trunks; /* Trunk pointer array. */
	uint32_t free_list; /* Index to first free trunk. */
#ifdef POOL_DEBUG
	/* Debug statistics. */
	uint32_t n_entry;
	uint32_t trunk_new;
	uint32_t trunk_avail;
	uint32_t trunk_empty;
	uint32_t trunk_free;
#endif
	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
};

/**
 * Return logarithm of the nearest power of two above input value.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Logarithm of the nearest power of two above input value.
 */
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

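/*
 * Worked example: log2above(1) == 0, log2above(4) == 2 and log2above(5) == 3
 * (the nearest power of two above 5 is 8 == 1 << 3).
 */
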
/** Maximum size of string for naming the hlist table. */
#define MLX5_HLIST_NAMESIZE 32

/**
 * Structure of the entry in the hash list. The user should define its own
 * struct that contains this entry in order to store the data. The 'key' is
 * 64 bits right now and it is the user's responsibility to guarantee there
 * is no collision.
 */
struct mlx5_hlist_entry {
	LIST_ENTRY(mlx5_hlist_entry) next; /* Entry pointers in the list. */
	uint64_t key; /* User defined 'key', could be the hash signature. */
};

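/*
 * Embedding sketch (illustrative): the user data element carries the list
 * entry, here as its first member so the pointer returned by lookup can be
 * cast back to the containing structure. The struct and field names below
 * are hypothetical.
 *
 *   struct my_flow_data {
 *           struct mlx5_hlist_entry entry; // embedded hash list entry
 *           uint32_t flow_id;              // user payload
 *   };
 */
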
/** Structure for hash head. */
LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry);

/** Type of function that is used to handle the data before freeing. */
typedef void (*mlx5_hlist_destroy_callback_fn)(void *p, void *ctx);

/** Hash list table structure. */
struct mlx5_hlist {
	char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */
	uint32_t table_sz; /**< Number of heads, needs to be a power of 2. */
	uint32_t mask; /**< Mask to get the index of the list heads. */
	struct mlx5_hlist_head heads[]; /**< List head arrays. */
};

/**
 * Create a hash list table. The user can specify the list heads array size
 * of the table; currently the size should be a power of 2 in order to get a
 * better distribution for the entries. Each entry is a part of the whole
 * data element and the caller is responsible for the data element's
 * allocation and cleanup / free. The key of each entry will be calculated
 * with CRC in order to generate a slightly fairer distribution.
 *
 * @param name
 *   Name of the hash list (optional).
 * @param size
 *   Heads array size of the hash list.
 *
 * @return
 *   Pointer of the hash list table created, NULL on failure.
 */
struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size);

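/*
 * Usage sketch (illustrative; the table name and heads count are arbitrary
 * example values):
 *
 *   struct mlx5_hlist *h = mlx5_hlist_create("flow_table", 64);
 *   if (h == NULL)
 *           return -ENOMEM;
 */
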
/**
 * Search an entry matching the key.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key for the entry being searched.
 *
 * @return
 *   Pointer of the hlist entry if found, NULL otherwise.
 */
struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key);

/**
 * Insert an entry into the hash list table. The entry is only part of the
 * whole data element and a 64-bit key is used for matching. The user should
 * construct the key or give a calculated hash signature and guarantee there
 * is no collision.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param entry
 *   Entry to be inserted into the hash list table.
 *
 * @return
 *   - zero for success.
 *   - -EEXIST if the entry is already inserted.
 */
int mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);

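/*
 * Usage sketch (illustrative), continuing the hypothetical my_flow_data
 * example above: the caller fills the 64-bit key, inserts the embedded
 * entry and later looks it up with the same key.
 *
 *   struct my_flow_data *data = ...; // allocated and filled by the caller
 *   uint64_t key = ...;              // caller-computed key or hash signature
 *
 *   data->entry.key = key;
 *   if (mlx5_hlist_insert(h, &data->entry) == -EEXIST)
 *           ...; // an entry with this key is already present
 *
 *   struct mlx5_hlist_entry *e = mlx5_hlist_lookup(h, key);
 *   if (e != NULL)
 *           data = (struct my_flow_data *)e; // entry is the first member
 */
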
/**
 * Remove an entry from the hash list table. The user should guarantee the
 * validity of the entry.
 *
 * @param h
 *   Pointer to the hash list table. (not used)
 * @param entry
 *   Entry to be removed from the hash list table.
 */
void mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
		       struct mlx5_hlist_entry *entry);

/**
 * Destroy the hash list table. All the entries already inserted into the
 * lists will be handled by the callback function provided by the user
 * (including freeing them if needed) before the table itself is freed.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param cb
 *   Callback function called for each inserted entry when destroying the
 *   hash list.
 * @param ctx
 *   Common context parameter used by the callback function for each entry.
 */
void mlx5_hlist_destroy(struct mlx5_hlist *h,
			mlx5_hlist_destroy_callback_fn cb, void *ctx);

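/*
 * Usage sketch (illustrative): the destroy callback receives each remaining
 * entry so the caller can release the containing data element. The callback
 * name and the use of rte_free() (assuming rte_malloc()-based allocation)
 * are assumptions for the example.
 *
 *   static void
 *   my_flow_data_free(void *p, void *ctx __rte_unused)
 *   {
 *           rte_free(p); // p is the embedded entry, first member of the data
 *   }
 *
 *   mlx5_hlist_destroy(h, my_flow_data_free, NULL);
 */
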
/**
 * This function allocates a non-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function allocates a zero-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function frees an indexed memory entry back to the pool.
 * The caller has to make sure that the index was allocated from the same
 * pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function returns the pointer of an indexed memory entry from its
 * index. The caller has to make sure that the index is valid and was
 * allocated from the same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory index.
 * @return
 *   - Pointer to the indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function creates an indexed memory pool.
 * The caller has to fill in the configuration accordingly.
 *
 * @param cfg
 *   Pointer to the indexed memory pool configuration.
 * @return
 *   - Pointer to the created indexed memory pool.
 *   - NULL on error. Not enough memory, or invalid configuration.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);

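/*
 * Lifecycle sketch (illustrative), reusing the hypothetical cfg initializer
 * shown above: entries are referenced by the returned 32-bit index rather
 * than by pointer, which keeps references compact.
 *
 *   struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *   uint32_t idx;
 *   struct my_entry *e = mlx5_ipool_zmalloc(pool, &idx); // idx > 0 on success
 *
 *   e = mlx5_ipool_get(pool, idx); // translate the index back to a pointer
 *
 *   mlx5_ipool_free(pool, idx);
 *   mlx5_ipool_destroy(pool);
 */
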
/**
 * This function releases all resources of the pool.
 * The caller has to make sure that no index or memory allocated from this
 * pool is referenced anymore.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);

/**
 * This function dumps debug info of the pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);

/*
 * Macros for a linked list based on indexed memory.
 * Example data structure:
 * struct Foo {
 *	ILIST_ENTRY(uint16_t) next;
 *	...
 * };
 */
#define ILIST_ENTRY(type) \
struct { \
	type prev; /* Index of previous element. */ \
	type next; /* Index of next element. */ \
} /* Structure for an element in the indexed list. */

#define ILIST_INSERT(pool, head, idx, elem, field) \
	do { \
		typeof(elem) peer; \
		MLX5_ASSERT((elem) && (idx)); \
		(elem)->field.next = *(head); \
		(elem)->field.prev = 0; \
		if (*(head)) { \
			(peer) = mlx5_ipool_get(pool, *(head)); \
			if (peer) \
				(peer)->field.prev = (idx); \
		} \
		*(head) = (idx); \
	} while (0)

#define ILIST_REMOVE(pool, head, idx, elem, field) \
	do { \
		typeof(elem) peer; \
		MLX5_ASSERT(elem); \
		MLX5_ASSERT(head); \
		if ((elem)->field.prev) { \
			(peer) = mlx5_ipool_get \
				 (pool, (elem)->field.prev); \
			if (peer) \
				(peer)->field.next = (elem)->field.next; \
		} \
		if ((elem)->field.next) { \
			(peer) = mlx5_ipool_get \
				 (pool, (elem)->field.next); \
			if (peer) \
				(peer)->field.prev = (elem)->field.prev; \
		} \
		if (*(head) == (idx)) \
			*(head) = (elem)->field.next; \
	} while (0)

#define ILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     (idx) = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL)

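/*
 * Usage sketch (illustrative): elements of a hypothetical struct Foo pool
 * (see the example data structure above) linked through their pool indexes;
 * foo_pool, foo, foo_idx and do_something() are assumptions.
 *
 *   uint16_t head = 0; // 0 means an empty list
 *   uint16_t idx;      // iterator index for ILIST_FOREACH
 *
 *   ILIST_INSERT(foo_pool, &head, foo_idx, foo, next);
 *   ILIST_FOREACH(foo_pool, head, idx, foo, next)
 *           do_something(foo);
 *   ILIST_REMOVE(foo_pool, &head, foo_idx, foo, next);
 */
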
/* Single index list. */
#define SILIST_ENTRY(type) \
struct { \
	type next; /* Index of next element. */ \
}

#define SILIST_INSERT(head, idx, elem, field) \
	do { \
		MLX5_ASSERT((elem) && (idx)); \
		(elem)->field.next = *(head); \
		*(head) = (idx); \
	} while (0)

#define SILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     (idx) = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL)

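/*
 * Usage sketch (illustrative): same idea as ILIST above, but singly linked,
 * so only insertion at the head and forward traversal are provided.
 *
 *   SILIST_INSERT(&head, foo_idx, foo, next);
 *   SILIST_FOREACH(foo_pool, head, idx, foo, next)
 *           do_something(foo);
 */
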
#endif /* RTE_PMD_MLX5_UTILS_H_ */