1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */
6 #ifndef _RTE_MEMPOOL_H_
7 #define _RTE_MEMPOOL_H_
 * A memory pool is an allocator of fixed-size objects. It is
14 * identified by its name, and uses a ring to store free objects. It
15 * provides some other optional services, like a per-core object
16 * cache, and an alignment helper to ensure that objects are padded
17 * to spread them equally on all RAM channels, ranks, and so on.
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or
 * equivalent, the object data is not modified; the user can save some
 * meta-data in the object data and retrieve them when allocating a
 * new object.
25 * Note: the mempool implementation is not preemptible. An lcore must not be
26 * interrupted by another task that uses the same mempool (because it uses a
27 * ring which is not preemptible). Also, usual mempool functions like
28 * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
29 * thread due to the internal per-lcore cache. Due to the lack of caching,
30 * rte_mempool_get() or rte_mempool_put() performance will suffer when called
31 * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
32 * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
33 * user cache created with rte_mempool_cache_create().
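 *
 * As an illustrative sketch (not itself part of this header), an
 * unregistered non-EAL thread could use a user-owned cache like this;
 * "mp" is assumed to be a mempool created elsewhere by the application:
 *
 *	struct rte_mempool_cache *c;
 *	void *obj;
 *
 *	c = rte_mempool_cache_create(32, SOCKET_ID_ANY);
 *	if (c == NULL)
 *		return;
 *	if (rte_mempool_generic_get(mp, &obj, 1, c) == 0) {
 *		// ... use obj ...
 *		rte_mempool_generic_put(mp, &obj, 1, c);
 *	}
 *	rte_mempool_cache_flush(c, mp);
 *	rte_mempool_cache_free(c);
 */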
42 #include <rte_config.h>
43 #include <rte_spinlock.h>
45 #include <rte_debug.h>
46 #include <rte_lcore.h>
47 #include <rte_memory.h>
48 #include <rte_branch_prediction.h>
50 #include <rte_memcpy.h>
51 #include <rte_common.h>
53 #include "rte_mempool_trace_fp.h"
59 #define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
60 #define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
61 #define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
63 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
65 * A structure that stores the mempool statistics (per-lcore).
66 * Note: Cache stats (put_cache_bulk/objs, get_cache_bulk/objs) are not
67 * captured since they can be calculated from other stats.
68 * For example: put_cache_objs = put_objs - put_common_pool_objs.
70 struct rte_mempool_debug_stats {
71 uint64_t put_bulk; /**< Number of puts. */
72 uint64_t put_objs; /**< Number of objects successfully put. */
73 uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
74 uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
75 uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
76 uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
77 uint64_t get_success_bulk; /**< Successful allocation number. */
78 uint64_t get_success_objs; /**< Objects successfully allocated. */
79 uint64_t get_fail_bulk; /**< Failed allocation number. */
80 uint64_t get_fail_objs; /**< Objects that failed to be allocated. */
81 uint64_t get_success_blks; /**< Successful allocation number of contiguous blocks. */
82 uint64_t get_fail_blks; /**< Failed allocation number of contiguous blocks. */
83 } __rte_cache_aligned;
87 * A structure that stores a per-core object cache.
89 struct rte_mempool_cache {
90 uint32_t size; /**< Size of the cache */
91 uint32_t flushthresh; /**< Threshold before we flush excess elements */
92 uint32_t len; /**< Current cache count */
94 * Cache is allocated to this size to allow it to overflow in certain
95 * cases to avoid needless emptying of cache.
97 void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
98 } __rte_cache_aligned;
101 * A structure that stores the size of mempool elements.
103 struct rte_mempool_objsz {
104 uint32_t elt_size; /**< Size of an element. */
105 uint32_t header_size; /**< Size of header (before elt). */
106 uint32_t trailer_size; /**< Size of trailer (after elt). */
uint32_t total_size;
/**< Total size of an object (header + elt + trailer). */
};
111 /**< Maximum length of a memory pool's name. */
112 #define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
113 sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
114 #define RTE_MEMPOOL_MZ_PREFIX "MP_"
117 #define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
119 #define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
/** Mempool over one chunk of physically contiguous memory */
122 #define MEMPOOL_PG_NUM_DEFAULT 1
124 #ifndef RTE_MEMPOOL_ALIGN
126 * Alignment of elements inside mempool.
128 #define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
131 #define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
134 * Mempool object header structure
 * Each object stored in a mempool is prefixed by this header structure.
 * It allows the mempool pointer to be retrieved from the object, and
 * iteration over all objects attached to a mempool. When debug is enabled,
 * a cookie is also added in this structure, preventing corruptions and
 * double-frees.
142 struct rte_mempool_objhdr {
143 RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
144 struct rte_mempool *mp; /**< The mempool owning the object. */
145 rte_iova_t iova; /**< IO address of the object. */
146 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
uint64_t cookie; /**< Debug cookie. */
#endif
};
 * A list type for object headers.
154 RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
156 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
159 * Mempool object trailer structure
 * In debug mode, each object stored in a mempool is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
164 struct rte_mempool_objtlr {
uint64_t cookie; /**< Debug cookie. */
};
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 * A list type for the memory chunks where objects are stored.
173 RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
176 * Callback used to free a memory chunk
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
		void *opaque);
182 * Mempool objects memory header structure
184 * The memory chunks where objects are stored. Each chunk is virtually
185 * and physically contiguous.
187 struct rte_mempool_memhdr {
188 RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
189 struct rte_mempool *mp; /**< The mempool owning the chunk */
190 void *addr; /**< Virtual address of the chunk */
191 rte_iova_t iova; /**< IO address of the chunk */
192 size_t len; /**< length of the chunk */
193 rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
void *opaque; /**< Argument passed to the free callback */
};
198 * Additional information about the mempool
200 * The structure is cache-line aligned to avoid ABI breakages in
201 * a number of cases when something small is added.
203 struct rte_mempool_info {
204 /** Number of objects in the contiguous block */
205 unsigned int contig_block_size;
206 } __rte_cache_aligned;
209 * The RTE mempool structure.
struct rte_mempool {
char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
RTE_STD_C11
union {
	void *pool_data; /**< Ring or pool to store objects. */
	uint64_t pool_id; /**< External mempool identifier. */
};
218 void *pool_config; /**< optional args for ops alloc. */
219 const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
220 unsigned int flags; /**< Flags of the mempool. */
221 int socket_id; /**< Socket id passed at create. */
222 uint32_t size; /**< Max size of the mempool. */
uint32_t cache_size;
/**< Size of per-lcore default local cache. */
226 uint32_t elt_size; /**< Size of an element. */
227 uint32_t header_size; /**< Size of header (before elt). */
228 uint32_t trailer_size; /**< Size of trailer (after elt). */
230 unsigned private_data_size; /**< Size of private data. */
232 * Index into rte_mempool_ops_table array of mempool ops
233 * structs, which contain callback function pointers.
234 * We're using an index here rather than pointers to the callbacks
235 * to facilitate any secondary processes that may want to use
 * this mempool.
int32_t ops_index;

struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */
242 uint32_t populated_size; /**< Number of populated objects. */
243 struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
244 uint32_t nb_mem_chunks; /**< Number of memory chunks */
245 struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */
247 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
248 /** Per-lcore statistics. */
249 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
253 /** Spreading among memory channels not required. */
254 #define RTE_MEMPOOL_F_NO_SPREAD 0x0001
256 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD.
259 #define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
260 /** Do not align objects on cache lines. */
261 #define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
263 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN.
266 #define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
267 /** Default put is "single-producer". */
268 #define RTE_MEMPOOL_F_SP_PUT 0x0004
270 * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT.
273 #define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
274 /** Default get is "single-consumer". */
275 #define RTE_MEMPOOL_F_SC_GET 0x0008
277 * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET.
280 #define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
281 /** Internal: pool is created. */
282 #define RTE_MEMPOOL_F_POOL_CREATED 0x0010
283 /** Don't need IOVA contiguous objects. */
284 #define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
286 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG.
289 #define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
290 /** Internal: no object from the pool can be used for device IO (DMA). */
291 #define RTE_MEMPOOL_F_NON_IO 0x0040
294 * @internal When debug is enabled, store some statistics.
297 * Pointer to the memory pool.
299 * Name of the statistics field to increment in the memory pool.
301 * Number to add to the object-oriented statistics.
303 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name += n;	\
		}						\
	} while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif
315 * Calculate the size of the mempool header.
318 * Pointer to the memory pool.
320 * Size of the per-lcore cache.
322 #define MEMPOOL_HEADER_SIZE(mp, cs) \
323 (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
324 (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
326 /* return the header of a mempool object (internal) */
327 static inline struct rte_mempool_objhdr *
rte_mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}
335 * Return a pointer to the mempool owning this object.
338 * An object that is owned by a pool. If this is not the case,
339 * the behavior is undefined.
341 * A pointer to the mempool structure.
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);

	return hdr->mp;
}
349 /* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);

	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
357 * @internal Check and update cookies or panic.
360 * Pointer to the memory pool.
361 * @param obj_table_const
362 * Pointer to a table of void * pointers (objects).
 * Number of objects in the table.
366 * - 0: object is supposed to be allocated, mark it as free
367 * - 1: object is supposed to be free, mark it as allocated
368 * - 2: just check that cookie is valid (free or allocated)
370 void rte_mempool_check_cookies(const struct rte_mempool *mp,
371 void * const *obj_table_const, unsigned n, int free);
373 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
374 #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
375 rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
378 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
381 * @internal Check contiguous object blocks and update cookies or panic.
384 * Pointer to the memory pool.
385 * @param first_obj_table_const
386 * Pointer to a table of void * pointers (first object of the contiguous
 *   blocks).
 * @param n
 *   Number of contiguous object blocks.
391 * - 0: object is supposed to be allocated, mark it as free
392 * - 1: object is supposed to be free, mark it as allocated
393 * - 2: just check that cookie is valid (free or allocated)
395 void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
396 void * const *first_obj_table_const, unsigned int n, int free);
398 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
		free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
		free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
		free) \
	do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
409 #define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
412 * Prototype for implementation specific data provisioning function.
414 * The function should provide the implementation specific memory for
415 * use by the other mempool ops functions in a given mempool ops struct.
416 * E.g. the default ops provides an instance of the rte_ring for this purpose.
 * For other ops, it will most likely point to a different type of data
 * structure, and will be transparent to the application programmer.
419 * This function should set mp->pool_data.
421 typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
424 * Free the opaque private data pointed to by mp->pool_data pointer.
426 typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
429 * Enqueue an object into the external pool.
431 typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
432 void * const *obj_table, unsigned int n);
435 * Dequeue an object from the external pool.
437 typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
438 void **obj_table, unsigned int n);
441 * Dequeue a number of contiguous object blocks from the external pool.
443 typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
444 void **first_obj_table, unsigned int n);
447 * Return the number of available objects in the external pool.
449 typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
452 * Calculate memory size required to store given number of objects.
454 * If mempool objects are not required to be IOVA-contiguous
455 * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
456 * virtually contiguous chunk size. Otherwise, if mempool objects must
457 * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear),
458 * min_chunk_size defines IOVA-contiguous chunk size.
 *   Pointer to the memory pool.
 * @param[in] obj_num
 *   Number of objects.
 * @param[in] pg_shift
465 * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
466 * @param[out] min_chunk_size
467 * Location for minimum size of the memory chunk which may be used to
468 * store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size.
474 typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
475 uint32_t obj_num, uint32_t pg_shift,
476 size_t *min_chunk_size, size_t *align);
 * @internal Helper to calculate memory size required to store given
 * number of objects.
482 * This function is internal to mempool library and mempool drivers.
 * If page boundaries may be ignored, it is just the product of the total
 * object size (including header and trailer) and the number of objects.
486 * Otherwise, it is a number of pages required to store given number of
487 * objects without crossing page boundary.
489 * Note that if object size is bigger than page size, then it assumes
 * that pages are grouped in subsets of physically contiguous pages big
491 * enough to store at least one object.
493 * Minimum size of memory chunk is the total element size.
494 * Required memory chunk alignment is the cache line size.
497 * A pointer to the mempool structure.
 * @param[in] obj_num
 *   Number of objects to be added to the mempool.
500 * @param[in] pg_shift
501 * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
502 * @param[in] chunk_reserve
503 * Amount of memory that must be reserved at the beginning of each page,
504 * or at the beginning of the memory area if pg_shift is 0.
505 * @param[out] min_chunk_size
506 * Location for minimum size of the memory chunk which may be used to
507 * store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size.
513 ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
514 uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
515 size_t *min_chunk_size, size_t *align);
 * Default way to calculate memory size required to store given number of
 * objects.
521 * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
522 * 0, min_chunk_size, align).
524 ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
525 uint32_t obj_num, uint32_t pg_shift,
526 size_t *min_chunk_size, size_t *align);
529 * Function to be called for each populated object.
532 * A pointer to the mempool structure.
534 * An opaque pointer passed to iterator.
536 * Object virtual address.
538 * Input/output virtual address of the object or RTE_BAD_IOVA.
540 typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
541 void *opaque, void *vaddr, rte_iova_t iova);
544 * Populate memory pool objects using provided memory chunk.
546 * Populated objects should be enqueued to the pool, e.g. using
547 * rte_mempool_ops_enqueue_bulk().
549 * If the given IO address is unknown (iova = RTE_BAD_IOVA),
550 * the chunk doesn't need to be physically contiguous (only virtually),
551 * and allocated objects may span two pages.
554 * A pointer to the mempool structure.
555 * @param[in] max_objs
556 * Maximum number of objects to be populated.
558 * The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
 * @param[in] len
 *   The length of memory in bytes.
564 * Callback function to be executed for each populated object.
565 * @param[in] obj_cb_arg
566 * An opaque pointer passed to the callback function.
568 * The number of objects added on success.
569 * On error, no objects are populated and a negative errno is returned.
571 typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
572 unsigned int max_objs,
573 void *vaddr, rte_iova_t iova, size_t len,
574 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
577 * Align objects on addresses multiple of total_elt_sz.
579 #define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
582 * @internal Helper to populate memory pool object using provided memory
583 * chunk: just slice objects one by one, taking care of not
584 * crossing page boundaries.
586 * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
587 * of object headers will be aligned on a multiple of total_elt_sz.
588 * This feature is used by octeontx hardware.
590 * This function is internal to mempool library and mempool drivers.
593 * A pointer to the mempool structure.
595 * Logical OR of following flags:
596 * - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses
597 * multiple of total_elt_sz.
598 * @param[in] max_objs
599 * Maximum number of objects to be added in mempool.
601 * The virtual address of memory that should be used to store objects.
603 * The IO address corresponding to vaddr, or RTE_BAD_IOVA.
605 * The length of memory in bytes.
607 * Callback function to be executed for each populated object.
608 * @param[in] obj_cb_arg
609 * An opaque pointer passed to the callback function.
611 * The number of objects added in mempool.
613 int rte_mempool_op_populate_helper(struct rte_mempool *mp,
614 unsigned int flags, unsigned int max_objs,
615 void *vaddr, rte_iova_t iova, size_t len,
616 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
619 * Default way to populate memory pool object using provided memory chunk.
621 * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
622 * len, obj_cb, obj_cb_arg).
624 int rte_mempool_op_populate_default(struct rte_mempool *mp,
625 unsigned int max_objs,
626 void *vaddr, rte_iova_t iova, size_t len,
627 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
630 * Get some additional information about a mempool.
632 typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
633 struct rte_mempool_info *info);
/** Structure defining the mempool operations. */
637 struct rte_mempool_ops {
638 char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
639 rte_mempool_alloc_t alloc; /**< Allocate private data. */
640 rte_mempool_free_t free; /**< Free the external pool. */
641 rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
642 rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
643 rte_mempool_get_count get_count; /**< Get qty of available objs. */
645 * Optional callback to calculate memory size required to
646 * store specified number of objects.
648 rte_mempool_calc_mem_size_t calc_mem_size;
650 * Optional callback to populate mempool objects using
651 * provided memory chunk.
653 rte_mempool_populate_t populate;
657 rte_mempool_get_info_t get_info;
659 * Dequeue a number of contiguous object blocks.
661 rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
662 } __rte_cache_aligned;
664 #define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
667 * Structure storing the table of registered ops structs, each of which contain
668 * the function pointers for the mempool ops functions.
669 * Each process has its own storage for this ops struct array so that
670 * the mempools can be shared across primary and secondary processes.
671 * The indices used to access the array are valid across processes, whereas
672 * any function pointers stored directly in the mempool struct would not be.
673 * This results in us simply having "ops_index" in the mempool struct.
675 struct rte_mempool_ops_table {
676 rte_spinlock_t sl; /**< Spinlock for add/delete. */
677 uint32_t num_ops; /**< Number of used ops structs in the table. */
679 * Storage for all possible ops structs.
681 struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
682 } __rte_cache_aligned;
684 /** Array of registered ops structs. */
685 extern struct rte_mempool_ops_table rte_mempool_ops_table;
688 * @internal Get the mempool ops struct from its index.
691 * The index of the ops struct in the ops struct table. It must be a valid
692 * index: (0 <= idx < num_ops).
694 * The pointer to the ops struct in the table.
696 static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}
705 * @internal Wrapper for mempool_ops alloc callback.
708 * Pointer to the memory pool.
710 * - 0: Success; successfully allocated mempool pool_data.
711 * - <0: Error; code of alloc function.
int
rte_mempool_ops_alloc(struct rte_mempool *mp);
717 * @internal Wrapper for mempool_ops dequeue callback.
720 * Pointer to the memory pool.
722 * Pointer to a table of void * pointers (objects).
724 * Number of objects to get.
726 * - 0: Success; got n objects.
727 * - <0: Error; code of dequeue function.
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->dequeue(mp, obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
	}
	return ret;
}
747 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
750 * Pointer to the memory pool.
751 * @param[out] first_obj_table
752 * Pointer to a table of void * pointers (first objects).
754 * Number of blocks to get.
756 * - 0: Success; got n objects.
757 * - <0: Error; code of dequeue function.
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
772 * @internal wrapper for mempool_ops enqueue callback.
775 * Pointer to the memory pool.
777 * Pointer to a table of void * pointers (objects).
779 * Number of objects to put.
781 * - 0: Success; n objects supplied.
782 * - <0: Error; code of enqueue function.
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}
798 * @internal wrapper for mempool_ops get_count callback.
801 * Pointer to the memory pool.
803 * The number of available objects in the external pool.
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
809 * @internal wrapper for mempool_ops calc_mem_size callback.
 * API to calculate the size of memory required to store the specified
 * number of objects.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[in] obj_num
 *   Number of objects.
 * @param[in] pg_shift
818 * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
819 * @param[out] min_chunk_size
820 * Location for minimum size of the memory chunk which may be used to
821 * store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size aligned at page boundary.
827 ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
828 uint32_t obj_num, uint32_t pg_shift,
829 size_t *min_chunk_size, size_t *align);
832 * @internal wrapper for mempool_ops populate callback.
834 * Populate memory pool objects using provided memory chunk.
837 * A pointer to the mempool structure.
838 * @param[in] max_objs
839 * Maximum number of objects to be populated.
841 * The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
 * @param[in] len
 *   The length of memory in bytes.
847 * Callback function to be executed for each populated object.
848 * @param[in] obj_cb_arg
849 * An opaque pointer passed to the callback function.
851 * The number of objects added on success.
852 * On error, no objects are populated and a negative errno is returned.
854 int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
855 void *vaddr, rte_iova_t iova, size_t len,
rte_mempool_populate_obj_cb_t *obj_cb,
		void *obj_cb_arg);
860 * Wrapper for mempool_ops get_info callback.
863 * Pointer to the memory pool.
865 * Pointer to the rte_mempool_info structure
867 * - 0: Success; The mempool driver supports retrieving supplementary
868 * mempool information
869 * - -ENOTSUP - doesn't support get_info ops (valid case).
871 int rte_mempool_ops_get_info(const struct rte_mempool *mp,
872 struct rte_mempool_info *info);
875 * @internal wrapper for mempool_ops free callback.
878 * Pointer to the memory pool.
void
rte_mempool_ops_free(struct rte_mempool *mp);
884 * Set the ops of a mempool.
886 * This can only be done on a mempool that is not populated, i.e. just after
887 * a call to rte_mempool_create_empty().
890 * Pointer to the memory pool.
892 * Name of the ops structure to use for this mempool.
894 * Opaque data that can be passed by the application to the ops functions.
896 * - 0: Success; the mempool is now using the requested ops functions.
897 * - -EINVAL - Invalid ops struct name provided.
898 * - -EEXIST - mempool already has an ops struct assigned.
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);
905 * Register mempool operations.
908 * Pointer to an ops structure to register.
910 * - >=0: Success; return the index of the ops struct in the table.
911 * - -EINVAL - some missing callbacks while registering ops struct.
912 * - -ENOSPC - the maximum number of ops structs has been reached.
914 int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
917 * Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops() fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
921 #define MEMPOOL_REGISTER_OPS(ops) \
	RTE_INIT(mp_hdlr_init_##ops)			\
	{						\
		rte_mempool_register_ops(&ops);		\
	}
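
/*
 * As an illustrative sketch, a minimal handler could be wired up as
 * follows; the my_* callbacks are hypothetical driver code, shown only
 * to demonstrate the registration mechanism:
 *
 *	static struct rte_mempool_ops my_ops = {
 *		.name = "my_handler",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	MEMPOOL_REGISTER_OPS(my_ops);
 *
 * A mempool would then select this handler with
 * rte_mempool_set_ops_byname(mp, "my_handler", NULL) before being
 * populated.
 */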
928 * An object callback function for mempool.
930 * Used by rte_mempool_create() and rte_mempool_obj_iter().
932 typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
933 void *opaque, void *obj, unsigned obj_idx);
934 typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
937 * A memory callback function for mempool.
939 * Used by rte_mempool_mem_iter().
941 typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);
946 * A mempool constructor callback function.
948 * Arguments are the mempool and the opaque pointer given by the user in
949 * rte_mempool_create().
951 typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
954 * Create a new mempool named *name* in memory.
956 * This function uses ``rte_memzone_reserve()`` to allocate memory. The
957 * pool contains n elements of elt_size. Its size is set to n.
960 * The name of the mempool.
962 * The number of elements in the mempool. The optimum size (in terms of
 * memory usage) for a mempool is when n is a power of two minus one:
 * n = (2^q - 1).
 * @param elt_size
966 * The size of each element.
968 * If cache_size is non-zero, the rte_mempool library will try to
969 * limit the accesses to the common lockless pool, by maintaining a
 * per-lcore object cache. This argument must be lower than or equal to
 * RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 * cache_size to have "n modulo cache_size == 0": if this is
 * not the case, some elements will always stay in the pool and will
 * never be used. The access to the per-lcore table is of course
 * faster than the multi-producer/consumer pool. The cache can be
 * disabled if the cache_size argument is set to 0; this can be useful to
 * avoid losing objects in the cache.
978 * @param private_data_size
979 * The size of the private data appended after the mempool
980 * structure. This is useful for storing some private data after the
981 * mempool structure, as is done for rte_mbuf_pool for example.
983 * A function pointer that is called for initialization of the pool,
984 * before object initialization. The user can initialize the private
 * data in this function if needed. This parameter can be NULL if
 * not needed.
988 * An opaque pointer to data that can be used in the mempool
989 * constructor function.
991 * A function pointer that is called for each object at
992 * initialization of the pool. The user can set some meta data in
993 * objects if needed. This parameter can be NULL if not needed.
994 * The obj_init() function takes the mempool pointer, the init_arg,
995 * the object pointer and the object number as parameters.
996 * @param obj_init_arg
997 * An opaque pointer to data that can be used as an argument for
998 * each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
1001 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1002 * constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
1005 * - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
1006 * between channels in RAM: the pool allocator will add padding
1007 * between objects depending on the hardware configuration. See
1008 * Memory alignment constraints for details. If this flag is set,
1009 * the allocator will just align them to a cache line.
1010 * - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
1011 * cache-aligned. This flag removes this constraint, and no
1012 * padding will be present between objects. This flag implies
1013 * RTE_MEMPOOL_F_NO_SPREAD.
1014 * - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
1015 * when using rte_mempool_put() or rte_mempool_put_bulk() is
1016 * "single-producer". Otherwise, it is "multi-producers".
1017 * - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior
1018 * when using rte_mempool_get() or rte_mempool_get_bulk() is
1019 * "single-consumer". Otherwise, it is "multi-consumers".
1020 * - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
1021 * necessarily be contiguous in IO memory.
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
1024 * with rte_errno set appropriately. Possible rte_errno values include:
1025 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
1026 * - E_RTE_SECONDARY - function was called from a secondary process instance
1027 * - EINVAL - cache size provided is too large or an unknown flag was passed
1028 * - ENOSPC - the maximum number of memzones has already been allocated
1029 * - EEXIST - a memzone with the same name already exists
1030 * - ENOMEM - no appropriate memory area found in which to create memzone
1032 struct rte_mempool *
1033 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
1034 unsigned cache_size, unsigned private_data_size,
1035 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
1036 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
1037 int socket_id, unsigned flags);
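
/*
 * As an illustrative sketch (sizes and counts are arbitrary), a pool of
 * 8191 elements of 2048 bytes each, with a 256-object per-lcore cache,
 * could be created and exercised like this:
 *
 *	struct rte_mempool *mp;
 *	void *obj;
 *
 *	mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *		NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create mempool: %d\n", rte_errno);
 *	if (rte_mempool_get(mp, &obj) == 0)
 *		rte_mempool_put(mp, obj);
 */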
1040 * Create an empty mempool
1042 * The mempool is allocated and initialized, but it is not populated: no
1043 * memory is allocated for the mempool elements. The user has to call
1044 * rte_mempool_populate_*() to add memory chunks to the pool. Once
1045 * populated, the user may also want to initialize each object with
1046 * rte_mempool_obj_iter().
1049 * The name of the mempool.
1051 * The maximum number of elements that can be added in the mempool.
1052 * The optimum size (in terms of memory usage) for a mempool is when n
1053 * is a power of two minus one: n = (2^q - 1).
1055 * The size of each element.
1057 * Size of the cache. See rte_mempool_create() for details.
1058 * @param private_data_size
1059 * The size of the private data appended after the mempool
1060 * structure. This is useful for storing some private data after the
1061 * mempool structure, as is done for rte_mbuf_pool for example.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
1064 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1065 * constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
1068 * rte_mempool_create() for details.
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
1071 * with rte_errno set appropriately. See rte_mempool_create() for details.
1073 struct rte_mempool *
1074 rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
1075 unsigned cache_size, unsigned private_data_size,
1076 int socket_id, unsigned flags);
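
/*
 * As an illustrative sketch of the two-step creation flow; "ring_mp_mc"
 * is the default ring-based handler name, and error handling is
 * abbreviated:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 8191, 2048, 256, 0,
 *		SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		return;
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
 *			rte_mempool_populate_default(mp) < 0) {
 *		rte_mempool_free(mp);
 *		return;
 *	}
 */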
1080 * Unlink the mempool from global list, free the memory chunks, and all
1081 * memory referenced by the mempool. The objects must not be used by
1082 * other cores as they will be freed.
1085 * A pointer to the mempool structure.
1088 rte_mempool_free(struct rte_mempool *mp);
1091 * Add physically contiguous memory for objects in the pool at init
1093 * Add a virtually and physically contiguous memory chunk in the pool
1094 * where objects can be instantiated.
1096 * If the given IO address is unknown (iova = RTE_BAD_IOVA),
1097 * the chunk doesn't need to be physically contiguous (only virtually),
1098 * and allocated objects may span two pages.
1101 * A pointer to the mempool structure.
1103 * The virtual address of memory that should be used to store objects.
1107 * The length of memory in bytes.
1109 * The callback used to free this chunk when destroying the mempool.
1111 * An opaque argument passed to free_cb.
1113 * The number of objects added on success (strictly positive).
 * On error, the chunk is not added to the memory list of the
 * mempool and the following code is returned:
1116 * (0): not enough room in chunk for one object.
1117 * (-ENOSPC): mempool is already populated.
1118 * (-ENOMEM): allocation failure.
1120 int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
		void *opaque);
1125 * Add virtually contiguous memory for objects in the pool at init
1127 * Add a virtually contiguous memory chunk in the pool where objects can
1131 * A pointer to the mempool structure.
1133 * The virtual address of memory that should be used to store objects.
1135 * The length of memory in bytes.
1137 * The size of memory pages in this virtual area.
1139 * The callback used to free this chunk when destroying the mempool.
1141 * An opaque argument passed to free_cb.
1143 * The number of objects added on success (strictly positive).
 * On error, the chunk is not added to the memory list of the
 * mempool and the following code is returned:
1146 * (0): not enough room in chunk for one object.
1147 * (-ENOSPC): mempool is already populated.
1148 * (-ENOMEM): allocation failure.
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
		size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
		void *opaque);
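
/*
 * As an illustrative sketch, attaching an application-provided buffer
 * to an otherwise empty pool; "buf" and "buf_len" are hypothetical
 * application variables, and the free callback reclaims the buffer when
 * the mempool is destroyed:
 *
 *	static void
 *	my_chunk_free(struct rte_mempool_memhdr *memhdr, void *opaque)
 *	{
 *		free(memhdr->addr);
 *	}
 *	...
 *	size_t pg_sz;
 *
 *	if (rte_mempool_get_page_size(mp, &pg_sz) == 0)
 *		rte_mempool_populate_virt(mp, buf, buf_len, pg_sz,
 *			my_chunk_free, NULL);
 */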
1156 * Add memory for objects in the pool at init
1158 * This is the default function used by rte_mempool_create() to populate
1159 * the mempool. It adds memory allocated using rte_memzone_reserve().
1162 * A pointer to the mempool structure.
1164 * The number of objects added on success.
1165 * On error, the chunk is not added in the memory list of the
1166 * mempool and a negative errno is returned.
1168 int rte_mempool_populate_default(struct rte_mempool *mp);
1171 * Add memory from anonymous mapping for objects in the pool at init
 * This function mmaps an anonymous memory zone that is locked in
1174 * memory to store the objects of the mempool.
1177 * A pointer to the mempool structure.
1179 * The number of objects added on success.
1180 * On error, 0 is returned, rte_errno is set, and the chunk is not added in
1181 * the memory list of the mempool.
1183 int rte_mempool_populate_anon(struct rte_mempool *mp);
1186 * Call a function for each mempool element
1188 * Iterate across all objects attached to a rte_mempool and call the
1189 * callback function on it.
1192 * A pointer to an initialized mempool.
1194 * A function pointer that is called for each object.
1196 * An opaque pointer passed to the callback function.
1198 * Number of objects iterated.
1200 uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
1201 rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
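
/*
 * As an illustrative sketch, a callback that zeroes every element after
 * population; "my_obj_init" is a hypothetical name:
 *
 *	static void
 *	my_obj_init(struct rte_mempool *mp, void *arg, void *obj,
 *		unsigned obj_idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *	...
 *	uint32_t visited = rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */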
1204 * Call a function for each mempool memory chunk
1206 * Iterate across all memory chunks attached to a rte_mempool and call
1207 * the callback function on it.
1210 * A pointer to an initialized mempool.
1212 * A function pointer that is called for each memory chunk.
1214 * An opaque pointer passed to the callback function.
1216 * Number of memory chunks iterated.
1218 uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
1219 rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
1222 * Dump the status of the mempool to a file.
1225 * A pointer to a file for output
1227 * A pointer to the mempool structure.
1229 void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
1232 * Create a user-owned mempool cache.
1234 * This can be used by unregistered non-EAL threads to enable caching when they
1235 * interact with a mempool.
1238 * The size of the mempool cache. See rte_mempool_create()'s cache_size
1239 * parameter description for more information. The same limits and
1240 * considerations apply here too.
1242 * The socket identifier in the case of NUMA. The value can be
1243 * SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
1245 struct rte_mempool_cache *
1246 rte_mempool_cache_create(uint32_t size, int socket_id);
1249 * Free a user-owned mempool cache.
1252 * A pointer to the mempool cache.
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);
1258 * Get a pointer to the per-lcore default mempool cache.
1261 * A pointer to the mempool structure.
1263 * The logical core id.
 * A pointer to the mempool cache or NULL if disabled or unregistered non-EAL
 * thread.
1268 static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	rte_mempool_trace_default_cache(mp, lcore_id,
		&mp->local_cache[lcore_id]);
	return &mp->local_cache[lcore_id];
}
1283 * Flush a user-owned mempool cache to the specified mempool.
1286 * A pointer to the mempool cache.
1288 * A pointer to the mempool.
1290 static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
		struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_trace_cache_flush(cache, mp);
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}
1304 * @internal Put several objects back in the mempool; used internally.
1306 * A pointer to the mempool structure.
1308 * A pointer to a table of void * pointers (objects).
 * The number of objects to store back in the mempool, must be strictly
 * positive.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* Increment stats now; adding to the mempool always succeeds. */
	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

	/* No cache provided, or the put would overflow memory allocated for the cache */
	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (if it crosses the
	 *      cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

	cache->len += n;

	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:

	/* Push the remaining objects directly into the ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
1364 * Put several objects back in the mempool.
1367 * A pointer to the mempool structure.
1369 * A pointer to a table of void * pointers (objects).
1371 * The number of objects to add in the mempool from the obj_table.
1373 * A pointer to a mempool cache structure. May be NULL if not needed.
1375 static __rte_always_inline void
1376 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned int n, struct rte_mempool_cache *cache)
{
	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
	rte_mempool_do_generic_put(mp, obj_table, n, cache);
}
1385 * Put several objects back in the mempool.
1387 * This function calls the multi-producer or the single-producer
1388 * version depending on the default behavior that was specified at
1389 * mempool creation time (see flags).
1392 * A pointer to the mempool structure.
1394 * A pointer to a table of void * pointers (objects).
1396 * The number of objects to add in the mempool from obj_table.
1398 static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
	rte_mempool_generic_put(mp, obj_table, n, cache);
}
1409 * Put one object back in the mempool.
1411 * This function calls the multi-producer or the single-producer
1412 * version depending on the default behavior that was specified at
1413 * mempool creation time (see flags).
1416 * A pointer to the mempool structure.
1418 * A pointer to the object to be added.
1420 static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
1427 * @internal Get several objects from the mempool; used internally.
1429 * A pointer to the mempool structure.
1431 * A pointer to a table of void * pointers (objects).
1433 * The number of objects to get, must be strictly positive.
1435 * A pointer to a mempool cache structure. May be NULL if not needed.
1437 * - >=0: Success; number of objects supplied.
1438 * - <0: Error; code of ring dequeue function.
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
		unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided, or the request cannot be satisfied from the cache */
	if (unlikely(cache == NULL || n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it. */
		/* How many do we require, i.e. number to fill the cache + the request */
		uint32_t req = n + (cache->size - cache->len);

		ret = rte_mempool_ops_dequeue_bulk(mp,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out of
			 * buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);

	return 0;

ring_dequeue:

	/* Get the remaining objects directly from the ring */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

	if (ret < 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
	}

	return ret;
}
1503 * Get several objects from the mempool.
1505 * If cache is enabled, objects will be retrieved first from cache,
1506 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if caches from other
 * lcores are full.
1511 * A pointer to the mempool structure.
1513 * A pointer to a table of void * pointers (objects) that will be filled.
1515 * The number of objects to get from mempool to obj_table.
1517 * A pointer to a mempool cache structure. May be NULL if not needed.
1519 * - 0: Success; objects taken.
1520 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
1522 static __rte_always_inline int
1523 rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
1524 unsigned int n, struct rte_mempool_cache *cache)
1527 ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
1529 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
1530 rte_mempool_trace_generic_get(mp, obj_table, n, cache);
1535 * Get several objects from the mempool.
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behavior that was specified at
1539 * mempool creation time (see flags).
1541 * If cache is enabled, objects will be retrieved first from cache,
1542 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if caches from other
 * lcores are full.
1547 * A pointer to the mempool structure.
1549 * A pointer to a table of void * pointers (objects) that will be filled.
1551 * The number of objects to get from the mempool to obj_table.
1553 * - 0: Success; objects taken
1554 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
1556 static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}
1566 * Get one object from the mempool.
 * This function calls the multi-consumer or the single-consumer
1569 * version, depending on the default behavior that was specified at
1570 * mempool creation (see flags).
1572 * If cache is enabled, objects will be retrieved first from cache,
1573 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if caches from other
 * lcores are full.
1578 * A pointer to the mempool structure.
1580 * A pointer to a void * pointer (object) that will be filled.
1582 * - 0: Success; objects taken.
1583 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
1585 static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
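
/*
 * As an illustrative sketch of the basic get/put cycle:
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) < 0) {
 *		// pool (and caches) exhausted
 *	} else {
 *		// ... use obj ...
 *		rte_mempool_put(mp, obj);
 *	}
 */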
 * Get contiguous blocks of objects from the mempool.
 *
 * If cache is enabled, consider flushing it first, to reuse objects
 * as soon as possible.
1597 * The application should check that the driver supports the operation
 * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
 * is not zero.
1602 * A pointer to the mempool structure.
1603 * @param first_obj_table
1604 * A pointer to a pointer to the first object in each block.
1606 * The number of blocks to get from mempool.
1608 * - 0: Success; blocks taken.
1609 * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
1610 * - -EOPNOTSUPP: The mempool driver does not support block dequeue
1612 static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
			1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}
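
/*
 * As an illustrative sketch, checking driver support before dequeuing
 * blocks (the block count of 4 is arbitrary):
 *
 *	struct rte_mempool_info info;
 *	void *first[4];
 *
 *	if (rte_mempool_ops_get_info(mp, &info) == 0 &&
 *			info.contig_block_size > 0 &&
 *			rte_mempool_get_contig_blocks(mp, first, 4) == 0) {
 *		// each first[i] is the first of info.contig_block_size objects
 *	}
 */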
1634 * Return the number of entries in the mempool.
 * When cache is enabled, this function has to browse the caches of
 * all lcores, so it should not be used in a data path, but only for
1638 * debug purposes. User-owned mempool caches are not accounted for.
1641 * A pointer to the mempool structure.
1643 * The number of entries in the mempool.
1645 unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
1648 * Return the number of elements which have been allocated from the mempool
 * When cache is enabled, this function has to browse the caches of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes. User-owned mempool caches are not accounted for.
1655 * A pointer to the mempool structure.
 *   The number of entries allocated (in use) in the mempool.
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);
1663 * Test if the mempool is full.
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
1667 * purposes. User-owned mempool caches are not accounted for.
1670 * A pointer to the mempool structure.
1672 * - 1: The mempool is full.
1673 * - 0: The mempool is not full.
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == mp->size;
}
1682 * Test if the mempool is empty.
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
1686 * purposes. User-owned mempool caches are not accounted for.
1689 * A pointer to the mempool structure.
1691 * - 1: The mempool is empty.
1692 * - 0: The mempool is not empty.
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == 0;
}
1701 * Return the IO address of elt, which is an element of the pool mp.
1704 * A pointer (virtual address) to the element of the pool.
1706 * The IO address of the elt element.
1707 * If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
1708 * returned value is RTE_BAD_IOVA.
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->iova;
}
1720 * Check the consistency of mempool objects.
1722 * Verify the coherency of fields in the mempool structure. Also check
1723 * that the cookies of mempool objects (even the ones that are not
1724 * present in pool) have a correct value. If not, a panic will occur.
1727 * A pointer to the mempool structure.
1729 void rte_mempool_audit(struct rte_mempool *mp);
 * Return a pointer to the private data in a mempool structure.
1735 * A pointer to the mempool structure.
1737 * A pointer to the private data.
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return RTE_PTR_ADD(mp,
		MEMPOOL_HEADER_SIZE(mp, mp->cache_size));
}
1746 * Dump the status of all mempools on the console
1749 * A pointer to a file for output
1751 void rte_mempool_list_dump(FILE *f);
1754 * Search a mempool from its name
1757 * The name of the mempool.
 * The pointer to the mempool matching the name, or NULL if not found,
 * with rte_errno set appropriately. Possible rte_errno values include:
1762 * - ENOENT - required entry not available to return.
1765 struct rte_mempool *rte_mempool_lookup(const char *name);
1768 * Get the header, trailer and total size of a mempool element.
1770 * Given a desired size of the mempool element and mempool flags,
1771 * calculates header, trailer, body and total sizes of the mempool object.
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
1784 uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
1785 struct rte_mempool_objsz *sz);
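
/*
 * As an illustrative sketch, querying the per-object footprint before
 * creating a pool (the element size of 2048 is arbitrary):
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *	// total == sz.header_size + sz.elt_size + sz.trailer_size
 */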
1788 * Walk list of all memory pools
1793 * Argument passed to iterator
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		void *arg);
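
/*
 * As an illustrative sketch, counting all mempools in the system;
 * "count_pools" is a hypothetical callback name:
 *
 *	static void
 *	count_pools(struct rte_mempool *mp, void *arg)
 *	{
 *		unsigned *count = arg;
 *
 *		(*count)++;
 *	}
 *	...
 *	unsigned count = 0;
 *	rte_mempool_walk(count_pools, &count);
 */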
1799 * @internal Get page size used for mempool object allocation.
1800 * This function is internal to mempool library and mempool drivers.
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
1806 * Mempool event type.
1809 enum rte_mempool_event {
1810 /** Occurs after a mempool is fully populated. */
1811 RTE_MEMPOOL_EVENT_READY = 0,
1812 /** Occurs before the destruction of a mempool begins. */
RTE_MEMPOOL_EVENT_DESTROY = 1,
};
1818 * Mempool event callback.
1820 * rte_mempool_event_callback_register() may be called from within the callback,
1821 * but the callbacks registered this way will not be invoked for the same event.
1822 * rte_mempool_event_callback_unregister() may only be safely called
1823 * to remove the running callback.
1825 typedef void (rte_mempool_event_callback)(
1826 enum rte_mempool_event event,
struct rte_mempool *mp,
		void *user_data);
1832 * Register a callback function invoked on mempool life cycle event.
1833 * The function will be invoked in the process
1834 * that performs an action which triggers the callback.
1837 * Callback function.
1842 * 0 on success, negative on failure and rte_errno is set.
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
		void *user_data);
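
/*
 * As an illustrative sketch, logging pool creation; "my_pool_event_cb"
 * is a hypothetical callback name:
 *
 *	static void
 *	my_pool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
 *		void *user_data)
 *	{
 *		if (event == RTE_MEMPOOL_EVENT_READY)
 *			printf("mempool %s is ready\n", mp->name);
 *	}
 *	...
 *	rte_mempool_event_callback_register(my_pool_event_cb, NULL);
 */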
1851 * Unregister a callback added with rte_mempool_event_callback_register().
1852 * @p func and @p user_data must exactly match registration parameters.
1855 * Callback function.
1860 * 0 on success, negative on failure and rte_errno is set.
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
		void *user_data);
1871 #endif /* _RTE_MEMPOOL_H_ */