/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name, and uses a ring to store free objects. It
 * provides some other optional services, like a per-core object
 * cache, and an alignment helper to ensure that objects are padded
 * to spread them equally on all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or
 * equivalent, the object data is not modified; the user can save some
 * meta-data in the object data and retrieve it when allocating a
 * new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must not be
 * interrupted by another task that uses the same mempool (because it uses a
 * ring which is not preemptible). Also, usual mempool functions like
 * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
 * thread due to the internal per-lcore cache. Due to the lack of caching,
 * rte_mempool_get() or rte_mempool_put() performance will suffer when called
 * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
 * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
 * user cache created with rte_mempool_cache_create().
 */
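
/*
 * A usage sketch for the above (illustrative only; the pool pointer "mp",
 * the burst size and the cache size are hypothetical): an unregistered
 * non-EAL thread creates its own cache, passes it to the generic get/put
 * functions, and flushes it back to the pool when done.
 *
 *    struct rte_mempool_cache *cache;
 *    void *objs[32];
 *
 *    cache = rte_mempool_cache_create(256, SOCKET_ID_ANY);
 *    if (cache == NULL)
 *        rte_panic("cannot create mempool cache\n");
 *    if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *        ... use the 32 objects ...
 *        rte_mempool_generic_put(mp, objs, 32, cache);
 *    }
 *    rte_mempool_cache_flush(cache, mp);
 *    rte_mempool_cache_free(cache);
 */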
#include <stdio.h>
#include <stdint.h>

#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#include "rte_mempool_trace_fp.h"
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 * Note: Cache stats (put_cache_bulk/objs, get_cache_bulk/objs) are not
 * captured since they can be calculated from other stats.
 * For example: put_cache_objs = put_objs - put_common_pool_objs.
 */
struct rte_mempool_debug_stats {
    uint64_t put_bulk;             /**< Number of puts. */
    uint64_t put_objs;             /**< Number of objects successfully put. */
    uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
    uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
    uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
    uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
    uint64_t get_success_bulk;     /**< Successful allocation number. */
    uint64_t get_success_objs;     /**< Objects successfully allocated. */
    uint64_t get_fail_bulk;        /**< Failed allocation number. */
    uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
    uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
    uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
    uint32_t size;        /**< Size of the cache */
    uint32_t flushthresh; /**< Threshold before we flush excess elements */
    uint32_t len;         /**< Current cache count */
    /*
     * Cache is allocated to this size to allow it to overflow in certain
     * cases to avoid needless emptying of cache.
     */
    void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
    uint32_t elt_size;     /**< Size of an element. */
    uint32_t header_size;  /**< Size of header (before elt). */
    uint32_t trailer_size; /**< Size of trailer (after elt). */
    uint32_t total_size;
    /**< Total size of an object (header + elt + trailer). */
};
/** Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
                              sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically contiguous memory */
#define MEMPOOL_PG_NUM_DEFAULT 1
#ifndef RTE_MEMPOOL_ALIGN
/**
 * Alignment of elements inside mempool.
 */
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure
 *
 * Each object stored in a mempool is prefixed by this header structure.
 * It allows retrieving the mempool pointer from the object, and iterating
 * over all objects attached to a mempool. When debug is enabled, a
 * cookie is also added in this structure, preventing corruptions and
 * double-frees.
 */
struct rte_mempool_objhdr {
    RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
    struct rte_mempool *mp; /**< The mempool owning the object. */
    rte_iova_t iova;        /**< IO address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
    uint64_t cookie;        /**< Debug cookie. */
#endif
};

/**
 * Type of a list of object headers.
 */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in a mempool is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
    uint64_t cookie; /**< Debug cookie. */
};

#endif

/**
 * Type of a list of memory chunks where objects are stored.
 */
RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
    void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
    RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
    struct rte_mempool *mp; /**< The mempool owning the chunk */
    void *addr;             /**< Virtual address of the chunk */
    rte_iova_t iova;        /**< IO address of the chunk */
    size_t len;             /**< Length of the chunk */
    rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
    void *opaque;           /**< Argument passed to the free callback */
};
/**
 * Additional information about the mempool
 *
 * The structure is cache-line aligned to avoid ABI breakages in
 * a number of cases when something small is added.
 */
struct rte_mempool_info {
    /** Number of objects in the contiguous block */
    unsigned int contig_block_size;
} __rte_cache_aligned;
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
    char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
    RTE_STD_C11
    union {
        void *pool_data;   /**< Ring or pool to store objects. */
        uint64_t pool_id;  /**< External mempool identifier. */
    };
    void *pool_config;     /**< optional args for ops alloc. */
    const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
    unsigned int flags;    /**< Flags of the mempool. */
    int socket_id;         /**< Socket id passed at create. */
    uint32_t size;         /**< Max size of the mempool. */
    uint32_t cache_size;
    /**< Size of per-lcore default local cache. */

    uint32_t elt_size;     /**< Size of an element. */
    uint32_t header_size;  /**< Size of header (before elt). */
    uint32_t trailer_size; /**< Size of trailer (after elt). */

    unsigned private_data_size; /**< Size of private data. */
    /**
     * Index into rte_mempool_ops_table array of mempool ops
     * structs, which contain callback function pointers.
     * We're using an index here rather than pointers to the callbacks
     * to facilitate any secondary processes that may want to use
     * this mempool.
     */
    int32_t ops_index;

    struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

    uint32_t populated_size;         /**< Number of populated objects. */
    struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
    uint32_t nb_mem_chunks;          /**< Number of memory chunks */
    struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
    /** Per-lcore statistics. */
    struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
/** Spreading among memory channels not required. */
#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD.
 * To be deprecated.
 */
#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
/** Do not align objects on cache lines. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN.
 * To be deprecated.
 */
#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
/** Default put is "single-producer". */
#define RTE_MEMPOOL_F_SP_PUT 0x0004
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT.
 * To be deprecated.
 */
#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
/** Default get is "single-consumer". */
#define RTE_MEMPOOL_F_SC_GET 0x0008
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET.
 * To be deprecated.
 */
#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
/** Internal: pool is created. */
#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
/** Don't need IOVA contiguous objects. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG.
 * To be deprecated.
 */
#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
/** Internal: no object from the pool can be used for device IO (DMA). */
#define RTE_MEMPOOL_F_NON_IO 0x0040
/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \
        unsigned __lcore_id = rte_lcore_id(); \
        if (__lcore_id < RTE_MAX_LCORE) { \
            mp->stats[__lcore_id].name += n; \
        } \
    } while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif
/**
 * @internal Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
    (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
    (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/** Deprecated. Use RTE_MEMPOOL_HEADER_SIZE() for internal purposes only. */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
    RTE_DEPRECATED(MEMPOOL_HEADER_SIZE) RTE_MEMPOOL_HEADER_SIZE(mp, cs)
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *
rte_mempool_get_header(void *obj)
{
    return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
        sizeof(struct rte_mempool_objhdr));
}
/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
    struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);

    return hdr->mp;
}
/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
    struct rte_mempool *mp = rte_mempool_from_obj(obj);

    return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
/**
 * @internal Check and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
    void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
    rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
 * @internal Check contiguous object blocks and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param first_obj_table_const
 *   Pointer to a table of void * pointers (first object of the contiguous
 *   blocks).
 * @param n
 *   Number of contiguous object blocks.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
    void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
                                                free) \
    rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
                                            free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
                                                free) \
    do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
/**
 * Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 * E.g. the default ops provides an instance of the rte_ring for this purpose.
 * For other ops, it will most likely point to a different type of data
 * structure, and will be transparent to the application programmer.
 * This function should set mp->pool_data.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
    void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
    void **obj_table, unsigned int n);

/**
 * Dequeue a number of contiguous object blocks from the external pool.
 */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
    void **first_obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
/**
 * Calculate memory size required to store given number of objects.
 *
 * If mempool objects are not required to be IOVA-contiguous
 * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
 * virtually contiguous chunk size. Otherwise, if mempool objects must
 * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear),
 * min_chunk_size defines IOVA-contiguous chunk size.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[in] obj_num
 *   Number of objects.
 * @param[in] pg_shift
 *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
 * @param[out] min_chunk_size
 *   Location for minimum size of the memory chunk which may be used to
 *   store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
    uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align);
/**
 * @internal Helper to calculate memory size required to store given
 * number of objects.
 *
 * This function is internal to mempool library and mempool drivers.
 *
 * If page boundaries may be ignored, it is just a product of total
 * object size including header and trailer and number of objects.
 * Otherwise, it is the number of pages required to store given number of
 * objects without crossing page boundary.
 *
 * Note that if object size is bigger than page size, then it assumes
 * that pages are grouped in subsets of physically contiguous pages big
 * enough to store at least one object.
 *
 * Minimum size of memory chunk is the total element size.
 * Required memory chunk alignment is the cache line size.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] obj_num
 *   Number of objects to be added in mempool.
 * @param[in] pg_shift
 *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
 * @param[in] chunk_reserve
 *   Amount of memory that must be reserved at the beginning of each page,
 *   or at the beginning of the memory area if pg_shift is 0.
 * @param[out] min_chunk_size
 *   Location for minimum size of the memory chunk which may be used to
 *   store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
    uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
    size_t *min_chunk_size, size_t *align);
/**
 * Default way to calculate memory size required to store given number of
 * objects.
 *
 * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
 * 0, min_chunk_size, align).
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
    uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align);
/**
 * Function to be called for each populated object.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] opaque
 *   An opaque pointer passed to iterator.
 * @param[in] vaddr
 *   Object virtual address.
 * @param[in] iova
 *   IO address of the object, or RTE_BAD_IOVA.
 */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
    void *opaque, void *vaddr, rte_iova_t iova);
/**
 * Populate memory pool objects using provided memory chunk.
 *
 * Populated objects should be enqueued to the pool, e.g. using
 * rte_mempool_ops_enqueue_bulk().
 *
 * If the given IO address is unknown (iova = RTE_BAD_IOVA),
 * the chunk doesn't need to be physically contiguous (only virtually),
 * and allocated objects may span two pages.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] max_objs
 *   Maximum number of objects to be populated.
 * @param[in] vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
 * @param[in] len
 *   The length of memory in bytes.
 * @param[in] obj_cb
 *   Callback function to be executed for each populated object.
 * @param[in] obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   The number of objects added on success.
 *   On error, no objects are populated and a negative errno is returned.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
    unsigned int max_objs,
    void *vaddr, rte_iova_t iova, size_t len,
    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Align objects on addresses multiple of total_elt_sz.
 */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
/**
 * @internal Helper to populate memory pool objects using provided memory
 * chunk: just slice objects one by one, taking care of not
 * crossing page boundaries.
 *
 * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
 * of object headers will be aligned on a multiple of total_elt_sz.
 * This feature is used by octeontx hardware.
 *
 * This function is internal to mempool library and mempool drivers.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] flags
 *   Logical OR of following flags:
 *   - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses
 *     multiple of total_elt_sz.
 * @param[in] max_objs
 *   Maximum number of objects to be added in mempool.
 * @param[in] vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
 * @param[in] len
 *   The length of memory in bytes.
 * @param[in] obj_cb
 *   Callback function to be executed for each populated object.
 * @param[in] obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   The number of objects added in mempool.
 */
int rte_mempool_op_populate_helper(struct rte_mempool *mp,
    unsigned int flags, unsigned int max_objs,
    void *vaddr, rte_iova_t iova, size_t len,
    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/**
 * Default way to populate memory pool objects using provided memory chunk.
 *
 * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
 * len, obj_cb, obj_cb_arg).
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
    unsigned int max_objs,
    void *vaddr, rte_iova_t iova, size_t len,
    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Get some additional information about a mempool.
 */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
    struct rte_mempool_info *info);
/** Structure defining mempool operations. */
struct rte_mempool_ops {
    char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
    rte_mempool_alloc_t alloc;       /**< Allocate private data. */
    rte_mempool_free_t free;         /**< Free the external pool. */
    rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
    rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
    rte_mempool_get_count get_count; /**< Get qty of available objs. */
    /**
     * Optional callback to calculate memory size required to
     * store specified number of objects.
     */
    rte_mempool_calc_mem_size_t calc_mem_size;
    /**
     * Optional callback to populate mempool objects using
     * provided memory chunk.
     */
    rte_mempool_populate_t populate;
    /**
     * Get mempool info.
     */
    rte_mempool_get_info_t get_info;
    /**
     * Dequeue a number of contiguous object blocks.
     */
    rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 * Each process has its own storage for this ops struct array so that
 * the mempools can be shared across primary and secondary processes.
 * The indices used to access the array are valid across processes, whereas
 * any function pointers stored directly in the mempool struct would not be.
 * This results in us simply having "ops_index" in the mempool struct.
 */
struct rte_mempool_ops_table {
    rte_spinlock_t sl;  /**< Spinlock for add/delete. */
    uint32_t num_ops;   /**< Number of used ops structs in the table. */
    /**
     * Storage for all possible ops structs.
     */
    struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;
/**
 * @internal Get the mempool ops struct from its index.
 *
 * @param ops_index
 *   The index of the ops struct in the ops struct table. It must be a valid
 *   index: (0 <= idx < num_ops).
 * @return
 *   The pointer to the ops struct in the table.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
    RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

    return &rte_mempool_ops_table.ops[ops_index];
}
/**
 * @internal Wrapper for mempool_ops alloc callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @return
 *   - 0: Success; successfully allocated mempool pool_data.
 *   - <0: Error; code of alloc function.
 */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects to get.
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
    void **obj_table, unsigned n)
{
    struct rte_mempool_ops *ops;
    int ret;

    rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
    ops = rte_mempool_get_ops(mp->ops_index);
    ret = ops->dequeue(mp, obj_table, n);
    if (ret == 0) {
        RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
        RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
    }
    return ret;
}
/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[out] first_obj_table
 *   Pointer to a table of void * pointers (first objects).
 * @param[in] n
 *   Number of blocks to get.
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
    void **first_obj_table, unsigned int n)
{
    struct rte_mempool_ops *ops;

    ops = rte_mempool_get_ops(mp->ops_index);
    RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
    rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
    return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
/**
 * @internal wrapper for mempool_ops enqueue callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects to put.
 * @return
 *   - 0: Success; n objects supplied.
 *   - <0: Error; code of enqueue function.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
    unsigned n)
{
    struct rte_mempool_ops *ops;

    RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
    RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
    rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
    ops = rte_mempool_get_ops(mp->ops_index);
    return ops->enqueue(mp, obj_table, n);
}
/**
 * @internal wrapper for mempool_ops get_count callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @return
 *   The number of available objects in the external pool.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
 * @internal wrapper for mempool_ops calc_mem_size callback.
 * API to calculate size of memory required to store specified number of
 * objects.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[in] obj_num
 *   Number of objects.
 * @param[in] pg_shift
 *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
 * @param[out] min_chunk_size
 *   Location for minimum size of the memory chunk which may be used to
 *   store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size aligned at page boundary.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
    uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align);
/**
 * @internal wrapper for mempool_ops populate callback.
 *
 * Populate memory pool objects using provided memory chunk.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] max_objs
 *   Maximum number of objects to be populated.
 * @param[in] vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
 * @param[in] len
 *   The length of memory in bytes.
 * @param[in] obj_cb
 *   Callback function to be executed for each populated object.
 * @param[in] obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   The number of objects added on success.
 *   On error, no objects are populated and a negative errno is returned.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
    void *vaddr, rte_iova_t iova, size_t len,
    rte_mempool_populate_obj_cb_t *obj_cb,
    void *obj_cb_arg);
/**
 * Wrapper for mempool_ops get_info callback.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[out] info
 *   Pointer to the rte_mempool_info structure.
 * @return
 *   - 0: Success; the mempool driver supports retrieving supplementary
 *     mempool information.
 *   - -ENOTSUP: the mempool driver does not support the get_info ops
 *     (valid case).
 */
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
    struct rte_mempool_info *info);
/**
 * @internal wrapper for mempool_ops free callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);
/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just after
 * a call to rte_mempool_create_empty().
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the ops structure to use for this mempool.
 * @param pool_config
 *   Opaque data that can be passed by the application to the ops functions.
 * @return
 *   - 0: Success; the mempool is now using the requested ops functions.
 *   - -EINVAL - Invalid ops struct name provided.
 *   - -EEXIST - mempool already has an ops struct assigned.
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
    void *pool_config);
/**
 * Register mempool operations.
 *
 * @param ops
 *   Pointer to an ops structure to register.
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - -EINVAL - some missing callbacks while registering ops struct.
 *   - -ENOSPC - the maximum number of ops structs has been reached.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
/**
 * Macro to statically register the ops of a mempool handler.
 * Note that the rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX is registered.
 */
#define MEMPOOL_REGISTER_OPS(ops) \
    RTE_INIT(mp_hdlr_init_##ops) \
    { \
        rte_mempool_register_ops(&ops); \
    }
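
/*
 * Sketch of a minimal custom ops registration (illustrative; the callback
 * implementations "my_alloc", "my_free", etc. are hypothetical and must
 * match the typedefs above):
 *
 *    static const struct rte_mempool_ops my_ops = {
 *        .name = "my_ops",
 *        .alloc = my_alloc,
 *        .free = my_free,
 *        .enqueue = my_enqueue,
 *        .dequeue = my_dequeue,
 *        .get_count = my_get_count,
 *    };
 *    MEMPOOL_REGISTER_OPS(my_ops);
 *
 * A mempool then selects these ops by name via
 * rte_mempool_set_ops_byname(mp, "my_ops", NULL) before being populated.
 */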
/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
    void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
    void *opaque, struct rte_mempool_memhdr *memhdr,
    unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``rte_memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be lower than or equal to
 *   RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. The access to the per-lcore table is of course
 *   faster than the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in cache.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of following flags:
 *   - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     RTE_MEMPOOL_F_NO_SPREAD.
 *   - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
 *   - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
 *   - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
 *     necessarily be contiguous in IO memory.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - EINVAL - cache size provided is too large or an unknown flag was passed
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
    unsigned cache_size, unsigned private_data_size,
    rte_mempool_ctor_t *mp_init, void *mp_init_arg,
    rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
    int socket_id, unsigned flags);
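
/*
 * Example call (illustrative; the counts and sizes are hypothetical):
 * create a pool of 8191 objects of 2048 bytes each, with a 256-object
 * per-lcore cache and no constructor callbacks.
 *
 *    struct rte_mempool *mp;
 *
 *    mp = rte_mempool_create("test_pool", 8191, 2048, 256, 0,
 *                            NULL, NULL, NULL, NULL,
 *                            SOCKET_ID_ANY, 0);
 *    if (mp == NULL)
 *        rte_panic("cannot create mempool: %d\n", rte_errno);
 *
 * 8191 = 2^13 - 1 follows the power-of-two-minus-one sizing advice above.
 */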
/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but it is not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once
 * populated, the user may also want to initialize each object with
 * rte_mempool_obj_iter().
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The maximum number of elements that can be added in the mempool.
 *   The optimum size (in terms of memory usage) for a mempool is when n
 *   is a power of two minus one: n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
    unsigned cache_size, unsigned private_data_size,
    int socket_id, unsigned flags);
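
/*
 * Sketch of the create-empty flow (illustrative; "ring_mp_mc" is the
 * default ring-based handler name, and "my_obj_init" is a hypothetical
 * rte_mempool_obj_cb_t): allocate an empty pool, select the ops, then
 * populate and initialize the objects.
 *
 *    struct rte_mempool *mp;
 *
 *    mp = rte_mempool_create_empty("test_pool", 8191, 2048, 256, 0,
 *                                  SOCKET_ID_ANY, 0);
 *    if (mp == NULL)
 *        rte_panic("cannot allocate mempool\n");
 *    if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0)
 *        rte_panic("cannot set mempool ops\n");
 *    if (rte_mempool_populate_default(mp) < 0)
 *        rte_panic("cannot populate mempool\n");
 *    rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */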
/**
 * Free a mempool
 *
 * Unlink the mempool from global list, free the memory chunks, and all
 * memory referenced by the mempool. The objects must not be used by
 * other cores as they will be freed.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void
rte_mempool_free(struct rte_mempool *mp);
/**
 * Add physically contiguous memory for objects in the pool at init
 *
 * Add a virtually and physically contiguous memory chunk in the pool
 * where objects can be instantiated.
 *
 * If the given IO address is unknown (iova = RTE_BAD_IOVA),
 * the chunk doesn't need to be physically contiguous (only virtually),
 * and allocated objects may span two pages.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
 * @param len
 *   The length of memory in bytes.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success (strictly positive).
 *   On error, the chunk is not added in the memory list of the
 *   mempool and the following code is returned:
 *   - (0): not enough room in chunk for one object.
 *   - (-ENOSPC): mempool is already populated.
 *   - (-ENOMEM): allocation failure.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
    rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
    void *opaque);
/**
 * Add virtually contiguous memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param addr
 *   The virtual address of memory that should be used to store objects.
 * @param len
 *   The length of memory in bytes.
 * @param pg_sz
 *   The size of memory pages in this virtual area.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success (strictly positive).
 *   On error, the chunk is not added in the memory list of the
 *   mempool and the following code is returned:
 *   - (0): not enough room in chunk for one object.
 *   - (-ENOSPC): mempool is already populated.
 *   - (-ENOMEM): allocation failure.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
    size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
    void *opaque);
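
/*
 * Usage sketch (illustrative; "buf", "buf_len", "pg_sz" and the free
 * callback "my_chunk_free" are hypothetical application values describing
 * a virtually contiguous area that the application owns):
 *
 *    int n;
 *
 *    n = rte_mempool_populate_virt(mp, buf, buf_len, pg_sz,
 *                                  my_chunk_free, NULL);
 *    if (n <= 0)
 *        rte_panic("cannot populate mempool from buffer\n");
 *
 * "my_chunk_free" must be an rte_mempool_memchunk_free_cb_t; it is called
 * for this chunk when the mempool is destroyed.
 */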
/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool. It adds memory allocated using rte_memzone_reserve().
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Add memory from anonymous mapping for objects in the pool at init
 *
 * This function mmaps an anonymous memory zone that is locked in
 * memory to store the objects of the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, 0 is returned, rte_errno is set, and the chunk is not added in
 *   the memory list of the mempool.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);
/**
 * Call a function for each mempool element
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
    rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
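
/*
 * Example callback (illustrative): zero the first 64 bytes of every object
 * in the pool. The signature must match rte_mempool_obj_cb_t.
 *
 *    static void
 *    my_obj_init(struct rte_mempool *mp, void *arg, void *obj,
 *                unsigned obj_idx)
 *    {
 *        RTE_SET_USED(mp);
 *        RTE_SET_USED(arg);
 *        RTE_SET_USED(obj_idx);
 *        memset(obj, 0, 64);
 *    }
 *
 *    rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */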
/**
 * Call a function for each mempool memory chunk
 *
 * Iterate across all memory chunks attached to a rte_mempool and call
 * the callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param mem_cb
 *   A function pointer that is called for each memory chunk.
 * @param mem_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
    rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
/**
 * Dump the status of the mempool to a file.
 *
 * @param f
 *   A pointer to a file for output
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
/**
 * Create a user-owned mempool cache.
 *
 * This can be used by unregistered non-EAL threads to enable caching when they
 * interact with a mempool.
 *
 * @param size
 *   The size of the mempool cache. See rte_mempool_create()'s cache_size
 *   parameter description for more information. The same limits and
 *   considerations apply here too.
 * @param socket_id
 *   The socket identifier in the case of NUMA. The value can be
 *   SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
 * @return
 *   A pointer to the mempool cache on success, NULL on error.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 *
 * @param cache
 *   A pointer to the mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);
/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param lcore_id
 *   The logical core id.
 * @return
 *   A pointer to the mempool cache, or NULL if disabled or if called from an
 *   unregistered non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
    if (mp->cache_size == 0)
        return NULL;

    if (lcore_id >= RTE_MAX_LCORE)
        return NULL;

    rte_mempool_trace_default_cache(mp, lcore_id,
        &mp->local_cache[lcore_id]);
    return &mp->local_cache[lcore_id];
}
/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache.
 * @param mp
 *   A pointer to the mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
    struct rte_mempool *mp)
{
    if (cache == NULL)
        cache = rte_mempool_default_cache(mp, rte_lcore_id());
    if (cache == NULL || cache->len == 0)
        return;
    rte_mempool_trace_cache_flush(cache, mp);
    rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
    cache->len = 0;
}
/**
 * @internal Put several objects back in the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool, must be strictly
 *   positive.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
    unsigned int n, struct rte_mempool_cache *cache)
{
    void **cache_objs;

    /* Increment stats now; adding to the mempool always succeeds. */
    RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
    RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

    /* No cache provided or if put would overflow mem allocated for cache */
    if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
        goto ring_enqueue;

    cache_objs = &cache->objs[cache->len];

    /*
     * The cache follows the following algorithm:
     *   1. Add the objects to the cache.
     *   2. Anything greater than the cache min value (if it crosses the
     *      cache flush threshold) is flushed to the ring.
     */

    /* Add elements back into the cache */
    rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

    cache->len += n;

    if (cache->len >= cache->flushthresh) {
        rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
            cache->len - cache->size);
        cache->len = cache->size;
    }

    return;

ring_enqueue:

    /* Push remaining objects into the ring. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
    if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
        rte_panic("cannot put objects in mempool\n");
#else
    rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from the obj_table.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
    unsigned int n, struct rte_mempool_cache *cache)
{
    rte_mempool_trace_generic_put(mp, obj_table, n, cache);
    RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
    rte_mempool_do_generic_put(mp, obj_table, n, cache);
}
/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
    unsigned int n)
{
    struct rte_mempool_cache *cache;

    cache = rte_mempool_default_cache(mp, rte_lcore_id());
    rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
    rte_mempool_generic_put(mp, obj_table, n, cache);
}
/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
    rte_mempool_put_bulk(mp, &obj, 1);
}
/**
 * @internal Get several objects from the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to get, must be strictly positive.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
    unsigned int n, struct rte_mempool_cache *cache)
{
    int ret;
    uint32_t index, len;
    void **cache_objs;

    /* No cache provided or cannot be satisfied from cache */
    if (unlikely(cache == NULL || n >= cache->size))
        goto ring_dequeue;

    cache_objs = cache->objs;

    /* Can this be satisfied from the cache? */
    if (cache->len < n) {
        /* No. Backfill the cache first, and then fill from it */
        uint32_t req = n + (cache->size - cache->len);

        /* How many do we require i.e. number to fill the cache + the request */
        ret = rte_mempool_ops_dequeue_bulk(mp,
            &cache->objs[cache->len], req);
        if (unlikely(ret < 0)) {
            /*
             * In the off chance that we are buffer constrained,
             * where we are not able to allocate cache + n, go to
             * the ring directly. If that fails, we are truly out of
             * buffers.
             */
            goto ring_dequeue;
        }

        cache->len += req;
    }

    /* Now fill in the response ... */
    for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
        *obj_table = cache_objs[len];

    cache->len -= n;

    RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
    RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);

    return 0;

ring_dequeue:

    /* Get remaining objects from the ring. */
    ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

    if (ret < 0) {
        RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
        RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
    } else {
        RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
        RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
    }

    return ret;
}
/**
 * Get several objects from the mempool.
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are not.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from mempool to obj_table.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
    unsigned int n, struct rte_mempool_cache *cache)
{
    int ret;

    ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
    if (ret == 0)
        RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
    rte_mempool_trace_generic_get(mp, obj_table, n, cache);
    return ret;
}
/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are not.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
    struct rte_mempool_cache *cache;

    cache = rte_mempool_default_cache(mp, rte_lcore_id());
    rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
    return rte_mempool_generic_get(mp, obj_table, n, cache);
}
/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are not.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
    return rte_mempool_get_bulk(mp, obj_p, 1);
}
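
/*
 * Typical data-path usage (illustrative; the burst size is hypothetical):
 * allocate a burst, use it, and return it. Bulk operations amortize the
 * per-object cost of touching the cache and the common pool.
 *
 *    void *objs[32];
 *
 *    if (rte_mempool_get_bulk(mp, objs, 32) < 0)
 *        return;    (pool exhausted; retry later)
 *    ... use the 32 objects ...
 *    rte_mempool_put_bulk(mp, objs, 32);
 */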
/**
 * Get contiguous blocks of objects from the mempool.
 *
 * If cache is enabled, consider flushing it first, to reuse objects
 * as soon as possible.
 *
 * The application should check that the driver supports the operation
 * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
 * is not zero.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param first_obj_table
 *   A pointer to a pointer to the first object in each block.
 * @param n
 *   The number of blocks to get from mempool.
 * @return
 *   - 0: Success; blocks taken.
 *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
 *   - -EOPNOTSUPP: The mempool driver does not support block dequeue.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
    void **first_obj_table, unsigned int n)
{
    int ret;

    ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
    if (ret == 0) {
        RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
        RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
        RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
            1);
    } else {
        RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
        RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
    }

    rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
    return ret;
}
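
/*
 * Usage sketch (illustrative; the block count is hypothetical): check the
 * driver's support before using block dequeue, as documented above.
 *
 *    struct rte_mempool_info info;
 *    void *first_objs[4];
 *
 *    if (rte_mempool_ops_get_info(mp, &info) != 0 ||
 *        info.contig_block_size == 0)
 *        return;    (driver has no block dequeue support)
 *    if (rte_mempool_get_contig_blocks(mp, first_objs, 4) == 0) {
 *        ... use 4 blocks of info.contig_block_size objects each ...
 *    }
 */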
/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
/**
 * Return the number of elements which have been allocated from the mempool
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries allocated from the mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);
/**
 * Test if the mempool is full.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
    return rte_mempool_avail_count(mp) == mp->size;
}
/**
 * Test if the mempool is empty.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
    return rte_mempool_avail_count(mp) == 0;
}
/**
 * Return the IO address of elt, which is an element of the pool mp.
 *
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The IO address of the elt element.
 *   If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
 *   returned value is RTE_BAD_IOVA.
 */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
    const struct rte_mempool_objhdr *hdr;

    hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
        sizeof(*hdr));
    return hdr->iova;
}
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(struct rte_mempool *mp);
/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
    return (char *)mp +
        RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}
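
/*
 * Usage sketch (illustrative; "struct my_priv" is hypothetical): a pool
 * created with private_data_size = sizeof(struct my_priv) can store
 * application data after the mempool structure.
 *
 *    struct my_priv *priv = rte_mempool_get_priv(mp);
 *
 *    priv->counter = 0;
 */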
/**
 * Dump the status of all mempools on the console
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_mempool_list_dump(FILE *f);
/**
 * Search a mempool from its name
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - ENOENT - required entry not available to return.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);
/**
 * Get the header, trailer and total size of a mempool element.
 *
 * Given a desired size of the mempool element and mempool flags,
 * calculates header, trailer, body and total sizes of the mempool object.
 *
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
    struct rte_mempool_objsz *sz);
/**
 * Walk list of all memory pools
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
    void *arg);
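
/*
 * Example (illustrative): dump every mempool in the system to stdout.
 *
 *    static void
 *    dump_one(struct rte_mempool *mp, void *arg)
 *    {
 *        rte_mempool_dump((FILE *)arg, mp);
 *    }
 *
 *    rte_mempool_walk(dump_one, stdout);
 */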
/**
 * @internal Get page size used for mempool object allocation.
 * This function is internal to mempool library and mempool drivers.
 */
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
/**
 * Mempool event type.
 * @internal
 */
enum rte_mempool_event {
    /** Occurs after a mempool is fully populated. */
    RTE_MEMPOOL_EVENT_READY = 0,
    /** Occurs before the destruction of a mempool begins. */
    RTE_MEMPOOL_EVENT_DESTROY = 1,
};
/**
 * @internal
 * Mempool event callback.
 *
 * rte_mempool_event_callback_register() may be called from within the callback,
 * but the callbacks registered this way will not be invoked for the same event.
 * rte_mempool_event_callback_unregister() may only be safely called
 * to remove the running callback.
 */
typedef void (rte_mempool_event_callback)(
    enum rte_mempool_event event,
    struct rte_mempool *mp,
    void *user_data);
/**
 * @internal
 * Register a callback function invoked on mempool life cycle event.
 * The function will be invoked in the process
 * that performs an action which triggers the callback.
 *
 * @param func
 *   Callback function.
 * @param user_data
 *   User data.
 * @return
 *   0 on success, negative on failure and rte_errno is set.
 */
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
    void *user_data);
/**
 * @internal
 * Unregister a callback added with rte_mempool_event_callback_register().
 * @p func and @p user_data must exactly match registration parameters.
 *
 * @param func
 *   Callback function.
 * @param user_data
 *   User data.
 * @return
 *   0 on success, negative on failure and rte_errno is set.
 */
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
    void *user_data);

#endif /* _RTE_MEMPOOL_H_ */