/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2016 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_
/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name, and uses a ring to store free objects. It
 * provides some other optional services, such as a per-core object
 * cache and an alignment helper that pads objects so that they are
 * spread equally across all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or an
 * equivalent function, the object data is not modified; the user can
 * save some metadata in the object data and retrieve it when
 * allocating a new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must not be
 * interrupted by another task that uses the same mempool (because it uses a
 * ring which is not preemptible). Also, usual mempool functions like
 * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
 * thread due to the internal per-lcore cache. Because that cache is not
 * available to them, rte_mempool_get() and rte_mempool_put() performance
 * suffers when called by non-EAL threads. Instead, non-EAL threads should
 * call rte_mempool_generic_get() or rte_mempool_generic_put() with a user
 * cache created with rte_mempool_cache_create().
 */
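/*
 * Example (an illustrative sketch, not part of the API): a non-EAL
 * thread using a user-owned cache with the generic get/put calls.
 * The pool name "test_pool" and the cache size of 32 are assumptions
 * made only for this example.
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("test_pool");
 *	struct rte_mempool_cache *cache =
 *		rte_mempool_cache_create(32, SOCKET_ID_ANY);
 *	void *obj;
 *
 *	if (mp != NULL && cache != NULL) {
 *		if (rte_mempool_generic_get(mp, &obj, 1, cache) == 0) {
 *			... use the object, then return it ...
 *			rte_mempool_generic_put(mp, &obj, 1, cache);
 *		}
 *		rte_mempool_cache_flush(cache, mp);
 *		rte_mempool_cache_free(cache);
 *	}
 */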
#include <sys/queue.h>

#include <rte_spinlock.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	uint32_t size;        /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;         /**< Current cache count */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};
/**< Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically contiguous memory */
#define MEMPOOL_PG_NUM_DEFAULT	1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure
 *
 * Each object stored in a mempool is prefixed by this header structure.
 * It makes it possible to retrieve the mempool pointer from the object
 * and to iterate over all objects attached to a mempool. When debug is
 * enabled, a cookie is also added in this structure, preventing
 * corruptions and double-frees.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the object. */
	phys_addr_t physaddr;    /**< Physical address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;         /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in a mempool is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
	uint64_t cookie; /**< Debug cookie. */
};
#endif

/**
 * A list of memory where objects are stored
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
	STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk */
	void *addr;              /**< Virtual address of the chunk */
	phys_addr_t phys_addr;   /**< Physical address of the chunk */
	size_t len;              /**< Length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;            /**< Argument passed to the free callback */
};
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements; it could be changed to
	 * RTE_MEMPOOL_NAMESIZE next time the ABI changes.
	 */
	char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
	RTE_STD_C11
	union {
		void *pool_data;   /**< Ring or pool to store objects. */
		uint64_t pool_id;  /**< External mempool identifier. */
	};
	void *pool_config;         /**< Optional args for ops alloc. */
	const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
	unsigned int flags;        /**< Flags of the mempool. */
	int socket_id;             /**< Socket id passed at create. */
	uint32_t size;             /**< Max size of the mempool. */
	uint32_t cache_size;
	/**< Size of per-lcore default local cache. */

	uint32_t elt_size;         /**< Size of an element. */
	uint32_t header_size;      /**< Size of header (before elt). */
	uint32_t trailer_size;     /**< Size of trailer (after elt). */

	unsigned private_data_size; /**< Size of private data. */
	/**
	 * Index into rte_mempool_ops_table array of mempool ops
	 * structs, which contain callback function pointers.
	 * We're using an index here rather than pointers to the callbacks
	 * to facilitate any secondary processes that may want to use
	 * this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;   /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;    /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
		unsigned __lcore_id = rte_lcore_id();           \
		if (__lcore_id < RTE_MAX_LCORE) {               \
			mp->stats[__lcore_id].name##_objs += n; \
			mp->stats[__lcore_id].name##_bulk += 1; \
		}                                               \
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
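/*
 * Illustrative sketch (added for clarity; widths are not to scale):
 * the per-object layout assumed by the accessors above.
 *
 *	+-----------------------------+ <- __mempool_get_header(obj)
 *	| struct rte_mempool_objhdr   |
 *	+-----------------------------+ <- obj, as handed to the user
 *	| element (elt_size bytes)    |
 *	+-----------------------------+ <- __mempool_get_trailer(obj)
 *	| struct rte_mempool_objtlr   |    (debug builds only)
 *	+-----------------------------+
 */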
/**
 * @internal Check and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the object table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 * E.g. the default ops provides an instance of the rte_ring for this purpose.
 * For other ops, it will most likely point to a different type of data
 * structure, and will be transparent to the application programmer.
 * This function should set mp->pool_data.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/**
 * Get the mempool capabilities.
 */
typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
		unsigned int *flags);
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/**
	 * Get the mempool capabilities
	 */
	rte_mempool_get_capabilities_t get_capabilities;
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */

/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 * Each process has its own storage for this ops struct array so that
 * the mempools can be shared across primary and secondary processes.
 * The indices used to access the array are valid across processes, whereas
 * any function pointers stored directly in the mempool struct would not be.
 * This results in us simply having "ops_index" in the mempool struct.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/**
	 * Storage for all possible ops structs.
	 */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;
/**
 * @internal Get the mempool ops struct from its index.
 *
 * @param ops_index
 *   The index of the ops struct in the ops struct table. It must be a valid
 *   index: (0 <= idx < num_ops).
 * @return
 *   The pointer to the ops struct in the table.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}
/**
 * @internal Wrapper for mempool_ops alloc callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @return
 *   - 0: Success; successfully allocated mempool pool_data.
 *   - <0: Error; code of alloc function.
 */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects to get.
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->dequeue(mp, obj_table, n);
}
/**
 * @internal wrapper for mempool_ops enqueue callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects to put.
 * @return
 *   - 0: Success; n objects supplied.
 *   - <0: Error; code of enqueue function.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}
/**
 * @internal wrapper for mempool_ops get_count callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @return
 *   The number of available objects in the external pool.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal wrapper for mempool_ops get_capabilities callback.
 *
 * @param mp [in]
 *   Pointer to the memory pool.
 * @param flags [out]
 *   Pointer to the mempool flags.
 * @return
 *   - 0: Success; the mempool driver has advertised its pool capabilities in
 *     the flags param.
 *   - -ENOTSUP - doesn't support get_capabilities ops (valid case).
 *   - Otherwise, pool create fails.
 */
int
rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
					unsigned int *flags);

/**
 * @internal wrapper for mempool_ops free callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);
/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just after
 * a call to rte_mempool_create_empty().
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the ops structure to use for this mempool.
 * @param pool_config
 *   Opaque data that can be passed by the application to the ops functions.
 * @return
 *   - 0: Success; the mempool is now using the requested ops functions.
 *   - -EINVAL - Invalid ops struct name provided.
 *   - -EEXIST - mempool already has an ops struct assigned.
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);
/**
 * Register mempool operations.
 *
 * @param ops
 *   Pointer to an ops structure to register.
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - -EINVAL - some missing callbacks while registering ops struct.
 *   - -ENOSPC - the maximum number of ops structs has been reached.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
 */
#define MEMPOOL_REGISTER_OPS(ops)					\
	void mp_hdlr_init_##ops(void);					\
	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
	{								\
		rte_mempool_register_ops(&ops);				\
	}
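/*
 * Example (an illustrative sketch only; the "my_" names are invented
 * for this example): registering a custom mempool handler. Each
 * callback must match the typedefs above, and the handler can then be
 * selected by name with rte_mempool_set_ops_byname() on an empty pool.
 *
 *	static struct rte_mempool_ops my_ops = {
 *		.name = "my_handler",
 *		.alloc = my_alloc,       // must set mp->pool_data
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	MEMPOOL_REGISTER_OPS(my_ops);
 */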
/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``rte_memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be lower than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. The access to the per-lcore table is of course
 *   faster than the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in cache.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producer".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumer".
 *   - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
 *     necessarily be contiguous in physical memory.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);
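/*
 * Example (an illustrative sketch; the pool name, element count and
 * sizes are arbitrary): creating a pool of 8191 (2^13 - 1) elements
 * of 2 KB each, with a 256-object per-lcore cache and no constructors.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *				NULL, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mempool: %s\n",
 *			 rte_strerror(rte_errno));
 */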
/**
 * Create a new mempool named *name* in memory.
 *
 * The pool contains n elements of elt_size. Its size is set to n.
 * This function uses ``memzone_reserve()`` to allocate the mempool header
 * (and the objects if vaddr is NULL).
 * Depending on the input parameters, mempool elements can be either allocated
 * together with the mempool header, or an externally provided memory buffer
 * could be used to store mempool objects. In the latter case, that external
 * memory buffer can consist of a set of disjoint physical pages.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function called for each object at initialization of the pool.
 *   See rte_mempool_create() for details.
 * @param obj_init_arg
 *   An opaque pointer passed to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given
 *   memory buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, void *vaddr,
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but it is not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once
 * populated, the user may also want to initialize each object with
 * rte_mempool_obj_iter(). See the sketch after this declaration.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The maximum number of elements that can be added in the mempool.
 *   The optimum size (in terms of memory usage) for a mempool is when n
 *   is a power of two minus one: n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
	unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);
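/*
 * Example (an illustrative sketch; names and sizes are arbitrary,
 * "ring_mp_mc" is the default ring-based handler, and "my_obj_init"
 * is an invented callback name): building a pool step by step instead
 * of using rte_mempool_create().
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 8191, 2048, 256, 0,
 *				      SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		... handle error ...
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0 ||
 *	    rte_mempool_populate_default(mp) < 0) {
 *		rte_mempool_free(mp);
 *		... handle error ...
 *	}
 *	rte_mempool_obj_iter(mp, my_obj_init, NULL); // optional per-object init
 */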
/**
 * Free a mempool
 *
 * Unlink the mempool from the global list, free the memory chunks, and
 * all memory referenced by the mempool. The objects must not be used by
 * other cores as they will be freed.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void
rte_mempool_free(struct rte_mempool *mp);
/**
 * Add physically contiguous memory for objects in the pool at init
 *
 * Add a virtually and physically contiguous memory chunk in the pool
 * where objects can be instantiated.
 *
 * If the given physical address is unknown (paddr = RTE_BAD_PHYS_ADDR),
 * the chunk doesn't need to be physically contiguous (only virtually),
 * and allocated objects may span two pages.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param paddr
 *   The physical address
 * @param len
 *   The length of memory in bytes.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
	phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);
/**
 * Add physical memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated. The physical addresses corresponding to the virtual
 * area are described in paddr[], pg_num, pg_shift.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param paddr
 *   An array of physical addresses of each page composing the virtual
 *   area.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success.
 *   On error, the chunks are not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
/**
 * Add virtually contiguous memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param addr
 *   The virtual address of memory that should be used to store objects.
 *   Must be page-aligned.
 * @param len
 *   The length of memory in bytes. Must be page-aligned.
 * @param pg_sz
 *   The size of memory pages in this virtual area.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);
/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool. It adds memory allocated using rte_memzone_reserve().
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);
/**
 * Add memory from anonymous mapping for objects in the pool at init
 *
 * This function mmaps an anonymous memory zone that is locked in
 * memory to store the objects of the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);
/**
 * Call a function for each mempool element
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
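/*
 * Example (an illustrative sketch; "my_obj_init" is an invented name):
 * a callback matching rte_mempool_obj_cb_t that zeroes each element,
 * run once over a freshly populated pool.
 *
 *	static void
 *	my_obj_init(struct rte_mempool *mp, void *arg, void *obj,
 *		    unsigned obj_idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *
 *	// after the pool is populated:
 *	uint32_t count = rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */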
/**
 * Call a function for each mempool memory chunk
 *
 * Iterate across all memory chunks attached to a rte_mempool and call
 * the callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param mem_cb
 *   A function pointer that is called for each memory chunk.
 * @param mem_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
/**
 * Dump the status of the mempool to a file.
 *
 * @param f
 *   A pointer to a file for output
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
/**
 * Create a user-owned mempool cache.
 *
 * This can be used by non-EAL threads to enable caching when they
 * interact with a mempool.
 *
 * @param size
 *   The size of the mempool cache. See rte_mempool_create()'s cache_size
 *   parameter description for more information. The same limits and
 *   considerations apply here too.
 * @param socket_id
 *   The socket identifier in the case of NUMA. The value can be
 *   SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
 * @return
 *   A pointer to the mempool cache, or NULL if it could not be created.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 *
 * @param cache
 *   A pointer to the mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);
/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache.
 * @param mp
 *   A pointer to the mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}
/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param lcore_id
 *   The logical core id.
 * @return
 *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	return &mp->local_cache[lcore_id];
}
/**
 * @internal Put several objects back in the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool, must be strictly
 *   positive.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* increment stat now; adding to the mempool always succeeds */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* No cache provided, or the put would overflow the memory allocated
	 * for the cache */
	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (if it crosses the
	 *      cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

	cache->len += n;

	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from the obj_table.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_generic_put(mp, obj_table, n, cache);
}
/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_generic_put(mp, obj_table, n, cache);
}
/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
/**
 * @internal Get several objects from the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to get, must be strictly positive.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided, or the request cannot be satisfied from cache */
	if (unlikely(cache == NULL || n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it. */
		/* How many do we require, i.e. number to fill the cache + the request */
		uint32_t req = n + (cache->size - cache->len);

		ret = rte_mempool_ops_dequeue_bulk(mp,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out
			 * of buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}
/**
 * Get several objects from the mempool.
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from mempool to obj_table.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	ret = __mempool_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}
/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
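/*
 * Example (an illustrative sketch): the usual get/use/put cycle from
 * an EAL thread. On success, the object must eventually be returned
 * to the same mempool it came from.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) < 0)
 *		... handle allocation failure ...
 *	... use the object ...
 *	rte_mempool_put(mp, obj);
 */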
/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
/**
 * Return the number of elements which have been allocated from the mempool.
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of elements allocated from (i.e. currently in use in)
 *   the mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);
/**
 * Test if the mempool is full.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_avail_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_avail_count(mp) == 0);
}
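/*
 * Example (an illustrative sketch): a debug-time sanity check that all
 * objects were returned to the pool, e.g. before freeing it.
 *
 *	if (!rte_mempool_full(mp))
 *		printf("mempool %s leaked %u objects\n", mp->name,
 *		       rte_mempool_in_use_count(mp));
 */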
/**
 * Return the physical address of elt, which is an element of the pool mp.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The physical address of the elt element.
 *   If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
 *   returned value is RTE_BAD_PHYS_ADDR.
 */
static inline phys_addr_t
rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->physaddr;
}
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(struct rte_mempool *mp);
/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}
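/*
 * Example (an illustrative sketch; "struct my_pool_priv" is an
 * invented type): reserving per-pool private data at creation time
 * and retrieving it afterwards.
 *
 *	struct my_pool_priv { uint32_t seed; };
 *
 *	mp = rte_mempool_create("example_pool", 8191, 2048, 256,
 *				sizeof(struct my_pool_priv),
 *				NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 *	if (mp != NULL) {
 *		struct my_pool_priv *priv = rte_mempool_get_priv(mp);
 *		priv->seed = 42;
 *	}
 */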
/**
 * Dump the status of all mempools on the console
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);
/**
 * Get the header, trailer and total size of a mempool element.
 *
 * Given a desired size of the mempool element and mempool flags,
 * calculates header, trailer, body and total sizes of the mempool object.
 *
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);
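/*
 * Example (an illustrative sketch): computing the full per-object
 * footprint of a 2 KB element created with default flags.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *	// total == sz.header_size + sz.elt_size + sz.trailer_size
 */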
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate the maximum amount of memory required to store the given
 * number of objects. Assume that the memory buffer will be aligned at a
 * page boundary.
 *
 * Note that if the object size is bigger than the page size, then it
 * assumes that pages are grouped in subsets of physically contiguous
 * pages big enough to store at least one object.
 *
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param pg_shift
 *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
 * @param flags
 *   The mempool flags.
 * @return
 *   Required memory size aligned at page boundary.
 */
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift, unsigned int flags);
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate how much memory would actually be required with the given
 * memory footprint to store the required number of objects.
 *
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given
 *   memory buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @param flags
 *   The mempool flags.
 * @return
 *   On success, the number of bytes needed to store given number of
 *   objects, aligned to the given page size. If the provided memory
 *   buffer is too small, return a negative value whose absolute value
 *   is the actual number of elements that can be stored in that buffer.
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
	uint32_t pg_shift, unsigned int flags);
/**
 * Walk list of all memory pools
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);
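/*
 * Example (an illustrative sketch; "dump_one" is an invented name):
 * dumping the status of every mempool in the system via
 * rte_mempool_walk().
 *
 *	static void
 *	dump_one(struct rte_mempool *mp, void *arg)
 *	{
 *		rte_mempool_dump((FILE *)arg, mp);
 *	}
 *
 *	rte_mempool_walk(dump_one, stdout);
 */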
#endif /* _RTE_MEMPOOL_H_ */