4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5 * Copyright(c) 2016 6WIND S.A.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #ifndef _RTE_MEMPOOL_H_
36 #define _RTE_MEMPOOL_H_
42 * A memory pool is an allocator of fixed-size objects. It is
43 * identified by its name and uses a ring to store free objects. It
44 * provides some other optional services, like a per-core object
45 * cache and an alignment helper to ensure that objects are padded
46 * to spread them evenly across all RAM channels, ranks, and so on.
48 * Objects owned by a mempool should never be added to another
49 * mempool. When an object is freed using rte_mempool_put() or
50 * equivalent, the object data is not modified; the user can save some
51 * meta-data in the object data and retrieve it when allocating a new object.
54 * Note: the mempool implementation is not preemptible. An lcore must
55 * not be interrupted by another task that uses the same mempool
56 * (because it uses a ring which is not preemptible). Also, mempool
57 * functions must not be used outside the DPDK environment: for
58 * example, in the linuxapp environment, a thread that is not created
59 * by the EAL must not use mempools, because the per-lcore cache
60 * cannot work: rte_lcore_id() will not return a valid value for it.
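/*
 * Illustrative sketch (not part of the original header): a minimal
 * create/get/put cycle. The pool name, element size and count below are
 * hypothetical values chosen for the example.
 *
 *	struct rte_mempool *mp;
 *	void *obj;
 *
 *	mp = rte_mempool_create("example_pool", 1023, 64, 32, 0,
 *				NULL, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create mempool\n");
 *
 *	if (rte_mempool_get(mp, &obj) == 0) {
 *		... use the 64-byte object ...
 *		rte_mempool_put(mp, obj);
 *	}
 */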
68 #include <sys/queue.h>
71 #include <rte_debug.h>
72 #include <rte_lcore.h>
73 #include <rte_memory.h>
74 #include <rte_branch_prediction.h>
81 #define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
82 #define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
83 #define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
85 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
87 * A structure that stores the mempool statistics (per-lcore).
89 struct rte_mempool_debug_stats {
90 uint64_t put_bulk; /**< Number of puts. */
91 uint64_t put_objs; /**< Number of objects successfully put. */
92 uint64_t get_success_bulk; /**< Successful allocation number. */
93 uint64_t get_success_objs; /**< Objects successfully allocated. */
94 uint64_t get_fail_bulk; /**< Failed allocation number. */
95 uint64_t get_fail_objs; /**< Objects that failed to be allocated. */
96 } __rte_cache_aligned;
100 * A structure that stores a per-core object cache.
102 struct rte_mempool_cache {
103 unsigned len; /**< Current cache length (number of objects). */
105 * The cache is allocated to this size to allow it to overflow in
106 * certain cases, avoiding a needless flush of the whole cache.
108 void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
109 } __rte_cache_aligned;
112 * A structure that stores the size of mempool elements.
114 struct rte_mempool_objsz {
115 uint32_t elt_size; /**< Size of an element. */
116 uint32_t header_size; /**< Size of header (before elt). */
117 uint32_t trailer_size; /**< Size of trailer (after elt). */
119 /**< Total size of an object (header + elt + trailer). */
122 #define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool name. */
123 #define RTE_MEMPOOL_MZ_PREFIX "MP_"
126 #define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
128 #ifdef RTE_LIBRTE_XEN_DOM0
130 /* "<name>_MP_elt" */
131 #define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
135 #define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT
137 #endif /* RTE_LIBRTE_XEN_DOM0 */
139 #define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
141 /** Mempool over one chunk of physically contiguous memory */
142 #define MEMPOOL_PG_NUM_DEFAULT 1
144 #ifndef RTE_MEMPOOL_ALIGN
145 #define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
148 #define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
151 * Mempool object header structure
153 * Each object stored in a mempool is prefixed by this header structure.
154 * It allows the mempool pointer to be retrieved from the object and
155 * iteration over all objects attached to a mempool. When debug is enabled,
156 * a cookie is also added to this structure, preventing corruption and double-frees.
159 struct rte_mempool_objhdr {
160 STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
161 struct rte_mempool *mp; /**< The mempool owning the object. */
162 phys_addr_t physaddr; /**< Physical address of the object. */
163 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
164 uint64_t cookie; /**< Debug cookie. */
169 * A type for a list of object headers.
171 STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
174 * Mempool object trailer structure
176 * In debug mode, each object stored in a mempool is suffixed by this
177 * trailer structure, which contains a cookie that helps detect memory corruption.
179 struct rte_mempool_objtlr {
180 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
181 uint64_t cookie; /**< Debug cookie. */
186 * The RTE mempool structure.
189 char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
190 struct rte_ring *ring; /**< Ring to store objects. */
191 phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
192 int flags; /**< Flags of the mempool. */
193 int socket_id; /**< Socket id passed at mempool creation. */
194 uint32_t size; /**< Number of objects in the mempool. */
195 uint32_t cache_size; /**< Size of per-lcore local cache. */
196 uint32_t cache_flushthresh;
197 /**< Threshold before we flush excess elements. */
199 uint32_t elt_size; /**< Size of an element. */
200 uint32_t header_size; /**< Size of header (before elt). */
201 uint32_t trailer_size; /**< Size of trailer (after elt). */
203 unsigned private_data_size; /**< Size of private data. */
205 struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */
207 struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
209 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
210 /** Per-lcore statistics. */
211 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
214 /* Address translation support, starts from next cache line. */
216 /** Number of elements in the elt_pa array. */
217 uint32_t pg_num __rte_cache_aligned;
218 uint32_t pg_shift; /**< LOG2 of the physical page size. */
219 uintptr_t pg_mask; /**< Physical page mask value. */
220 uintptr_t elt_va_start;
221 /**< Virtual address of the first mempool object. */
222 uintptr_t elt_va_end;
223 /**< Virtual address one past the last mempool object (end of the element area). */
224 phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
225 /**< Array of physical page addresses for the mempool objects buffer. */
227 } __rte_cache_aligned;
229 #define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread among memory channels. */
230 #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
231 #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
232 #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
235 * @internal When debug is enabled, store some statistics.
238 * Pointer to the memory pool.
240 * Name of the statistics field to increment in the memory pool.
242 * Number of objects to add to the statistics.
244 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
245 #define __MEMPOOL_STAT_ADD(mp, name, n) do { \
246 unsigned __lcore_id = rte_lcore_id(); \
247 if (__lcore_id < RTE_MAX_LCORE) { \
248 mp->stats[__lcore_id].name##_objs += n; \
249 mp->stats[__lcore_id].name##_bulk += 1; \
253 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
257 * Calculate the size of the elt_pa array based on the number of pages. (Internal use)
259 #define __PA_SIZE(mp, pgn) \
260 RTE_ALIGN_CEIL((((pgn) - RTE_DIM((mp)->elt_pa)) * \
261 sizeof((mp)->elt_pa[0])), RTE_CACHE_LINE_SIZE)
264 * Calculate the size of the mempool header.
267 * Pointer to the memory pool.
269 * Number of pages used to store mempool objects.
271 * Size of the per-lcore cache.
273 #define MEMPOOL_HEADER_SIZE(mp, pgn, cs) \
274 (sizeof(*(mp)) + __PA_SIZE(mp, pgn) + (((cs) == 0) ? 0 : \
275 (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
278 * Return true if the whole mempool is in contiguous memory.
280 #define MEMPOOL_IS_CONTIG(mp) \
281 ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
282 (mp)->phys_addr == (mp)->elt_pa[0])
284 /* return the header of a mempool object (internal) */
285 static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
287 return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
288 sizeof(struct rte_mempool_objhdr));
292 * Return a pointer to the mempool owning this object.
295 * An object that is owned by a pool. If this is not the case,
296 * the behavior is undefined.
298 * A pointer to the mempool structure.
300 static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
302 struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
306 /* return the trailer of a mempool object (internal) */
307 static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
309 struct rte_mempool *mp = rte_mempool_from_obj(obj);
310 return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
314 * @internal Check and update cookies or panic.
317 * Pointer to the memory pool.
318 * @param obj_table_const
319 * Pointer to a table of void * pointers (objects).
321 * Number of objects in the object table.
323 * - 0: object is supposed to be allocated, mark it as free
324 * - 1: object is supposed to be free, mark it as allocated
325 * - 2: just check that cookie is valid (free or allocated)
327 void rte_mempool_check_cookies(const struct rte_mempool *mp,
328 void * const *obj_table_const, unsigned n, int free);
330 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
331 #define __mempool_check_cookies(mp, obj_table_const, n, free) \
332 rte_mempool_check_cookies(mp, obj_table_const, n, free)
334 #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
335 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
338 * An object callback function for mempool.
340 * Used by rte_mempool_create() and rte_mempool_obj_iter().
342 typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
343 void *opaque, void *obj, unsigned obj_idx);
344 typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
347 * A mempool constructor callback function.
349 * Arguments are the mempool and the opaque pointer given by the user in
350 * rte_mempool_create().
352 typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
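/*
 * Illustrative sketch (not part of the original header): a mempool
 * constructor matching rte_mempool_ctor_t that fills the pool's private
 * data area. The struct my_pool_priv type and my_pool_init name are
 * hypothetical.
 *
 *	struct my_pool_priv { uint32_t seq; };
 *
 *	static void
 *	my_pool_init(struct rte_mempool *mp, void *arg)
 *	{
 *		struct my_pool_priv *priv = rte_mempool_get_priv(mp);
 *
 *		priv->seq = *(const uint32_t *)arg;
 *	}
 *
 * Pass my_pool_init as mp_init and sizeof(struct my_pool_priv) as
 * private_data_size to rte_mempool_create().
 */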
355 * Create a new mempool named *name* in memory.
357 * This function uses ``memzone_reserve()`` to allocate memory. The
358 * pool contains n elements of elt_size. Its size is set to n.
359 * All elements of the mempool are allocated together with the mempool header,
360 * in one physically contiguous chunk of memory.
363 * The name of the mempool.
365 * The number of elements in the mempool. The optimum size (in terms of
366 * memory usage) for a mempool is when n is a power of two minus one:
369 * The size of each element.
371 * If cache_size is non-zero, the rte_mempool library will try to
372 * limit the accesses to the common lockless pool by maintaining a
373 * per-lcore object cache. This argument must be lower than or equal to
374 * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
375 * cache_size such that "n modulo cache_size == 0": if this is
376 * not the case, some elements will always stay in the pool and will
377 * never be used. The access to the per-lcore table is of course
378 * faster than the multi-producer/consumer pool. The cache can be
379 * disabled if the cache_size argument is set to 0; it can be useful to
380 * avoid losing objects in the cache. Note that even if not used, the
381 * memory space for the cache is always reserved in the mempool structure,
382 * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
383 * @param private_data_size
384 * The size of the private data appended after the mempool
385 * structure. This is useful for storing some private data after the
386 * mempool structure, as is done for rte_mbuf_pool for example.
388 * A function pointer that is called for initialization of the pool,
389 * before object initialization. The user can initialize the private
390 * data in this function if needed. This parameter can be NULL if
393 * An opaque pointer to data that can be used in the mempool
394 * constructor function.
396 * A function pointer that is called for each object at
397 * initialization of the pool. The user can set some meta data in
398 * objects if needed. This parameter can be NULL if not needed.
399 * The obj_init() function takes the mempool pointer, the init_arg,
400 * the object pointer and the object number as parameters.
401 * @param obj_init_arg
402 * An opaque pointer to data that can be used as an argument for
403 * each call to the object constructor function.
405 * The *socket_id* argument is the socket identifier in the case of
406 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
407 * constraint for the reserved zone.
409 * The *flags* argument is an OR of the following flags:
410 * - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
411 * between channels in RAM: the pool allocator will add padding
412 * between objects depending on the hardware configuration. See
413 * Memory alignment constraints for details. If this flag is set,
414 * the allocator will just align them to a cache line.
415 * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
416 * cache-aligned. This flag removes this constraint, and no
417 * padding will be present between objects. This flag implies
418 * MEMPOOL_F_NO_SPREAD.
419 * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
420 * when using rte_mempool_put() or rte_mempool_put_bulk() is
421 * "single-producer". Otherwise, it is "multi-producers".
422 * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
423 * when using rte_mempool_get() or rte_mempool_get_bulk() is
424 * "single-consumer". Otherwise, it is "multi-consumers".
426 * The pointer to the newly allocated mempool, on success. NULL on error
427 * with rte_errno set appropriately. Possible rte_errno values include:
428 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
429 * - E_RTE_SECONDARY - function was called from a secondary process instance
430 * - EINVAL - cache size provided is too large
431 * - ENOSPC - the maximum number of memzones has already been allocated
432 * - EEXIST - a memzone with the same name already exists
433 * - ENOMEM - no appropriate memory area found in which to create memzone
436 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
437 unsigned cache_size, unsigned private_data_size,
438 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
439 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
440 int socket_id, unsigned flags);
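/*
 * Illustrative sketch (not part of the original header): creating a pool
 * of 2^11 - 1 objects with a per-lcore cache on the caller's socket.
 * my_pool_priv, my_pool_init, my_obj_init and seq0 are hypothetical names
 * (see the constructor sketch above).
 *
 *	struct rte_mempool *mp;
 *	uint32_t seq0 = 0;
 *
 *	mp = rte_mempool_create("my_pool", 2047, 64, 32,
 *				sizeof(struct my_pool_priv),
 *				my_pool_init, &seq0,
 *				my_obj_init, NULL,
 *				rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_panic("mempool creation failed, rte_errno=%d\n", rte_errno);
 */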
443 * Create a new mempool named *name* in memory.
445 * The pool contains n elements of elt_size. Its size is set to n.
446 * This function uses ``memzone_reserve()`` to allocate the mempool header
447 * (and the objects if vaddr is NULL).
448 * Depending on the input parameters, mempool elements can be either allocated
449 * together with the mempool header, or an externally provided memory buffer
450 * could be used to store mempool objects. In the latter case, the external
451 * memory buffer can consist of a set of disjoint physical pages.
454 * The name of the mempool.
456 * The number of elements in the mempool. The optimum size (in terms of
457 * memory usage) for a mempool is when n is a power of two minus one:
460 * The size of each element.
462 * Size of the cache. See rte_mempool_create() for details.
463 * @param private_data_size
464 * The size of the private data appended after the mempool
465 * structure. This is useful for storing some private data after the
466 * mempool structure, as is done for rte_mbuf_pool for example.
468 * A function pointer that is called for initialization of the pool,
469 * before object initialization. The user can initialize the private
470 * data in this function if needed. This parameter can be NULL if
473 * An opaque pointer to data that can be used in the mempool
474 * constructor function.
476 * A function called for each object at initialization of the pool.
477 * See rte_mempool_create() for details.
478 * @param obj_init_arg
479 * An opaque pointer passed to the object constructor function.
481 * The *socket_id* argument is the socket identifier in the case of
482 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
483 * constraint for the reserved zone.
485 * Flags controlling the behavior of the mempool. See
486 * rte_mempool_create() for details.
488 * Virtual address of the externally allocated memory buffer.
489 * Will be used to store mempool objects.
491 * Array of physical addresses of the pages that comprise the given memory buffer.
494 * Number of elements in the paddr array.
496 * LOG2 of the physical page size.
498 * The pointer to the newly allocated mempool, on success. NULL on error
499 * with rte_errno set appropriately. See rte_mempool_create() for details.
502 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
503 unsigned cache_size, unsigned private_data_size,
504 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
505 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
506 int socket_id, unsigned flags, void *vaddr,
507 const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
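/*
 * Illustrative sketch (not part of the original header) of the expected
 * calling sequence when the objects live in an externally allocated
 * buffer: size the buffer first, then hand it over. The buffer va, the
 * page array pa[] and pg_num/pg_shift come from the caller and are
 * hypothetical here.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total_elt_sz = rte_mempool_calc_obj_size(elt_size, flags, &sz);
 *	size_t len = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
 *
 *	... allocate at least "len" bytes at va, fill pa[] with the
 *	    physical address of each page ...
 *
 *	mp = rte_mempool_xmem_create("ext_pool", n, elt_size, 32, 0,
 *				     NULL, NULL, NULL, NULL,
 *				     SOCKET_ID_ANY, flags,
 *				     va, pa, pg_num, pg_shift);
 */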
510 * Create a new mempool named *name* in memory on Xen Dom0.
512 * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
513 * pool contains n elements of elt_size. Its size is set to n.
514 * All elements of the mempool are allocated together with the mempool header,
515 * and the memory buffer can consist of a set of disjoint physical pages.
518 * The name of the mempool.
520 * The number of elements in the mempool. The optimum size (in terms of
521 * memory usage) for a mempool is when n is a power of two minus one:
524 * The size of each element.
526 * If cache_size is non-zero, the rte_mempool library will try to
527 * limit the accesses to the common lockless pool by maintaining a
528 * per-lcore object cache. This argument must be lower than or equal to
529 * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
530 * cache_size such that "n modulo cache_size == 0": if this is
531 * not the case, some elements will always stay in the pool and will
532 * never be used. The access to the per-lcore table is of course
533 * faster than the multi-producer/consumer pool. The cache can be
534 * disabled if the cache_size argument is set to 0; it can be useful to
535 * avoid losing objects in the cache. Note that even if not used, the
536 * memory space for the cache is always reserved in the mempool structure,
537 * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
538 * @param private_data_size
539 * The size of the private data appended after the mempool
540 * structure. This is useful for storing some private data after the
541 * mempool structure, as is done for rte_mbuf_pool for example.
543 * A function pointer that is called for initialization of the pool,
544 * before object initialization. The user can initialize the private
545 * data in this function if needed. This parameter can be NULL if
548 * An opaque pointer to data that can be used in the mempool
549 * constructor function.
551 * A function pointer that is called for each object at
552 * initialization of the pool. The user can set some meta data in
553 * objects if needed. This parameter can be NULL if not needed.
554 * The obj_init() function takes the mempool pointer, the init_arg,
555 * the object pointer and the object number as parameters.
556 * @param obj_init_arg
557 * An opaque pointer to data that can be used as an argument for
558 * each call to the object constructor function.
560 * The *socket_id* argument is the socket identifier in the case of
561 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
562 * constraint for the reserved zone.
564 * The *flags* argument is an OR of the following flags:
565 * - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
566 * between channels in RAM: the pool allocator will add padding
567 * between objects depending on the hardware configuration. See
568 * Memory alignment constraints for details. If this flag is set,
569 * the allocator will just align them to a cache line.
570 * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
571 * cache-aligned. This flag removes this constraint, and no
572 * padding will be present between objects. This flag implies
573 * MEMPOOL_F_NO_SPREAD.
574 * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
575 * when using rte_mempool_put() or rte_mempool_put_bulk() is
576 * "single-producer". Otherwise, it is "multi-producers".
577 * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
578 * when using rte_mempool_get() or rte_mempool_get_bulk() is
579 * "single-consumer". Otherwise, it is "multi-consumers".
581 * The pointer to the newly allocated mempool, on success. NULL on error
582 * with rte_errno set appropriately. Possible rte_errno values include:
583 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
584 * - E_RTE_SECONDARY - function was called from a secondary process instance
585 * - EINVAL - cache size provided is too large
586 * - ENOSPC - the maximum number of memzones has already been allocated
587 * - EEXIST - a memzone with the same name already exists
588 * - ENOMEM - no appropriate memory area found in which to create memzone
591 rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
592 unsigned cache_size, unsigned private_data_size,
593 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
594 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
595 int socket_id, unsigned flags);
599 * Call a function for each mempool element
601 * Iterate across all objects attached to a rte_mempool and call the
602 * callback function on each of them.
605 * A pointer to an initialized mempool.
607 * A function pointer that is called for each object.
609 * An opaque pointer passed to the callback function.
611 * Number of objects iterated.
613 uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
614 rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
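/*
 * Illustrative sketch (not part of the original header): a callback
 * matching rte_mempool_obj_cb_t that tags every object with its index.
 * struct my_obj and the tag_obj name are hypothetical.
 *
 *	static void
 *	tag_obj(struct rte_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
 *	{
 *		((struct my_obj *)obj)->id = obj_idx;
 *		(*(uint32_t *)opaque)++;
 *	}
 *
 *	uint32_t seen = 0;
 *	uint32_t n = rte_mempool_obj_iter(mp, tag_obj, &seen);
 */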
617 * Dump the status of the mempool to the console.
620 * A pointer to a file for output
622 * A pointer to the mempool structure.
624 void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
627 * @internal Put several objects back in the mempool; used internally.
629 * A pointer to the mempool structure.
631 * A pointer to a table of void * pointers (objects).
633 * The number of objects to store back in the mempool; must be strictly positive.
636 * Single-producer (0) or multi-producer (1).
638 static inline void __attribute__((always_inline))
639 __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
640 unsigned n, int is_mp)
642 struct rte_mempool_cache *cache;
645 unsigned lcore_id = rte_lcore_id();
646 uint32_t cache_size = mp->cache_size;
647 uint32_t flushthresh = mp->cache_flushthresh;
649 /* increment stat now; adding to the mempool always succeeds */
650 __MEMPOOL_STAT_ADD(mp, put, n);
652 /* cache is not enabled or single producer or non-EAL thread */
653 if (unlikely(cache_size == 0 || is_mp == 0 ||
654 lcore_id >= RTE_MAX_LCORE))
657 /* Go straight to ring if put would overflow mem allocated for cache */
658 if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
661 cache = &mp->local_cache[lcore_id];
662 cache_objs = &cache->objs[cache->len];
665 * The cache follows this algorithm:
666 * 1. Add the objects to the cache.
667 * 2. If the cache length crosses the flush threshold, everything
668 * above the configured cache size is flushed to the ring.
671 /* Add elements back into the cache */
672 for (index = 0; index < n; ++index, obj_table++)
673 cache_objs[index] = *obj_table;
677 if (cache->len >= flushthresh) {
678 rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
679 cache->len - cache_size);
680 cache->len = cache_size;
687 /* push remaining objects in ring */
688 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
690 if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
691 rte_panic("cannot put objects in mempool\n");
694 if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
695 rte_panic("cannot put objects in mempool\n");
699 rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
701 rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
707 * Put several objects back in the mempool (multi-producer safe).
710 * A pointer to the mempool structure.
712 * A pointer to a table of void * pointers (objects).
714 * The number of objects to add to the mempool from the obj_table.
716 static inline void __attribute__((always_inline))
717 rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
720 __mempool_check_cookies(mp, obj_table, n, 0);
721 __mempool_put_bulk(mp, obj_table, n, 1);
725 * Put several objects back in the mempool (NOT multi-producer safe).
728 * A pointer to the mempool structure.
730 * A pointer to a table of void * pointers (objects).
732 * The number of objects to add to the mempool from obj_table.
735 rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
738 __mempool_check_cookies(mp, obj_table, n, 0);
739 __mempool_put_bulk(mp, obj_table, n, 0);
743 * Put several objects back in the mempool.
745 * This function calls the multi-producer or the single-producer
746 * version depending on the default behavior that was specified at
747 * mempool creation time (see flags).
750 * A pointer to the mempool structure.
752 * A pointer to a table of void * pointers (objects).
754 * The number of objects to add to the mempool from obj_table.
756 static inline void __attribute__((always_inline))
757 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
760 __mempool_check_cookies(mp, obj_table, n, 0);
761 __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
765 * Put one object in the mempool (multi-producer safe).
768 * A pointer to the mempool structure.
770 * A pointer to the object to be added.
772 static inline void __attribute__((always_inline))
773 rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
775 rte_mempool_mp_put_bulk(mp, &obj, 1);
779 * Put one object back in the mempool (NOT multi-producer safe).
782 * A pointer to the mempool structure.
784 * A pointer to the object to be added.
786 static inline void __attribute__((always_inline))
787 rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
789 rte_mempool_sp_put_bulk(mp, &obj, 1);
793 * Put one object back in the mempool.
795 * This function calls the multi-producer or the single-producer
796 * version depending on the default behavior that was specified at
797 * mempool creation time (see flags).
800 * A pointer to the mempool structure.
802 * A pointer to the object to be added.
804 static inline void __attribute__((always_inline))
805 rte_mempool_put(struct rte_mempool *mp, void *obj)
807 rte_mempool_put_bulk(mp, &obj, 1);
811 * @internal Get several objects from the mempool; used internally.
813 * A pointer to the mempool structure.
815 * A pointer to a table of void * pointers (objects).
817 * The number of objects to get, must be strictly positive.
819 * Single-consumer (0) or multi-consumer (1).
821 * - >=0: Success; number of objects supplied.
822 * - <0: Error; code of ring dequeue function.
824 static inline int __attribute__((always_inline))
825 __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
826 unsigned n, int is_mc)
829 struct rte_mempool_cache *cache;
832 unsigned lcore_id = rte_lcore_id();
833 uint32_t cache_size = mp->cache_size;
835 /* cache is not enabled or single consumer */
836 if (unlikely(cache_size == 0 || is_mc == 0 ||
837 n >= cache_size || lcore_id >= RTE_MAX_LCORE))
840 cache = &mp->local_cache[lcore_id];
841 cache_objs = cache->objs;
843 /* Can this be satisfied from the cache? */
844 if (cache->len < n) {
845 /* No. Backfill the cache first, and then fill from it */
846 uint32_t req = n + (cache_size - cache->len);
848 /* How many do we require, i.e. the number needed to fill the cache plus the request */
849 ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
850 if (unlikely(ret < 0)) {
852 * In the off chance that we are buffer constrained
853 * and not able to allocate cache + n, go to
854 * the ring directly. If that fails, we are truly out of buffers.
863 /* Now fill in the response ... */
864 for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
865 *obj_table = cache_objs[len];
869 __MEMPOOL_STAT_ADD(mp, get_success, n);
875 /* get remaining objects from ring */
877 ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
879 ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
882 __MEMPOOL_STAT_ADD(mp, get_fail, n);
884 __MEMPOOL_STAT_ADD(mp, get_success, n);
890 * Get several objects from the mempool (multi-consumer safe).
892 * If cache is enabled, objects will be retrieved first from cache,
893 * subsequently from the common pool. Note that it can return -ENOENT when
894 * the local cache and common pool are empty, even if the caches of other lcores are full.
898 * A pointer to the mempool structure.
900 * A pointer to a table of void * pointers (objects) that will be filled.
902 * The number of objects to get from mempool to obj_table.
904 * - 0: Success; objects taken.
905 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
907 static inline int __attribute__((always_inline))
908 rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
911 ret = __mempool_get_bulk(mp, obj_table, n, 1);
913 __mempool_check_cookies(mp, obj_table, n, 1);
918 * Get several objects from the mempool (NOT multi-consumer safe).
920 * If cache is enabled, objects will be retrieved first from cache,
921 * subsequently from the common pool. Note that it can return -ENOENT when
922 * the local cache and common pool are empty, even if the caches of other lcores are full.
926 * A pointer to the mempool structure.
928 * A pointer to a table of void * pointers (objects) that will be filled.
930 * The number of objects to get from the mempool to obj_table.
932 * - 0: Success; objects taken.
933 * - -ENOENT: Not enough entries in the mempool; no object is
936 static inline int __attribute__((always_inline))
937 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
940 ret = __mempool_get_bulk(mp, obj_table, n, 0);
942 __mempool_check_cookies(mp, obj_table, n, 1);
947 * Get several objects from the mempool.
949 * This function calls the multi-consumer or the single-consumer
950 * version, depending on the default behavior that was specified at
951 * mempool creation time (see flags).
953 * If cache is enabled, objects will be retrieved first from cache,
954 * subsequently from the common pool. Note that it can return -ENOENT when
955 * the local cache and common pool are empty, even if the caches of other lcores are full.
959 * A pointer to the mempool structure.
961 * A pointer to a table of void * pointers (objects) that will be filled.
963 * The number of objects to get from the mempool to obj_table.
965 * - 0: Success; objects taken
966 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
968 static inline int __attribute__((always_inline))
969 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
972 ret = __mempool_get_bulk(mp, obj_table, n,
973 !(mp->flags & MEMPOOL_F_SC_GET));
975 __mempool_check_cookies(mp, obj_table, n, 1);
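/*
 * Illustrative sketch (not part of the original header): a burst pattern
 * built on the bulk get/put calls; BURST is a hypothetical constant.
 *
 *	void *burst[BURST];
 *
 *	if (rte_mempool_get_bulk(mp, burst, BURST) == 0) {
 *		... process the BURST objects ...
 *		rte_mempool_put_bulk(mp, burst, BURST);
 *	}
 */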
980 * Get one object from the mempool (multi-consumer safe).
982 * If cache is enabled, objects will be retrieved first from cache,
983 * subsequently from the common pool. Note that it can return -ENOENT when
984 * the local cache and common pool are empty, even if the caches of other lcores are full.
988 * A pointer to the mempool structure.
990 * A pointer to a void * pointer (object) that will be filled.
992 * - 0: Success; objects taken.
993 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
995 static inline int __attribute__((always_inline))
996 rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
998 return rte_mempool_mc_get_bulk(mp, obj_p, 1);
1002 * Get one object from the mempool (NOT multi-consumer safe).
1004 * If cache is enabled, objects will be retrieved first from cache,
1005 * subsequently from the common pool. Note that it can return -ENOENT when
1006 * the local cache and common pool are empty, even if the caches of other lcores are full.
1010 * A pointer to the mempool structure.
1012 * A pointer to a void * pointer (object) that will be filled.
1014 * - 0: Success; objects taken.
1015 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
1017 static inline int __attribute__((always_inline))
1018 rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
1020 return rte_mempool_sc_get_bulk(mp, obj_p, 1);
1024 * Get one object from the mempool.
1026 * This function calls the multi-consumer or the single-consumer
1027 * version, depending on the default behavior that was specified at
1028 * mempool creation (see flags).
1030 * If cache is enabled, objects will be retrieved first from cache,
1031 * subsequently from the common pool. Note that it can return -ENOENT when
1032 * the local cache and common pool are empty, even if the caches of other lcores are full.
1036 * A pointer to the mempool structure.
1038 * A pointer to a void * pointer (object) that will be filled.
1040 * - 0: Success; objects taken.
1041 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
1043 static inline int __attribute__((always_inline))
1044 rte_mempool_get(struct rte_mempool *mp, void **obj_p)
1046 return rte_mempool_get_bulk(mp, obj_p, 1);
1050 * Return the number of entries in the mempool.
1052 * When cache is enabled, this function has to browse the caches of
1053 * all lcores, so it should not be used in a data path, but only for debug purposes.
1057 * A pointer to the mempool structure.
1059 * The number of entries in the mempool.
1061 unsigned rte_mempool_count(const struct rte_mempool *mp);
1064 * Return the number of free entries in the mempool ring.
1065 * i.e. how many entries can be freed back to the mempool.
1067 * NOTE: This corresponds to the number of elements *allocated* from the
1068 * memory pool, not the number of elements in the pool itself. To count
1069 * the number of elements currently available in the pool, use "rte_mempool_count"
1071 * When cache is enabled, this function has to browse the caches of
1072 * all lcores, so it should not be used in a data path, but only for debug purposes.
1076 * A pointer to the mempool structure.
1078 * The number of free entries in the mempool.
1080 static inline unsigned
1081 rte_mempool_free_count(const struct rte_mempool *mp)
1083 return mp->size - rte_mempool_count(mp);
1087 * Test if the mempool is full.
1089 * When cache is enabled, this function has to browse the caches of all
1090 * lcores, so it should not be used in a data path, but only for debug purposes.
1094 * A pointer to the mempool structure.
1096 * - 1: The mempool is full.
1097 * - 0: The mempool is not full.
1100 rte_mempool_full(const struct rte_mempool *mp)
1102 return !!(rte_mempool_count(mp) == mp->size);
1106 * Test if the mempool is empty.
1108 * When cache is enabled, this function has to browse the caches of all
1109 * lcores, so it should not be used in a data path, but only for debug purposes.
1113 * A pointer to the mempool structure.
1115 * - 1: The mempool is empty.
1116 * - 0: The mempool is not empty.
1119 rte_mempool_empty(const struct rte_mempool *mp)
1121 return !!(rte_mempool_count(mp) == 0);
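/*
 * Illustrative sketch (not part of the original header): debug-time
 * accounting. These helpers browse every per-lcore cache, so keep them
 * out of the data path.
 *
 *	printf("pool %s: %u allocated, %u available\n", mp->name,
 *	       rte_mempool_free_count(mp), rte_mempool_count(mp));
 *	if (rte_mempool_full(mp))
 *		printf("all objects are back in the pool\n");
 */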
1125 * Return the physical address of elt, which is an element of the pool mp.
1128 * A pointer to the mempool structure.
1130 * A pointer (virtual address) to the element of the pool.
1132 * The physical address of the elt element.
1134 static inline phys_addr_t
1135 rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
1137 if (rte_eal_has_hugepages()) {
1138 const struct rte_mempool_objhdr *hdr;
1139 hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
1141 return hdr->physaddr;
1144 * If huge pages are disabled, we cannot assume the
1145 * memory region to be physically contiguous.
1146 * Look up each element individually.
1148 return rte_mem_virt2phy(elt);
1153 * Check the consistency of mempool objects.
1155 * Verify the coherency of fields in the mempool structure. Also check
1156 * that the cookies of mempool objects (even the ones that are not
1157 * present in pool) have a correct value. If not, a panic will occur.
1160 * A pointer to the mempool structure.
1162 void rte_mempool_audit(struct rte_mempool *mp);
1165 * Return a pointer to the private data in a mempool structure.
1168 * A pointer to the mempool structure.
1170 * A pointer to the private data.
1172 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
1175 MEMPOOL_HEADER_SIZE(mp, mp->pg_num, mp->cache_size);
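/*
 * Illustrative sketch (not part of the original header): reading back the
 * private area reserved through private_data_size at creation time.
 * struct my_pool_priv is the hypothetical type from the constructor
 * sketch above.
 *
 *	struct my_pool_priv *priv = rte_mempool_get_priv(mp);
 *	uint32_t seq = priv->seq;
 */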
1179 * Dump the status of all mempools on the console
1182 * A pointer to a file for output
1184 void rte_mempool_list_dump(FILE *f);
1187 * Search for a mempool by its name
1190 * The name of the mempool.
1192 * The pointer to the mempool matching the name, or NULL if not found.
1194 * with rte_errno set appropriately. Possible rte_errno values include:
1195 * - ENOENT - required entry not available to return.
1198 struct rte_mempool *rte_mempool_lookup(const char *name);
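/*
 * Illustrative sketch (not part of the original header): attaching to a
 * pool created elsewhere (for example by the primary process) by name.
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("my_pool");
 *
 *	if (mp == NULL)
 *		rte_panic("mempool \"my_pool\" not found\n");
 */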
1201 * Get the header, trailer and total size of a mempool element.
1203 * Given a desired size of the mempool element and mempool flags,
1204 * calculates header, trailer, body and total sizes of the mempool object.
1207 * The size of each element, without header and trailer.
1209 * The flags used for the mempool creation.
1210 * Consult rte_mempool_create() for more information about possible values.
1213 * The calculated detailed size of the mempool object. May be NULL.
1215 * Total size of the mempool object.
1217 uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
1218 struct rte_mempool_objsz *sz);
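/*
 * Illustrative sketch (not part of the original header): inspecting the
 * per-object overhead for a 64-byte element with default flags.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(64, 0, &sz);
 *
 *	... total equals sz.header_size + sz.elt_size + sz.trailer_size ...
 */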
1221 * Get the size of memory required to store mempool elements.
1223 * Calculate the maximum amount of memory required to store a given number
1224 * of objects. Assume that the memory buffer will be aligned at a page boundary.
1227 * Note that if the object size is bigger than the page size, then it assumes
1228 * that pages are grouped in subsets of physically contiguous pages big
1229 * enough to store at least one object.
1232 * Number of elements.
1233 * @param total_elt_sz
1234 * The size of each element, including header and trailer, as returned
1235 * by rte_mempool_calc_obj_size().
1237 * LOG2 of the physical page size.
1239 * Required memory size aligned at page boundary.
1241 size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
1245 * Get the size of memory required to store mempool elements.
1247 * Calculate how much memory would actually be required with the given
1248 * memory footprint to store the required number of objects.
1251 * Virtual address of the externally allocated memory buffer.
1252 * Will be used to store mempool objects.
1254 * Number of elements.
1255 * @param total_elt_sz
1256 * The size of each element, including header and trailer, as returned
1257 * by rte_mempool_calc_obj_size().
1259 * Array of physical addresses of the pages that comprise the given memory buffer.
1262 * Number of elements in the paddr array.
1264 * LOG2 of the physical page size.
1266 * On success, the number of bytes needed to store given number of
1267 * objects, aligned to the given page size. If the provided memory
1268 * buffer is too small, return a negative value whose absolute value
1269 * is the actual number of elements that can be stored in that buffer.
1271 ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
1272 size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
1276 * Walk the list of all memory pools
1281 * Argument passed to iterator
1283 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
1290 #endif /* _RTE_MEMPOOL_H_ */