/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2016 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name, and uses a ring to store free objects. It
 * provides some other optional services, like a per-core object
 * cache, and an alignment helper to ensure that objects are padded
 * to spread them equally on all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or an
 * equivalent, the object data is not modified; the user can save some
 * meta-data in the object data and retrieve it when allocating a
 * new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must
 * not be interrupted by another task that uses the same mempool
 * (because it uses a ring which is not preemptible). Also, mempool
 * functions must not be used outside the DPDK environment: for
 * example, in a linuxapp environment, a thread that was not created by
 * the EAL must not use mempools. This is because the per-lcore cache
 * relies on rte_lcore_id(), which would not return a correct value.
 */

#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <sys/queue.h>

#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
#endif

/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	unsigned len; /**< Current number of objects in the cache. */
	/*
	 * The array is sized larger than the cache itself so that it can
	 * overflow in certain cases, avoiding a needless emptying of the
	 * cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};

#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool name. */
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define	RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#ifdef RTE_LIBRTE_XEN_DOM0

/* "<name>_MP_elt" */
#define	RTE_MEMPOOL_OBJ_NAME	"%s_" RTE_MEMPOOL_MZ_PREFIX "elt"

#else

#define	RTE_MEMPOOL_OBJ_NAME	RTE_MEMPOOL_MZ_FORMAT

#endif /* RTE_LIBRTE_XEN_DOM0 */

#define	MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically contiguous memory */
#define	MEMPOOL_PG_NUM_DEFAULT	1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure
 *
 * Each object stored in a mempool is prefixed by this header structure,
 * which makes it possible to retrieve the mempool pointer from the object
 * and to iterate over all objects attached to a mempool. When debug is
 * enabled, a cookie is also added in this structure, preventing corruptions
 * and double-frees.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	phys_addr_t physaddr;            /**< Physical address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list type holding mempool object headers.
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in a mempool is suffixed by this
 * trailer structure containing a cookie that helps detect memory corruption.
 */
struct rte_mempool_objtlr {
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	struct rte_ring *ring;           /**< Ring to store objects. */
	phys_addr_t phys_addr;           /**< Phys. addr. of mempool struct. */
	int flags;                       /**< Flags of the mempool. */
	int socket_id;                   /**< Socket id passed at mempool creation. */
	uint32_t size;                   /**< Size of the mempool. */
	uint32_t cache_size;             /**< Size of per-lcore local cache. */
	uint32_t cache_flushthresh;
	/**< Threshold before we flush excess elements. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
	uint32_t trailer_size;           /**< Size of trailer (after elt). */

	unsigned private_data_size;      /**< Size of private data. */

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif

	/* Address translation support, starts from next cache line. */

	/** Number of elements in the elt_pa array. */
	uint32_t    pg_num __rte_cache_aligned;
	uint32_t    pg_shift;        /**< LOG2 of the physical page size. */
	uintptr_t   pg_mask;         /**< Physical page mask value. */
	uintptr_t   elt_va_start;
	/**< Virtual address of the first mempool object. */
	uintptr_t   elt_va_end;
	/**< Virtual address just past the last mempool object. */
	phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
	/**< Array of physical page addresses for the mempool objects buffer. */

} __rte_cache_aligned;

#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/

/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
		unsigned __lcore_id = rte_lcore_id();           \
		if (__lcore_id < RTE_MAX_LCORE) {               \
			mp->stats[__lcore_id].name##_objs += n; \
			mp->stats[__lcore_id].name##_bulk += 1; \
		}                                               \
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif

/**
 * Size of the elt_pa array, based on the number of pages. (Internal use)
 */
#define __PA_SIZE(mp, pgn) \
	RTE_ALIGN_CEIL((((pgn) - RTE_DIM((mp)->elt_pa)) * \
	sizeof((mp)->elt_pa[0])), RTE_CACHE_LINE_SIZE)

/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param pgn
 *   Number of pages used to store mempool objects.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, pgn, cs) \
	(sizeof(*(mp)) + __PA_SIZE(mp, pgn) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);

	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);

	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

/**
 * @internal Check and update cookies, or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/**
 * An object callback function for the mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 * All elements of the mempool are allocated together with the mempool header,
 * in one physically contiguous chunk of memory.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be lower than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. The access to the per-lcore table is of course
 *   faster than the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in the cache. Note that even if not used, the
 *   memory space for the cache is always reserved in a mempool structure,
 *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);

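/*
 * A minimal usage sketch (illustrative only, not part of this API): creating
 * a pool of fixed-size buffers. The pool name, element count, element size
 * and cache size below are arbitrary example values.
 *
 *	static struct rte_mempool *
 *	example_pool_create(void)
 *	{
 *		// 8191 elements (a power of two minus one, as advised above)
 *		// of 2048 bytes each, a 256-object per-lcore cache, no
 *		// private data area and default multi-producer/consumer
 *		// behavior.
 *		return rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *					  NULL, NULL, NULL, NULL,
 *					  SOCKET_ID_ANY, 0);
 *	}
 */
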
/**
 * Create a new mempool named *name* in memory.
 *
 * The pool contains n elements of elt_size. Its size is set to n.
 * This function uses ``memzone_reserve()`` to allocate the mempool header
 * (and the objects if vaddr is NULL).
 * Depending on the input parameters, mempool elements can either be allocated
 * together with the mempool header, or an externally provided memory buffer
 * can be used to store the mempool objects. In the latter case, that external
 * memory buffer can consist of a set of disjoint physical pages.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function called for each object at initialization of the pool.
 *   See rte_mempool_create() for details.
 * @param obj_init_arg
 *   An opaque pointer passed to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given memory
 *   buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, void *vaddr,
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);

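/*
 * Illustrative sketch only: sizing an externally provided buffer before
 * handing it to rte_mempool_xmem_create(). The identifiers buf, buf_paddr,
 * pg_num and PG_SHIFT are placeholders for memory that the application has
 * already allocated and translated to physical addresses by its own means.
 *
 *	uint32_t total_sz = rte_mempool_calc_obj_size(2048, 0, NULL);
 *	size_t len = rte_mempool_xmem_size(8191, total_sz, PG_SHIFT);
 *	// buf must point to at least len bytes backed by the pages whose
 *	// physical addresses are listed in buf_paddr[0..pg_num-1]
 *	struct rte_mempool *mp = rte_mempool_xmem_create("ext_pool",
 *			8191, 2048, 256, 0, NULL, NULL, NULL, NULL,
 *			SOCKET_ID_ANY, 0, buf, buf_paddr, pg_num, PG_SHIFT);
 */
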
/**
 * Create a new mempool named *name* in memory on Xen Dom0.
 *
 * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 * All elements of the mempool are allocated together with the mempool header,
 * and the memory buffer can consist of a set of disjoint physical pages.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be lower than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. The access to the per-lcore table is of course
 *   faster than the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in the cache. Note that even if not used, the
 *   memory space for the cache is always reserved in a mempool structure,
 *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags);

/**
 * Call a function for each mempool element.
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on it.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

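/*
 * Illustrative sketch only: a callback matching rte_mempool_obj_cb_t that
 * zeroes every object, e.g. to pre-initialize a freshly created pool. The
 * function name and the zeroing behaviour are arbitrary; memset() requires
 * <string.h>.
 *
 *	static void
 *	example_obj_zero(struct rte_mempool *mp, void *opaque,
 *			 void *obj, unsigned obj_idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *
 *	// uint32_t n = rte_mempool_obj_iter(mp, example_obj_zero, NULL);
 */
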
/**
 * Dump the status of the mempool to the console.
 *
 * @param f
 *   A pointer to a file for output.
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/**
 * @internal Put several objects back in the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool, must be strictly
 *   positive.
 * @param is_mp
 *   Mono-producer (0) or multi-producers (1).
 */
static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		   unsigned n, int is_mp)
{
	struct rte_mempool_cache *cache;
	uint32_t index;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
	uint32_t flushthresh = mp->cache_flushthresh;

	/* increment stat now, adding in mempool always succeeds */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* cache is not enabled or single producer or non-EAL thread */
	if (unlikely(cache_size == 0 || is_mp == 0 ||
		     lcore_id >= RTE_MAX_LCORE))
		goto ring_enqueue;

	/* Go straight to ring if put would overflow mem allocated for cache */
	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (if it crosses the
	 *      cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	cache->len += n;

	if (cache->len >= flushthresh) {
		rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
				cache->len - cache_size);
		cache->len = cache_size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (is_mp) {
		if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	} else {
		if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	}
#else
	if (is_mp)
		rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
	else
		rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
#endif
}

/**
 * Put several objects back in the mempool (multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from the obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 1);
}

/**
 * Put several objects back in the mempool (NOT multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 0);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
}

/**
 * Put one object in the mempool (multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_mp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool (NOT multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_sp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}

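/*
 * Illustrative sketch only: returning objects to a pool. rte_mempool_put()
 * and rte_mempool_put_bulk() pick the single- or multi-producer path from
 * the flags given at creation time; the _mp/_sp variants above force the
 * choice explicitly. burst[] and obj are placeholders for objects previously
 * obtained from mp.
 *
 *	rte_mempool_put_bulk(mp, burst, 32);	// free a whole burst
 *	rte_mempool_put(mp, obj);		// or a single object
 */
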
/**
 * @internal Get several objects from the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to get, must be strictly positive.
 * @param is_mc
 *   Mono-consumer (0) or multi-consumers (1).
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
		   unsigned n, int is_mc)
{
	int ret;
	struct rte_mempool_cache *cache;
	uint32_t index, len;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;

	/* cache is not enabled or single consumer */
	if (unlikely(cache_size == 0 || is_mc == 0 ||
		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
		goto ring_dequeue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it */
		uint32_t req = n + (cache_size - cache->len);

		/* How many do we require, i.e. number to fill the cache + the request */
		ret = rte_ring_mc_dequeue_bulk(mp->ring,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the unlikely case that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out of
			 * buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n;
	     ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	if (is_mc)
		ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
	else
		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}

/**
 * Get several objects from the mempool (multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores hold enough objects.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;

	ret = __mempool_get_bulk(mp, obj_table, n, 1);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get several objects from the mempool (NOT multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores hold enough objects.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;

	ret = __mempool_get_bulk(mp, obj_table, n, 0);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores hold enough objects.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;

	ret = __mempool_get_bulk(mp, obj_table, n,
				 !(mp->flags & MEMPOOL_F_SC_GET));
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get one object from the mempool (multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores hold enough objects.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool (NOT multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores hold enough objects.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores hold enough objects.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}

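/*
 * Illustrative sketch only: allocating a single object and handling the
 * -ENOENT case. What to do when the pool is exhausted (drop, retry, ...) is
 * up to the application.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) < 0)
 *		return -ENOENT;	// pool (and this lcore's cache) is empty
 *	// ... use obj ...
 *	rte_mempool_put(mp, obj);
 */
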
/**
 * Return the number of entries in the mempool.
 *
 * When the cache is enabled, this function has to browse the cache length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned rte_mempool_count(const struct rte_mempool *mp);

/**
 * Return the number of free entries in the mempool ring,
 * i.e. how many entries can be freed back to the mempool.
 *
 * NOTE: This corresponds to the number of elements *allocated* from the
 * memory pool, not the number of elements in the pool itself. To count
 * the number of elements currently available in the pool, use
 * rte_mempool_count() instead.
 *
 * When the cache is enabled, this function has to browse the cache length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of free entries in the mempool.
 */
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
	return mp->size - rte_mempool_count(mp);
}

/**
 * Test if the mempool is full.
 *
 * When the cache is enabled, this function has to browse the cache length of
 * all lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 *
 * When the cache is enabled, this function has to browse the cache length of
 * all lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == 0);
}

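/*
 * Illustrative sketch only: debug-time occupancy report. As noted above,
 * these helpers walk the per-lcore caches and are not meant for the fast
 * path.
 *
 *	printf("%s: in use=%u available=%u full=%d empty=%d\n",
 *	       mp->name, rte_mempool_free_count(mp), rte_mempool_count(mp),
 *	       rte_mempool_full(mp), rte_mempool_empty(mp));
 */
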
/**
 * Return the physical address of elt, which is an element of the pool mp.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The physical address of the elt element.
 */
static inline phys_addr_t
rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
{
	if (rte_eal_has_hugepages()) {
		const struct rte_mempool_objhdr *hdr;

		hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
			sizeof(*hdr));
		return hdr->physaddr;
	} else {
		/*
		 * If huge pages are disabled, we cannot assume the
		 * memory region to be physically contiguous,
		 * so look up each element individually.
		 */
		return rte_mem_virt2phy(elt);
	}
}

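/*
 * Illustrative sketch only: obtaining the physical address of an element,
 * e.g. to program it into a device descriptor. elt is assumed to be an
 * object taken from this pool.
 *
 *	phys_addr_t pa = rte_mempool_virt2phy(mp, elt);
 */
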
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(struct rte_mempool *mp);

/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		MEMPOOL_HEADER_SIZE(mp, mp->pg_num, mp->cache_size);
}

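/*
 * Illustrative sketch only: accessing a private area reserved at creation
 * time. struct example_priv is hypothetical; its size must have been passed
 * as private_data_size to rte_mempool_create().
 *
 *	struct example_priv { uint32_t seq; };
 *
 *	struct example_priv *priv = rte_mempool_get_priv(mp);
 *	priv->seq = 0;
 */
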
/**
 * Dump the status of all mempools on the console.
 *
 * @param f
 *   A pointer to a file for output.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name.
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

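/*
 * Illustrative sketch only: attaching to a pool created elsewhere (for
 * instance by a primary process) using its name. "example_pool" is an
 * arbitrary name.
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("example_pool");
 *
 *	if (mp == NULL)
 *		return -1;	// not found, rte_errno is set to ENOENT
 */
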
/**
 * Get the header, trailer and total size of a mempool element.
 *
 * Given a desired size of the mempool element and mempool flags,
 * calculates the header, trailer, body and total sizes of the mempool object.
 *
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

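/*
 * Illustrative sketch only: inspecting how much room one object really takes
 * once header, trailer and padding are accounted for. The 2048-byte element
 * size and the zero flags value are arbitrary.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *	// total (== sz.total_size) covers the header, the padded element
 *	// and the trailer
 */
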
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate the maximum amount of memory required to store the given number
 * of objects. Assume that the memory buffer will be aligned at a page
 * boundary.
 *
 * Note that if the object size is bigger than the page size, then it assumes
 * that pages are grouped in subsets of physically contiguous pages big
 * enough to store at least one object.
 *
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   Required memory size aligned at page boundary.
 */
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift);

/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate how much memory would actually be required with the given
 * memory footprint to store the required number of objects.
 *
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given memory
 *   buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   On success, the number of bytes needed to store the given number of
 *   objects, aligned to the given page size. If the provided memory
 *   buffer is too small, return a negative value whose absolute value
 *   is the actual number of elements that can be stored in that buffer.
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
	uint32_t pg_shift);

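/*
 * Illustrative sketch only: checking whether an externally allocated buffer
 * is large enough before calling rte_mempool_xmem_create(). buf, buf_paddr,
 * pg_num, PG_SHIFT and total_elt_sz are placeholders supplied by the
 * application.
 *
 *	ssize_t usage = rte_mempool_xmem_usage(buf, 8191, total_elt_sz,
 *					       buf_paddr, pg_num, PG_SHIFT);
 *	if (usage < 0)
 *		return -1;	// only -usage elements would fit in buf
 */
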
/**
 * Walk the list of all memory pools.
 *
 * @param func
 *   Iterator function called for each mempool.
 * @param arg
 *   Argument passed to the iterator.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);

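/*
 * Illustrative sketch only: a walk callback that dumps every pool, e.g. from
 * a debug CLI. The function name is arbitrary.
 *
 *	static void
 *	example_dump_one(struct rte_mempool *mp, void *arg)
 *	{
 *		rte_mempool_dump((FILE *)arg, mp);
 *	}
 *
 *	// rte_mempool_walk(example_dump_one, stdout);
 */
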
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */