/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2016 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_
/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name, and uses a ring to store free objects. It
 * provides some other optional services, like a per-core object
 * cache, and an alignment helper to ensure that objects are padded
 * to spread them equally on all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or an
 * equivalent, the object data is not modified; the user can save some
 * meta-data in the object data and retrieve it when allocating a
 * new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must
 * not be interrupted by another task that uses the same mempool
 * (because it uses a ring which is not preemptible). Also, mempool
 * functions must not be used outside the DPDK environment: for
 * example, in a linuxapp environment, a thread that was not created by
 * the EAL must not use mempools, because the per-lcore cache
 * won't work, as rte_lcore_id() will not return a correct value.
 */
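/*
 * Typical life cycle, as a minimal sketch (illustrative only; the pool
 * name "example_pool" and the sizes below are hypothetical, not part of
 * this API):
 *
 *   struct rte_mempool *mp;
 *   void *obj;
 *
 *   mp = rte_mempool_create("example_pool", 8191, 64, 32, 0,
 *                           NULL, NULL, NULL, NULL,
 *                           SOCKET_ID_ANY, 0);
 *   if (mp == NULL)
 *       rte_exit(EXIT_FAILURE, "mempool creation failed\n");
 *
 *   if (rte_mempool_get(mp, &obj) == 0) {   // take one free object
 *       // ... use obj ...
 *       rte_mempool_put(mp, obj);           // return it to the pool
 *   }
 */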
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>

#ifdef __cplusplus
extern "C" {
#endif
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Number of successful allocation calls. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Number of failed allocation calls. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	unsigned len; /**< Current number of objects in the cache. */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of the cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};
#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool name. */
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
#ifdef RTE_LIBRTE_XEN_DOM0

/* "<name>_MP_elt" */
#define RTE_MEMPOOL_OBJ_NAME	"%s_" RTE_MEMPOOL_MZ_PREFIX "elt"

#else

#define RTE_MEMPOOL_OBJ_NAME	RTE_MEMPOOL_MZ_FORMAT

#endif /* RTE_LIBRTE_XEN_DOM0 */
#define MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically contiguous memory */
#define MEMPOOL_PG_NUM_DEFAULT	1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure
 *
 * Each object stored in a mempool is prefixed by this header structure.
 * It allows the mempool pointer to be retrieved from the object, and
 * iteration over all objects attached to a mempool. When debug is enabled,
 * a cookie is also added in this structure, preventing corruptions and
 * bad usages.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp; /**< The mempool owning the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;        /**< Debug cookie. */
#endif
};

/**
 * A list of mempool object headers.
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in a mempool is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie; /**< Debug cookie. */
#endif
};
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	struct rte_ring *ring;      /**< Ring to store objects. */
	phys_addr_t phys_addr;      /**< Phys. addr. of mempool struct. */
	int flags;                  /**< Flags of the mempool. */
	int socket_id;              /**< Socket id passed at mempool creation. */
	uint32_t size;              /**< Size of the mempool. */
	uint32_t cache_size;        /**< Size of per-lcore local cache. */
	uint32_t cache_flushthresh;
	/**< Threshold before we flush excess elements. */

	uint32_t elt_size;          /**< Size of an element. */
	uint32_t header_size;       /**< Size of header (before elt). */
	uint32_t trailer_size;      /**< Size of trailer (after elt). */

	unsigned private_data_size; /**< Size of private data. */

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif

	/* Address translation support; starts from the next cache line. */

	/** Number of elements in the elt_pa array. */
	uint32_t pg_num __rte_cache_aligned;
	uint32_t pg_shift;      /**< LOG2 of the physical page size. */
	uintptr_t pg_mask;      /**< Physical page mask value. */
	uintptr_t elt_va_start;
	/**< Virtual address of the first mempool object. */
	uintptr_t elt_va_end;
	/**< Virtual address one past the last (<size>-th) mempool object. */
	phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
	/**< Array of physical page addresses for the mempool objects buffer. */

} __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines. */
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer". */
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer". */
/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_objs += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
 * Size of the elt_pa array, based on the number of pages. (Internal use)
 */
#define __PA_SIZE(mp, pgn) \
	(RTE_ALIGN_CEIL((((pgn) - RTE_DIM((mp)->elt_pa)) * \
	sizeof((mp)->elt_pa[0])), RTE_CACHE_LINE_SIZE))
/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param pgn
 *   Number of pages used to store mempool objects.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, pgn, cs) \
	(sizeof(*(mp)) + __PA_SIZE(mp, pgn) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/**
 * Return true if the whole mempool is in contiguous memory.
 */
#define MEMPOOL_IS_CONTIG(mp) \
	((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
	(mp)->phys_addr == (mp)->elt_pa[0])
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}
/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
	return hdr->mp;
}
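/*
 * Usage sketch (illustrative): an object obtained from a pool can be
 * freed without keeping a pointer to its pool around, since the owning
 * pool is recoverable from the object itself. "free_obj" is a
 * hypothetical helper, not part of this API.
 *
 *   void free_obj(void *obj)
 *   {
 *       struct rte_mempool *mp = rte_mempool_from_obj(obj);
 *       rte_mempool_put(mp, obj);
 *   }
 */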
/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
/**
 * @internal Check and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that the cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
 * An object callback function for the mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 * All elements of the mempool are allocated together with the mempool header,
 * in one physically contiguous chunk of memory.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be less than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. Access to the per-lcore table is of course
 *   faster than access to the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; this can be useful to
 *   avoid losing objects in the cache. Note that even if not used, the
 *   memory space for the cache is always reserved in a mempool structure,
 *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);
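/*
 * Usage sketch (illustrative; the pool name "pkt_pool" and the sizes are
 * hypothetical): 8191 objects (2^13 - 1) of 2048 bytes each, with a
 * 256-object per-lcore cache. Note that 8191 % 256 != 0, so in practice
 * a divisor of n would be a better cache_size choice.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_mempool_create("pkt_pool", 8191, 2048, 256, 0,
 *                           NULL, NULL,   // no pool constructor
 *                           NULL, NULL,   // no per-object constructor
 *                           SOCKET_ID_ANY, 0);
 *   if (mp == NULL)
 *       rte_exit(EXIT_FAILURE, "mempool creation failed: %s\n",
 *                rte_strerror(rte_errno));
 */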
/**
 * Create a new mempool named *name* in memory.
 *
 * The pool contains n elements of elt_size. Its size is set to n.
 * This function uses ``memzone_reserve()`` to allocate the mempool header
 * (and the objects if vaddr is NULL).
 * Depending on the input parameters, mempool elements can be either allocated
 * together with the mempool header, or an externally provided memory buffer
 * can be used to store mempool objects. In the latter case, that external
 * memory buffer can consist of a set of disjoint physical pages.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function called for each object at initialization of the pool.
 *   See rte_mempool_create() for details.
 * @param obj_init_arg
 *   An opaque pointer passed to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given
 *   memory buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, void *vaddr,
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
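/*
 * Sizing sketch for an externally backed pool (illustrative; assumes the
 * caller already owns a physically mapped buffer at "vaddr" described by
 * a "paddr"/"pg_num"/"pg_shift" page table obtained elsewhere):
 *
 *   struct rte_mempool_objsz sz;
 *   uint32_t total;
 *   size_t len;
 *
 *   total = rte_mempool_calc_obj_size(2048, 0, &sz); // per-object footprint
 *   len = rte_mempool_xmem_size(8191, total, pg_shift);
 *   // ... check that the external buffer is at least len bytes, then:
 *   mp = rte_mempool_xmem_create("ext_pool", 8191, 2048, 256, 0,
 *                                NULL, NULL, NULL, NULL,
 *                                SOCKET_ID_ANY, 0,
 *                                vaddr, paddr, pg_num, pg_shift);
 */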
/**
 * Create a new mempool named *name* in memory on Xen Dom0.
 *
 * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 * All elements of the mempool are allocated together with the mempool header,
 * and the memory buffer can consist of a set of disjoint physical pages.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be less than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. Access to the per-lcore table is of course
 *   faster than access to the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; this can be useful to
 *   avoid losing objects in the cache. Note that even if not used, the
 *   memory space for the cache is always reserved in a mempool structure,
 *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags);
/**
 * Call a function for each mempool element.
 *
 * Iterate across all objects attached to an rte_mempool and call the
 * callback function on it.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
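/*
 * Callback sketch (illustrative): zero the first byte of every object in
 * a pool; "reset_obj" is a hypothetical helper, not part of this API.
 *
 *   static void
 *   reset_obj(struct rte_mempool *mp, void *opaque, void *obj,
 *             unsigned obj_idx)
 *   {
 *       *(char *)obj = 0;
 *   }
 *
 *   uint32_t count = rte_mempool_obj_iter(mp, reset_obj, NULL);
 */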
/**
 * Dump the status of the mempool to the console.
 *
 * @param f
 *   A pointer to a file for output
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
/**
 * @internal Put several objects back in the mempool; used internally.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool; must be strictly
 *   positive.
 * @param is_mp
 *   Mono-producer (0) or multi-producers (1).
 */
static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		   unsigned n, int is_mp)
{
	struct rte_mempool_cache *cache;
	uint32_t index;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
	uint32_t flushthresh = mp->cache_flushthresh;

	/* increment stat now; adding to the mempool always succeeds */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* cache is not enabled, single producer, or non-EAL thread */
	if (unlikely(cache_size == 0 || is_mp == 0 ||
		     lcore_id >= RTE_MAX_LCORE))
		goto ring_enqueue;

	/* Go straight to the ring if the put would overflow the memory
	 * allocated for the cache. */
	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (if it crosses the
	 *      cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	cache->len += n;

	if (cache->len >= flushthresh) {
		rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
				cache->len - cache_size);
		cache->len = cache_size;
	}

	return;

ring_enqueue:

	/* push remaining objects in the ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (is_mp) {
		if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	}
	else {
		if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	}
#else
	if (is_mp)
		rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
	else
		rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool (multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the mempool from the obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 1);
}
/**
 * Put several objects back in the mempool (NOT multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 0);
}
/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
}
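/*
 * Usage sketch (illustrative): return a batch of objects previously taken
 * from the pool; "objs" is assumed to hold pointers obtained via a
 * successful rte_mempool_get_bulk() on the same pool.
 *
 *   void *objs[32];
 *
 *   // ... objs[] filled by rte_mempool_get_bulk(mp, objs, 32) ...
 *   rte_mempool_put_bulk(mp, objs, 32);
 */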
/**
 * Put one object in the mempool (multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_mp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool (NOT multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_sp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
/**
 * @internal Get several objects from the mempool; used internally.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to get; must be strictly positive.
 * @param is_mc
 *   Mono-consumer (0) or multi-consumers (1).
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
		   unsigned n, int is_mc)
{
	int ret;
	struct rte_mempool_cache *cache;
	uint32_t index, len;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;

	/* cache is not enabled or single consumer */
	if (unlikely(cache_size == 0 || is_mc == 0 ||
		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
		goto ring_dequeue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it. */
		uint32_t req = n + (cache_size - cache->len);

		/* How many do we require, i.e. number to fill the cache + the request */
		ret = rte_ring_mc_dequeue_bulk(mp->ring,
				&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out
			 * of buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n;
	     ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from the ring */
	if (is_mc)
		ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
	else
		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}
/**
 * Get several objects from the mempool (multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from the cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n, 1);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get several objects from the mempool (NOT multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from the cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n, 0);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 *
 * If cache is enabled, objects will be retrieved first from the cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n,
				 !(mp->flags & MEMPOOL_F_SC_GET));
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
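/*
 * Usage sketch (illustrative): take a burst of objects, degrade gracefully
 * when the pool is exhausted, and always return what was taken.
 *
 *   void *objs[32];
 *
 *   if (rte_mempool_get_bulk(mp, objs, 32) == 0) {
 *       // ... use all 32 objects ...
 *       rte_mempool_put_bulk(mp, objs, 32);
 *   } else {
 *       // -ENOENT: nothing was retrieved; retry later or do less work
 *   }
 */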
/**
 * Get one object from the mempool (multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from the cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool (NOT multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from the cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation (see flags).
 *
 * If cache is enabled, objects will be retrieved first from the cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
/**
 * Return the number of entries in the mempool.
 *
 * When the cache is enabled, this function has to browse the cache
 * length of all lcores, so it should not be used in a data path, but
 * only for debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned rte_mempool_count(const struct rte_mempool *mp);
/**
 * Return the number of free entries in the mempool ring,
 * i.e. how many entries can be freed back to the mempool.
 *
 * NOTE: This corresponds to the number of elements *allocated* from the
 * memory pool, not the number of elements in the pool itself. To count
 * the number of elements currently available in the pool, use
 * rte_mempool_count() instead.
 *
 * When the cache is enabled, this function has to browse the cache
 * length of all lcores, so it should not be used in a data path, but
 * only for debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of free entries in the mempool.
 */
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
	return mp->size - rte_mempool_count(mp);
}
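/*
 * Sanity-check sketch (illustrative): by construction the two counters
 * partition the pool, so for a quiescent pool (no concurrent gets/puts)
 * the following holds:
 *
 *   RTE_VERIFY(rte_mempool_count(mp) + rte_mempool_free_count(mp)
 *              == mp->size);
 */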
/**
 * Test if the mempool is full.
 *
 * When the cache is enabled, this function has to browse the cache
 * length of all lcores, so it should not be used in a data path, but
 * only for debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 *
 * When the cache is enabled, this function has to browse the cache
 * length of all lcores, so it should not be used in a data path, but
 * only for debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == 0);
}
/**
 * Return the physical address of elt, which is an element of the pool mp.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The physical address of the elt element.
 */
static inline phys_addr_t
rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
{
	if (rte_eal_has_hugepages()) {
		uintptr_t off;

		off = (const char *)elt - (const char *)mp->elt_va_start;
		return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
	} else {
		/*
		 * If huge pages are disabled, we cannot assume the
		 * memory region to be physically contiguous.
		 * Look it up for each element.
		 */
		return rte_mem_virt2phy(elt);
	}
}
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(struct rte_mempool *mp);
/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		MEMPOOL_HEADER_SIZE(mp, mp->pg_num, mp->cache_size);
}
/**
 * Dump the status of all mempools on the console.
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name.
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);
/**
 * Get the header, trailer and total size of a mempool element.
 *
 * Given a desired size of the mempool element and mempool flags,
 * calculates the header, trailer, body and total sizes of the mempool object.
 *
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);
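/*
 * Worked example (illustrative): the returned total accounts for padding
 * added around the element, so it is generally larger than the raw
 * element size; the exact numbers depend on the flags and build target.
 *
 *   struct rte_mempool_objsz sz;
 *   uint32_t total = rte_mempool_calc_obj_size(60, 0, &sz);
 *   // total == sz.header_size + sz.elt_size + sz.trailer_size
 *   // sz.elt_size >= 60 after alignment padding
 */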
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate the maximum amount of memory required to store the given number
 * of objects. Assume that the memory buffer will be aligned at the page
 * boundary.
 *
 * Note that if the object size is bigger than the page size, then it assumes
 * that pages are grouped in subsets of physically contiguous pages big
 * enough to store at least one object.
 *
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   Required memory size aligned at the page boundary.
 */
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift);
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate how much memory would actually be required with the given
 * memory footprint to store the required number of objects.
 *
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given
 *   memory buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical page size.
 * @return
 *   On success, the number of bytes needed to store the given number of
 *   objects, aligned to the given page size. If the provided memory
 *   buffer is too small, return a negative value whose absolute value
 *   is the actual number of elements that can be stored in that buffer.
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
	uint32_t pg_shift);
/**
 * Walk the list of all memory pools.
 *
 * @param func
 *   Iterator function called for each mempool.
 * @param arg
 *   Argument passed to the iterator.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);
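/*
 * Walk sketch (illustrative): print the name of every registered pool;
 * "print_pool" is a hypothetical helper, not part of this API.
 *
 *   static void
 *   print_pool(struct rte_mempool *mp, void *arg)
 *   {
 *       printf("%s\n", mp->name);
 *   }
 *
 *   rte_mempool_walk(print_pool, NULL);
 */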
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */