/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2016 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_
/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name, and uses a ring to store free objects. It
 * provides some other optional services, like a per-core object
 * cache, and an alignment helper to ensure that objects are padded
 * to spread them equally on all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or
 * equivalent, the object data is not modified; the user can save some
 * meta-data in the object data and retrieve it when allocating a
 * new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must
 * not be interrupted by another task that uses the same mempool
 * (because it uses a ring which is not preemptible). Also, mempool
 * functions must not be used outside the DPDK environment: for
 * example, in a linuxapp environment, a thread that is not created by
 * the EAL must not use mempools. This is because the per-lcore cache
 * would not work, as rte_lcore_id() will not return a correct value.
 */
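/*
 * Quick usage sketch (illustrative only; the pool name, element count and
 * sizes below are arbitrary placeholders, not values mandated by the API):
 *
 *	struct rte_mempool *mp;
 *	void *obj;
 *
 *	mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *				NULL, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create mempool\n");
 *
 *	if (rte_mempool_get(mp, &obj) == 0) {
 *		... use obj ...
 *		rte_mempool_put(mp, obj);
 *	}
 */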
#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>

#ifdef __cplusplus
extern "C" {
#endif
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	unsigned len; /**< Current number of objects in the cache. */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of the cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
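/*
 * Sizing note (informal): the cache may legitimately grow past cache_size,
 * up to the pool's flush threshold (derived from cache_size at creation time
 * in rte_mempool.c; about 1.5 * cache_size in current implementations), and
 * a single put may add up to RTE_MEMPOOL_CACHE_MAX_SIZE objects on top of
 * that before the threshold check runs. The 3x array above is simply a safe
 * upper bound for that worst case.
 */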
/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};
#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool name. */
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically contiguous memory */
#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure
 *
 * Each object stored in mempools is prefixed by this header structure.
 * It makes it possible to retrieve the mempool pointer from the object
 * and to iterate over all objects attached to a mempool. When debug is
 * enabled, a cookie is also added in this structure preventing
 * corruptions and double-frees.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp; /**< The mempool owning the object. */
	phys_addr_t physaddr;   /**< Physical address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;        /**< Debug cookie. */
#endif
};

/**
 * A list of object headers.
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie; /**< Debug cookie. */
#endif
};

/**
 * A list of memory chunks where objects are stored.
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
	STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp; /**< The mempool owning the chunk */
	void *addr;             /**< Virtual address of the chunk */
	phys_addr_t phys_addr;  /**< Physical address of the chunk */
	size_t len;             /**< Length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;           /**< Argument passed to the free callback */
};
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	struct rte_ring *ring;           /**< Ring to store objects. */
	const struct rte_memzone *mz;    /**< Memzone where pool is allocated */
	int flags;                       /**< Flags of the mempool. */
	int socket_id;                   /**< Socket id passed at mempool creation. */
	uint32_t size;                   /**< Max size of the mempool. */
	uint32_t cache_size;             /**< Size of per-lcore local cache. */
	uint32_t cache_flushthresh;
	/**< Threshold before we flush excess elements. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
	uint32_t trailer_size;           /**< Size of trailer (after elt). */

	unsigned private_data_size;      /**< Size of private data. */

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;         /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;          /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines. */
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer". */
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer". */
#define MEMPOOL_F_RING_CREATED   0x0010 /**< Internal: ring is created. */
/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_objs += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
/**
 * @internal Check and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 * All elements of the mempool are allocated together with the mempool header,
 * in one physically contiguous chunk of memory.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be lower than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. The access to the per-lcore table is of course
 *   faster than the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in the cache. Note that even if not used, the
 *   memory space for the cache is always reserved in a mempool structure,
 *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producer".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumer".
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);
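/*
 * Example (illustrative): create a pool whose objects are zeroed once at
 * pool initialization via the obj_init callback. The callback name and
 * pool parameters are placeholders; memset() requires <string.h>.
 *
 *	static void
 *	my_obj_init(struct rte_mempool *mp, void *arg, void *obj,
 *		    unsigned obj_idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *
 *	mp = rte_mempool_create("zeroed_pool", 1023, 128, 32, 0,
 *				NULL, NULL, my_obj_init, NULL,
 *				rte_socket_id(), 0);
 */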
/**
 * Create a new mempool named *name* in memory.
 *
 * The pool contains n elements of elt_size. Its size is set to n.
 * This function uses ``memzone_reserve()`` to allocate the mempool header
 * (and the objects if vaddr is NULL).
 * Depending on the input parameters, mempool elements can be either allocated
 * together with the mempool header, or an externally provided memory buffer
 * can be used to store mempool objects. In the latter case, that external
 * memory buffer can consist of a set of disjoint physical pages.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function called for each object at initialization of the pool.
 *   See rte_mempool_create() for details.
 * @param obj_init_arg
 *   An opaque pointer passed to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given
 *   memory buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical pages size.
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, void *vaddr,
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
/**
 * Call a function for each mempool element
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
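/*
 * Example (illustrative): print every object attached to a pool. The
 * callback name is a placeholder, not part of the API.
 *
 *	static void
 *	dump_obj_cb(struct rte_mempool *mp, void *opaque, void *obj,
 *		    unsigned obj_idx)
 *	{
 *		printf("pool %s: obj %u at %p\n", mp->name, obj_idx, obj);
 *	}
 *
 *	uint32_t n = rte_mempool_obj_iter(mp, dump_obj_cb, NULL);
 */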
/**
 * Call a function for each mempool memory chunk
 *
 * Iterate across all memory chunks attached to a rte_mempool and call
 * the callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param mem_cb
 *   A function pointer that is called for each memory chunk.
 * @param mem_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
/**
 * Dump the status of the mempool to the console.
 *
 * @param f
 *   A pointer to a file for output.
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
/**
 * @internal Put several objects back in the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool, must be strictly
 *   positive.
 * @param is_mp
 *   Mono-producer (0) or multi-producer (1).
 */
static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		   unsigned n, int is_mp)
{
	struct rte_mempool_cache *cache;
	uint32_t index;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
	uint32_t flushthresh = mp->cache_flushthresh;

	/* increment stat now, adding to the mempool always succeeds */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* cache is not enabled or single producer or non-EAL thread */
	if (unlikely(cache_size == 0 || is_mp == 0 ||
		     lcore_id >= RTE_MAX_LCORE))
		goto ring_enqueue;

	/* Go straight to ring if put would overflow mem allocated for cache */
	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (if it crosses the
	 *      cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	cache->len += n;

	if (cache->len >= flushthresh) {
		rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
				cache->len - cache_size);
		cache->len = cache_size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (is_mp) {
		if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	} else {
		if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	}
#else
	if (is_mp)
		rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
	else
		rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool (multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from the obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 1);
}

/**
 * Put several objects back in the mempool (NOT multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 0);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
}

/**
 * Put one object in the mempool (multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_mp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool (NOT multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_sp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
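/*
 * Example (illustrative): return a burst of objects in a single call rather
 * than looping over rte_mempool_put(); bulk operations amortize the
 * cache/ring overhead. The burst size is arbitrary.
 *
 *	void *burst[32];
 *	... fill burst[] with objects previously obtained from mp ...
 *	rte_mempool_put_bulk(mp, burst, 32);
 */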
/**
 * @internal Get several objects from the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to get, must be strictly positive.
 * @param is_mc
 *   Mono-consumer (0) or multi-consumer (1).
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
		   unsigned n, int is_mc)
{
	int ret;
	struct rte_mempool_cache *cache;
	uint32_t index, len;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;

	/* cache is not enabled or single consumer */
	if (unlikely(cache_size == 0 || is_mc == 0 ||
		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
		goto ring_dequeue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it */
		uint32_t req = n + (cache_size - cache->len);

		/* How many do we require, i.e. number to fill the cache + the request */
		ret = rte_ring_mc_dequeue_bulk(mp->ring,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out
			 * of buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n;
	     ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	if (is_mc)
		ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
	else
		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}
/**
 * Get several objects from the mempool (multi-consumer safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n, 1);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get several objects from the mempool (NOT multi-consumer safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n, 0);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n,
				 !(mp->flags & MEMPOOL_F_SC_GET));
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get one object from the mempool (multi-consumer safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool (NOT multi-consumer safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
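/*
 * Example (illustrative): allocate a burst of objects, back off cleanly if
 * the pool is exhausted, and return them all when done. The burst size is
 * arbitrary.
 *
 *	void *objs[16];
 *
 *	if (rte_mempool_get_bulk(mp, objs, 16) < 0) {
 *		... -ENOENT: nothing was allocated, handle the failure ...
 *	} else {
 *		... use the 16 objects ...
 *		rte_mempool_put_bulk(mp, objs, 16);
 *	}
 */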
/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned rte_mempool_count(const struct rte_mempool *mp);

/**
 * Return the number of free entries in the mempool ring, i.e. how many
 * entries can be freed back to the mempool.
 *
 * NOTE: This corresponds to the number of elements *allocated* from the
 * memory pool, not the number of elements in the pool itself. To count
 * the number of elements currently available in the pool, use
 * rte_mempool_count().
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of free entries in the mempool.
 */
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
	return mp->size - rte_mempool_count(mp);
}
/**
 * Test if the mempool is full.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == 0);
}
/**
 * Return the physical address of elt, which is an element of the pool mp.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The physical address of the elt element.
 */
static inline phys_addr_t
rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->physaddr;
}
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(struct rte_mempool *mp);
/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}
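/*
 * Example (illustrative): retrieve application private data reserved via the
 * private_data_size argument of rte_mempool_create(). The struct and pool
 * parameters are placeholders.
 *
 *	struct my_pool_priv { uint32_t magic; };
 *
 *	mp = rte_mempool_create("priv_pool", 1023, 64, 0,
 *				sizeof(struct my_pool_priv),
 *				NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 *	struct my_pool_priv *priv = rte_mempool_get_priv(mp);
 *	priv->magic = 0xcafe;
 */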
/**
 * Dump the status of all mempools on the console.
 *
 * @param f
 *   A pointer to a file for output.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name.
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);
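/*
 * Example (illustrative): attach to a pool created elsewhere (for instance
 * by a primary process) by name; "example_pool" is a placeholder.
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("example_pool");
 *	if (mp == NULL)
 *		rte_panic("mempool not found\n");
 */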
/**
 * Get the header, trailer and total size of a mempool element.
 *
 * Given a desired size of the mempool element and mempool flags,
 * calculates header, trailer, body and total sizes of the mempool object.
 *
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);
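/*
 * Example (illustrative): inspect how much per-object overhead the mempool
 * adds for a given element size and flag set. The element size is arbitrary.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *	printf("hdr=%u elt=%u trl=%u total=%u\n",
 *	       sz.header_size, sz.elt_size, sz.trailer_size, total);
 */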
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate the maximum amount of memory required to store the given
 * number of objects. Assume that the memory buffer will be aligned at a
 * page boundary.
 *
 * Note that if the object size is bigger than the page size, then it
 * assumes that pages are grouped in subsets of physically contiguous
 * pages big enough to store at least one object.
 *
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param pg_shift
 *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
 * @return
 *   Required memory size aligned at page boundary.
 */
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift);
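/*
 * Example (illustrative): estimate the buffer size needed for 1024 objects
 * backed by 2 MB pages (pg_shift = 21). All values are placeholders.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *	size_t mem = rte_mempool_xmem_size(1024, total, 21);
 */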
/**
 * Get the size of memory required to store mempool elements.
 *
 * Calculate how much memory would actually be required with the given
 * memory footprint to store the required number of objects.
 *
 * @param vaddr
 *   Virtual address of the externally allocated memory buffer.
 *   Will be used to store mempool objects.
 * @param elt_num
 *   Number of elements.
 * @param total_elt_sz
 *   The size of each element, including header and trailer, as returned
 *   by rte_mempool_calc_obj_size().
 * @param paddr
 *   Array of physical addresses of the pages that comprise the given
 *   memory buffer.
 * @param pg_num
 *   Number of elements in the paddr array.
 * @param pg_shift
 *   LOG2 of the physical pages size.
 * @return
 *   On success, the number of bytes needed to store the given number of
 *   objects, aligned to the given page size. If the provided memory
 *   buffer is too small, return a negative value whose absolute value
 *   is the actual number of elements that can be stored in that buffer.
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
	uint32_t pg_shift);
/**
 * Walk the list of all memory pools.
 *
 * @param func
 *   Iterator function called for each mempool.
 * @param arg
 *   Argument passed to iterator.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);
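/*
 * Example (illustrative): run a consistency check on every mempool in the
 * system. The callback name is a placeholder.
 *
 *	static void
 *	audit_cb(struct rte_mempool *mp, void *arg)
 *	{
 *		(void)arg;
 *		rte_mempool_audit(mp);
 *	}
 *
 *	rte_mempool_walk(audit_cb, NULL);
 */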
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */