4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _RTE_MEMPOOL_H_
35 #define _RTE_MEMPOOL_H_
41 * A memory pool is an allocator of fixed-size object. It is
42 * identified by its name, and uses a ring to store free objects. It
43 * provides some other optional services, like a per-core object
44 * cache, and an alignment helper to ensure that objects are padded
45 * to spread them equally on all RAM channels, ranks, and so on.
47 * Objects owned by a mempool should never be added in another
48 * mempool. When an object is freed using rte_mempool_put() or
49 * equivalent, the object data is not modified; the user can save some
50 * meta-data in the object data and retrieve them when allocating a
53 * Note: the mempool implementation is not preemptable. A lcore must
54 * not be interrupted by another task that uses the same mempool
55 * (because it uses a ring which is not preemptable). Also, mempool
56 * functions must not be used outside the DPDK environment: for
57 * example, in linuxapp environment, a thread that is not created by
58 * the EAL must not use mempools. This is due to the per-lcore cache
59 * that won't work as rte_lcore_id() will not return a correct value.
66 #include <sys/queue.h>
69 #include <rte_debug.h>
70 #include <rte_lcore.h>
71 #include <rte_memory.h>
72 #include <rte_branch_prediction.h>
79 #define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
80 #define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
81 #define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
83 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 *
 * One instance exists per lcore so counters can be bumped without
 * atomic operations; the structure is cache-aligned to avoid false
 * sharing between cores.
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
97 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	unsigned len; /**< Current number of valid object pointers in objs[]. */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
109 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/**
 * Mempool object size information: describes how a single element is
 * laid out in memory (header, element data, trailer).
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};
119 #define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool. */
120 #define RTE_MEMPOOL_MZ_PREFIX "MP_"
123 #define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
125 #ifdef RTE_LIBRTE_XEN_DOM0
127 /* "<name>_MP_elt" */
128 #define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
132 #define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT
134 #endif /* RTE_LIBRTE_XEN_DOM0 */
136 #define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
138 /** Mempool over one chunk of physically continuous memory */
139 #define MEMPOOL_PG_NUM_DEFAULT 1
142 * The RTE mempool structure.
145 TAILQ_ENTRY(rte_mempool) next; /**< Next in list. */
147 char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
148 struct rte_ring *ring; /**< Ring to store objects. */
149 phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
150 int flags; /**< Flags of the mempool. */
151 uint32_t size; /**< Size of the mempool. */
152 uint32_t cache_size; /**< Size of per-lcore local cache. */
153 uint32_t cache_flushthresh;
154 /**< Threshold before we flush excess elements. */
156 uint32_t elt_size; /**< Size of an element. */
157 uint32_t header_size; /**< Size of header (before elt). */
158 uint32_t trailer_size; /**< Size of trailer (after elt). */
160 unsigned private_data_size; /**< Size of private data. */
162 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
163 /** Per-lcore local cache. */
164 struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
167 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
168 /** Per-lcore statistics. */
169 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
172 /* Address translation support, starts from next cache line. */
174 /** Number of elements in the elt_pa array. */
175 uint32_t pg_num __rte_cache_aligned;
176 uint32_t pg_shift; /**< LOG2 of the physical pages. */
177 uintptr_t pg_mask; /**< physical page mask value. */
178 uintptr_t elt_va_start;
179 /**< Virtual address of the first mempool object. */
180 uintptr_t elt_va_end;
181 /**< Virtual address of the <size + 1> mempool object. */
182 phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
183 /**< Array of physical pages addresses for the mempool objects buffer. */
185 } __rte_cache_aligned;
187 #define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread in memory. */
188 #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
189 #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
190 #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
/**
 * @internal When debug is enabled, store some statistics.
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		mp->stats[__lcore_id].name##_objs += n;		\
		mp->stats[__lcore_id].name##_bulk += 1;		\
	} while(0)
#else
/* Statistics are compiled out when debug support is disabled. */
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
 * Calculates size of the mempool header.
 *
 * The header is the rte_mempool structure itself plus the page table
 * (elt_pa) extended to pgn entries, rounded up to a cache line.
 * @param mp
 *   Pointer to the memory pool.
 * @param pgn
 *   Number of pages used to store mempool objects.
 */
#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
	RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
	sizeof ((mp)->elt_pa[0]), CACHE_LINE_SIZE))
/**
 * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
 */
#define MEMPOOL_IS_CONTIG(mp) \
	((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
	(mp)->phys_addr == (mp)->elt_pa[0])
/**
 * @internal Get a pointer to a mempool pointer in the object header.
 * @param obj
 *   An object that is owned by a pool.
 * @return
 *   The pointer to the mempool from which the object was allocated.
 */
static inline struct rte_mempool **__mempool_from_obj(void *obj)
{
	struct rte_mempool **mpp;
	unsigned off;

	/* The owning-mempool pointer is stored just before the object data;
	 * in debug builds a 64-bit header cookie sits between them. */
	off = sizeof(struct rte_mempool *);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	off += sizeof(uint64_t);
#endif

	mpp = (struct rte_mempool **)((char *)obj - off);
	return mpp;
}
/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool * const *mpp;

	mpp = __mempool_from_obj(obj);
	return *mpp;
}
265 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* get header cookie value: the cookie is the 64-bit word stored
 * immediately before the object data */
static inline uint64_t __mempool_read_header_cookie(const void *obj)
{
	return *(const uint64_t *)((const char *)obj - sizeof(uint64_t));
}
272 /* get trailer cookie value */
273 static inline uint64_t __mempool_read_trailer_cookie(void *obj)
275 struct rte_mempool **mpp = __mempool_from_obj(obj);
276 return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
279 /* write header cookie value */
280 static inline void __mempool_write_header_cookie(void *obj, int free)
283 cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
285 *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
287 *cookie_p = RTE_MEMPOOL_HEADER_COOKIE2;
291 /* write trailer cookie value */
292 static inline void __mempool_write_trailer_cookie(void *obj)
295 struct rte_mempool **mpp = __mempool_from_obj(obj);
296 cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
297 *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
299 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
 * @internal Check and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the object table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void __mempool_check_cookies(const struct rte_mempool *mp,
					   void * const *obj_table_const,
					   unsigned n, int free)
{
	uint64_t cookie;
	void *tmp;
	void *obj;
	void **obj_table;

	/* Force to drop the "const" attribute. This is done only when
	 * DEBUG is enabled */
	tmp = (void *) obj_table_const;
	obj_table = (void **) tmp;

	while (n--) {
		obj = obj_table[n];

		/* each object must have been carved out of this very pool */
		if (rte_mempool_from_obj(obj) != mp)
			rte_panic("MEMPOOL: object is owned by another "
				  "mempool\n");

		cookie = __mempool_read_header_cookie(obj);

		if (free == 0) {
			/* object claimed to be allocated: expect COOKIE1,
			 * then flip it to the "free" marker */
			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
				rte_log_set_history(0);
				RTE_LOG(CRIT, MEMPOOL,
					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
					obj, mp, cookie);
				rte_panic("MEMPOOL: bad header cookie (put)\n");
			}
			__mempool_write_header_cookie(obj, 1);
		}
		else if (free == 1) {
			/* object claimed to be free: expect COOKIE2,
			 * then flip it to the "allocated" marker */
			if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
				rte_log_set_history(0);
				RTE_LOG(CRIT, MEMPOOL,
					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
					obj, mp, cookie);
				rte_panic("MEMPOOL: bad header cookie (get)\n");
			}
			__mempool_write_header_cookie(obj, 0);
		}
		else if (free == 2) {
			/* audit only: either marker is acceptable */
			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
			    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
				rte_log_set_history(0);
				RTE_LOG(CRIT, MEMPOOL,
					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
					obj, mp, cookie);
				rte_panic("MEMPOOL: bad header cookie (audit)\n");
			}
		}
		cookie = __mempool_read_trailer_cookie(obj);
		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
			rte_log_set_history(0);
			RTE_LOG(CRIT, MEMPOOL,
				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
				obj, mp, cookie);
			rte_panic("MEMPOOL: bad trailer cookie\n");
		}
	}
}
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic error "-Wcast-qual"
#endif
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
390 * An mempool's object iterator callback function.
392 typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
393 void * /*obj_start*/,
395 uint32_t /*obj_index */);
398 * Iterates across objects of the given size and alignment in the
399 * provided chunk of memory. The given memory buffer can consist of
400 * disjoint physical pages.
401 * For each object calls the provided callback (if any).
402 * Used to populate mempool, walk through all elements of the mempool,
403 * estimate how many elements of the given size could be created in the given
406 * Virtual address of the memory buffer.
408 * Maximum number of objects to iterate through.
410 * Size of each object.
* Array of physical addresses of the pages that comprise the given memory
415 * Number of elements in the paddr array.
417 * LOG2 of the physical pages size.
419 * Object iterator callback function (could be NULL).
420 * @param obj_iter_arg
* User-defined parameter for the object iterator callback function.
424 * Number of objects iterated through.
427 uint32_t rte_mempool_obj_iter(void *vaddr,
428 uint32_t elt_num, size_t elt_sz, size_t align,
429 const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
430 rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);
433 * An object constructor callback function for mempool.
435 * Arguments are the mempool, the opaque pointer given by the user in
436 * rte_mempool_create(), the pointer to the element and the index of
437 * the element in the pool.
439 typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
443 * A mempool constructor callback function.
445 * Arguments are the mempool and the opaque pointer given by the user in
446 * rte_mempool_create().
448 typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
451 * Creates a new mempool named *name* in memory.
453 * This function uses ``memzone_reserve()`` to allocate memory. The
454 * pool contains n elements of elt_size. Its size is set to n.
455 * All elements of the mempool are allocated together with the mempool header,
456 * in one physically continuous chunk of memory.
459 * The name of the mempool.
461 * The number of elements in the mempool. The optimum size (in terms of
462 * memory usage) for a mempool is when n is a power of two minus one:
465 * The size of each element.
467 * If cache_size is non-zero, the rte_mempool library will try to
468 * limit the accesses to the common lockless pool, by maintaining a
469 * per-lcore object cache. This argument must be lower or equal to
470 * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
471 * cache_size to have "n modulo cache_size == 0": if this is
472 * not the case, some elements will always stay in the pool and will
473 * never be used. The access to the per-lcore table is of course
474 * faster than the multi-producer/consumer pool. The cache can be
475 * disabled if the cache_size argument is set to 0; it can be useful to
* avoid losing objects in cache. Note that even if not used, the
477 * memory space for cache is always reserved in a mempool structure,
478 * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
479 * @param private_data_size
480 * The size of the private data appended after the mempool
481 * structure. This is useful for storing some private data after the
482 * mempool structure, as is done for rte_mbuf_pool for example.
484 * A function pointer that is called for initialization of the pool,
485 * before object initialization. The user can initialize the private
486 * data in this function if needed. This parameter can be NULL if
489 * An opaque pointer to data that can be used in the mempool
490 * constructor function.
492 * A function pointer that is called for each object at
493 * initialization of the pool. The user can set some meta data in
494 * objects if needed. This parameter can be NULL if not needed.
495 * The obj_init() function takes the mempool pointer, the init_arg,
496 * the object pointer and the object number as parameters.
497 * @param obj_init_arg
498 * An opaque pointer to data that can be used as an argument for
499 * each call to the object constructor function.
501 * The *socket_id* argument is the socket identifier in the case of
502 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
503 * constraint for the reserved zone.
505 * The *flags* arguments is an OR of following flags:
506 * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
507 * between channels in RAM: the pool allocator will add padding
508 * between objects depending on the hardware configuration. See
509 * Memory alignment constraints for details. If this flag is set,
510 * the allocator will just align them to a cache line.
511 * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
512 * cache-aligned. This flag removes this constraint, and no
513 * padding will be present between objects. This flag implies
514 * MEMPOOL_F_NO_SPREAD.
515 * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
516 * when using rte_mempool_put() or rte_mempool_put_bulk() is
517 * "single-producer". Otherwise, it is "multi-producers".
518 * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
519 * when using rte_mempool_get() or rte_mempool_get_bulk() is
520 * "single-consumer". Otherwise, it is "multi-consumers".
522 * The pointer to the new allocated mempool, on success. NULL on error
523 * with rte_errno set appropriately. Possible rte_errno values include:
524 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
525 * - E_RTE_SECONDARY - function was called from a secondary process instance
526 * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
527 * - EINVAL - cache size provided is too large
528 * - ENOSPC - the maximum number of memzones has already been allocated
529 * - EEXIST - a memzone with the same name already exists
530 * - ENOMEM - no appropriate memory area found in which to create memzone
533 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
534 unsigned cache_size, unsigned private_data_size,
535 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
536 rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
537 int socket_id, unsigned flags);
540 * Creates a new mempool named *name* in memory.
542 * This function uses ``memzone_reserve()`` to allocate memory. The
543 * pool contains n elements of elt_size. Its size is set to n.
544 * Depending on the input parameters, mempool elements can be either allocated
545 * together with the mempool header, or an externally provided memory buffer
546 * could be used to store mempool objects. In later case, that external
547 * memory buffer can consist of set of disjoint phyiscal pages.
550 * The name of the mempool.
552 * The number of elements in the mempool. The optimum size (in terms of
553 * memory usage) for a mempool is when n is a power of two minus one:
556 * The size of each element.
558 * If cache_size is non-zero, the rte_mempool library will try to
559 * limit the accesses to the common lockless pool, by maintaining a
560 * per-lcore object cache. This argument must be lower or equal to
561 * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
562 * cache_size to have "n modulo cache_size == 0": if this is
563 * not the case, some elements will always stay in the pool and will
564 * never be used. The access to the per-lcore table is of course
565 * faster than the multi-producer/consumer pool. The cache can be
566 * disabled if the cache_size argument is set to 0; it can be useful to
567 * avoid loosing objects in cache. Note that even if not used, the
568 * memory space for cache is always reserved in a mempool structure,
569 * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
570 * @param private_data_size
571 * The size of the private data appended after the mempool
572 * structure. This is useful for storing some private data after the
573 * mempool structure, as is done for rte_mbuf_pool for example.
575 * A function pointer that is called for initialization of the pool,
576 * before object initialization. The user can initialize the private
577 * data in this function if needed. This parameter can be NULL if
580 * An opaque pointer to data that can be used in the mempool
581 * constructor function.
583 * A function pointer that is called for each object at
584 * initialization of the pool. The user can set some meta data in
585 * objects if needed. This parameter can be NULL if not needed.
586 * The obj_init() function takes the mempool pointer, the init_arg,
587 * the object pointer and the object number as parameters.
588 * @param obj_init_arg
589 * An opaque pointer to data that can be used as an argument for
590 * each call to the object constructor function.
592 * The *socket_id* argument is the socket identifier in the case of
593 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
594 * constraint for the reserved zone.
596 * The *flags* arguments is an OR of following flags:
597 * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
598 * between channels in RAM: the pool allocator will add padding
599 * between objects depending on the hardware configuration. See
600 * Memory alignment constraints for details. If this flag is set,
601 * the allocator will just align them to a cache line.
602 * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
603 * cache-aligned. This flag removes this constraint, and no
604 * padding will be present between objects. This flag implies
605 * MEMPOOL_F_NO_SPREAD.
606 * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
607 * when using rte_mempool_put() or rte_mempool_put_bulk() is
608 * "single-producer". Otherwise, it is "multi-producers".
609 * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
610 * when using rte_mempool_get() or rte_mempool_get_bulk() is
611 * "single-consumer". Otherwise, it is "multi-consumers".
613 * Virtual address of the externally allocated memory buffer.
614 * Will be used to store mempool objects.
616 * Array of phyiscall addresses of the pages that comprises given memory
619 * Number of elements in the paddr array.
621 * LOG2 of the physical pages size.
623 * The pointer to the new allocated mempool, on success. NULL on error
624 * with rte_errno set appropriately. Possible rte_errno values include:
625 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
626 * - E_RTE_SECONDARY - function was called from a secondary process instance
627 * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
628 * - EINVAL - cache size provided is too large
629 * - ENOSPC - the maximum number of memzones has already been allocated
630 * - EEXIST - a memzone with the same name already exists
631 * - ENOMEM - no appropriate memory area found in which to create memzone
634 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
635 unsigned cache_size, unsigned private_data_size,
636 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
637 rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
638 int socket_id, unsigned flags, void *vaddr,
639 const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
641 #ifdef RTE_LIBRTE_XEN_DOM0
643 * Creates a new mempool named *name* in memory on Xen Dom0.
645 * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
646 * pool contains n elements of elt_size. Its size is set to n.
647 * All elements of the mempool are allocated together with the mempool header,
648 * and memory buffer can consist of set of disjoint phyiscal pages.
651 * The name of the mempool.
653 * The number of elements in the mempool. The optimum size (in terms of
654 * memory usage) for a mempool is when n is a power of two minus one:
657 * The size of each element.
659 * If cache_size is non-zero, the rte_mempool library will try to
660 * limit the accesses to the common lockless pool, by maintaining a
661 * per-lcore object cache. This argument must be lower or equal to
662 * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
663 * cache_size to have "n modulo cache_size == 0": if this is
664 * not the case, some elements will always stay in the pool and will
665 * never be used. The access to the per-lcore table is of course
666 * faster than the multi-producer/consumer pool. The cache can be
667 * disabled if the cache_size argument is set to 0; it can be useful to
668 * avoid loosing objects in cache. Note that even if not used, the
669 * memory space for cache is always reserved in a mempool structure,
670 * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
671 * @param private_data_size
672 * The size of the private data appended after the mempool
673 * structure. This is useful for storing some private data after the
674 * mempool structure, as is done for rte_mbuf_pool for example.
676 * A function pointer that is called for initialization of the pool,
677 * before object initialization. The user can initialize the private
678 * data in this function if needed. This parameter can be NULL if
681 * An opaque pointer to data that can be used in the mempool
682 * constructor function.
684 * A function pointer that is called for each object at
685 * initialization of the pool. The user can set some meta data in
686 * objects if needed. This parameter can be NULL if not needed.
687 * The obj_init() function takes the mempool pointer, the init_arg,
688 * the object pointer and the object number as parameters.
689 * @param obj_init_arg
690 * An opaque pointer to data that can be used as an argument for
691 * each call to the object constructor function.
693 * The *socket_id* argument is the socket identifier in the case of
694 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
695 * constraint for the reserved zone.
697 * The *flags* arguments is an OR of following flags:
698 * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
699 * between channels in RAM: the pool allocator will add padding
700 * between objects depending on the hardware configuration. See
701 * Memory alignment constraints for details. If this flag is set,
702 * the allocator will just align them to a cache line.
703 * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
704 * cache-aligned. This flag removes this constraint, and no
705 * padding will be present between objects. This flag implies
706 * MEMPOOL_F_NO_SPREAD.
707 * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
708 * when using rte_mempool_put() or rte_mempool_put_bulk() is
709 * "single-producer". Otherwise, it is "multi-producers".
710 * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
711 * when using rte_mempool_get() or rte_mempool_get_bulk() is
712 * "single-consumer". Otherwise, it is "multi-consumers".
714 * The pointer to the new allocated mempool, on success. NULL on error
715 * with rte_errno set appropriately. Possible rte_errno values include:
716 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
717 * - E_RTE_SECONDARY - function was called from a secondary process instance
718 * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
719 * - EINVAL - cache size provided is too large
720 * - ENOSPC - the maximum number of memzones has already been allocated
721 * - EEXIST - a memzone with the same name already exists
722 * - ENOMEM - no appropriate memory area found in which to create memzone
725 rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
726 unsigned cache_size, unsigned private_data_size,
727 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
728 rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
729 int socket_id, unsigned flags);
733 * Dump the status of the mempool to the console.
736 * A pointer to the mempool structure.
738 void rte_mempool_dump(const struct rte_mempool *mp);
741 * @internal Put several objects back in the mempool; used internally.
743 * A pointer to the mempool structure.
745 * A pointer to a table of void * pointers (objects).
747 * The number of objects to store back in the mempool, must be strictly
750 * Mono-producer (0) or multi-producers (1).
752 static inline void __attribute__((always_inline))
753 __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
754 unsigned n, int is_mp)
756 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
757 struct rte_mempool_cache *cache;
760 unsigned lcore_id = rte_lcore_id();
761 uint32_t cache_size = mp->cache_size;
762 uint32_t flushthresh = mp->cache_flushthresh;
763 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
765 /* increment stat now, adding in mempool always success */
766 __MEMPOOL_STAT_ADD(mp, put, n);
768 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
769 /* cache is not enabled or single producer */
770 if (unlikely(cache_size == 0 || is_mp == 0))
773 /* Go straight to ring if put would overflow mem allocated for cache */
774 if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
777 cache = &mp->local_cache[lcore_id];
778 cache_objs = &cache->objs[cache->len];
781 * The cache follows the following algorithm
782 * 1. Add the objects to the cache
783 * 2. Anything greater than the cache min value (if it crosses the
784 * cache flush threshold) is flushed to the ring.
787 /* Add elements back into the cache */
788 for (index = 0; index < n; ++index, obj_table++)
789 cache_objs[index] = *obj_table;
793 if (cache->len >= flushthresh) {
794 rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
795 cache->len - cache_size);
796 cache->len = cache_size;
802 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
804 /* push remaining objects in ring */
805 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
807 if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
808 rte_panic("cannot put objects in mempool\n");
811 if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
812 rte_panic("cannot put objects in mempool\n");
816 rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
818 rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
/**
 * Put several objects back in the mempool (multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from the obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 1);
}
/**
 * Put several objects back in the mempool (NOT multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 0);
}
860 * Put several objects back in the mempool.
862 * This function calls the multi-producer or the single-producer
863 * version depending on the default behavior that was specified at
864 * mempool creation time (see flags).
867 * A pointer to the mempool structure.
869 * A pointer to a table of void * pointers (objects).
871 * The number of objects to add in the mempool from obj_table.
873 static inline void __attribute__((always_inline))
874 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
877 __mempool_check_cookies(mp, obj_table, n, 0);
878 __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
/**
 * Put one object in the mempool (multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_mp_put_bulk(mp, &obj, 1);
}
/**
 * Put one object back in the mempool (NOT multi-producers safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_sp_put_bulk(mp, &obj, 1);
}
/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
928 * @internal Get several objects from the mempool; used internally.
930 * A pointer to the mempool structure.
932 * A pointer to a table of void * pointers (objects).
934 * The number of objects to get, must be strictly positive.
936 * Mono-consumer (0) or multi-consumers (1).
938 * - >=0: Success; number of objects supplied.
939 * - <0: Error; code of ring dequeue function.
941 static inline int __attribute__((always_inline))
942 __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
943 unsigned n, int is_mc)
946 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
949 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
950 struct rte_mempool_cache *cache;
953 unsigned lcore_id = rte_lcore_id();
954 uint32_t cache_size = mp->cache_size;
956 /* cache is not enabled or single consumer */
957 if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
960 cache = &mp->local_cache[lcore_id];
961 cache_objs = cache->objs;
963 /* Can this be satisfied from the cache? */
964 if (cache->len < n) {
965 /* No. Backfill the cache first, and then fill from it */
966 uint32_t req = n + (cache_size - cache->len);
968 /* How many do we require i.e. number to fill the cache + the request */
969 ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
970 if (unlikely(ret < 0)) {
972 * In the offchance that we are buffer constrained,
973 * where we are not able to allocate cache + n, go to
974 * the ring directly. If that fails, we are truly out of
983 /* Now fill in the response ... */
984 for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
985 *obj_table = cache_objs[len];
989 __MEMPOOL_STAT_ADD(mp, get_success, n_orig);
994 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
996 /* get remaining objects from ring */
998 ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
1000 ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
1003 __MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
1005 __MEMPOOL_STAT_ADD(mp, get_success, n_orig);
/**
 * Get several objects from the mempool (multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if cache from other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;

	/* is_mc = 1: multi-consumer dequeue path. */
	ret = __mempool_get_bulk(mp, obj_table, n, 1);
	if (ret == 0)
		/* Only audit cookies for objects actually obtained. */
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get several objects from the mempool (NOT multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if cache from other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;

	/* is_mc = 0: single-consumer dequeue; caller guarantees exclusivity. */
	ret = __mempool_get_bulk(mp, obj_table, n, 0);
	if (ret == 0)
		/* Only audit cookies for objects actually obtained. */
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
1068 * Get several objects from the mempool.
1070 * This function calls the multi-consumers or the single-consumer
1071 * version, depending on the default behaviour that was specified at
1072 * mempool creation time (see flags).
1074 * If cache is enabled, objects will be retrieved first from cache,
1075 * subsequently from the common pool. Note that it can return -ENOENT when
1076 * the local cache and common pool are empty, even if cache from other
1080 * A pointer to the mempool structure.
1082 * A pointer to a table of void * pointers (objects) that will be filled.
1084 * The number of objects to get from the mempool to obj_table.
1086 * - 0: Success; objects taken
1087 * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
1089 static inline int __attribute__((always_inline))
1090 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
1093 ret = __mempool_get_bulk(mp, obj_table, n,
1094 !(mp->flags & MEMPOOL_F_SC_GET));
1096 __mempool_check_cookies(mp, obj_table, n, 1);
/**
 * Get one object from the mempool (multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if cache from other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
	/* Single-object convenience wrapper over the bulk variant. */
	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
}
/**
 * Get one object from the mempool (NOT multi-consumers safe).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if cache from other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
	/* Single-object convenience wrapper over the bulk variant. */
	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
}
/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation (see flags).
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if cache from other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	/* Single-object convenience wrapper over the bulk variant. */
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
1171 * Return the number of entries in the mempool.
1173 * When cache is enabled, this function has to browse the length of
1174 * all lcores, so it should not be used in a data path, but only for
1178 * A pointer to the mempool structure.
1180 * The number of entries in the mempool.
1182 unsigned rte_mempool_count(const struct rte_mempool *mp);
1185 * Return the number of free entries in the mempool ring.
1186 * i.e. how many entries can be freed back to the mempool.
1188 * NOTE: This corresponds to the number of elements *allocated* from the
1189 * memory pool, not the number of elements in the pool itself. To count
1190 * the number elements currently available in the pool, use "rte_mempool_count"
1192 * When cache is enabled, this function has to browse the length of
1193 * all lcores, so it should not be used in a data path, but only for
1197 * A pointer to the mempool structure.
1199 * The number of free entries in the mempool.
1201 static inline unsigned
1202 rte_mempool_free_count(const struct rte_mempool *mp)
1204 return mp->size - rte_mempool_count(mp);
1208 * Test if the mempool is full.
1210 * When cache is enabled, this function has to browse the length of all
1211 * lcores, so it should not be used in a data path, but only for debug
1215 * A pointer to the mempool structure.
1217 * - 1: The mempool is full.
1218 * - 0: The mempool is not full.
1221 rte_mempool_full(const struct rte_mempool *mp)
1223 return !!(rte_mempool_count(mp) == mp->size);
/**
 * Test if the mempool is empty.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	/* An '==' comparison already yields 0 or 1; '!!' was redundant. */
	return rte_mempool_count(mp) == 0;
}
1246 * Return the physical address of elt, which is an element of the pool mp.
1249 * A pointer to the mempool structure.
1251 * A pointer (virtual address) to the element of the pool.
1253 * The physical address of the elt element.
1255 static inline phys_addr_t
1256 rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
1260 off = (const char *)elt - (const char *)mp->elt_va_start;
1261 return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
1266 * Check the consistency of mempool objects.
1268 * Verify the coherency of fields in the mempool structure. Also check
1269 * that the cookies of mempool objects (even the ones that are not
1270 * present in pool) have a correct value. If not, a panic will occur.
1273 * A pointer to the mempool structure.
1275 void rte_mempool_audit(const struct rte_mempool *mp);
1278 * Return a pointer to the private data in an mempool structure.
1281 * A pointer to the mempool structure.
1283 * A pointer to the private data.
1285 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
1287 return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
1291 * Dump the status of all mempools on the console
1293 void rte_mempool_list_dump(void);
1296 * Search a mempool from its name
1299 * The name of the mempool.
1301 * The pointer to the mempool matching the name, or NULL if not found.
1303 * with rte_errno set appropriately. Possible rte_errno values include:
1304 * - ENOENT - required entry not available to return.
1307 struct rte_mempool *rte_mempool_lookup(const char *name);
1310 * Given a desired size of the mempool element and mempool flags,
1311 * calculates header, trailer, body and total sizes of the mempool object.
1313 * The size of each element.
1315 * The flags used for the mempool creation.
1316 * Consult rte_mempool_create() for more information about possible values.
1317 * The size of each element.
1319 * Total size of the mempool object.
1321 uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
1322 struct rte_mempool_objsz *sz);
1325 * Calculate maximum amount of memory required to store given number of objects.
1326 * Assumes that the memory buffer will be aligned at page boundary.
1327 * Note, that if object size is bigger than page size, then it assumes that
1328 * we have a subset of physically contiguous pages big enough to store
1329 * at least one object.
1331 * Number of elements.
1333 * The size of each element.
1335 * LOG2 of the physical pages size.
1337 * Required memory size aligned at page boundary.
1339 size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
1343 * Calculate how much memory would be actually required with the given
1344 * memory footprint to store required number of objects.
1346 * Virtual address of the externally allocated memory buffer.
1347 * Will be used to store mempool objects.
1349 * Number of elements.
1351 * The size of each element.
1353 * Array of physical addresses of the pages that comprise given memory
1356 * Number of elements in the paddr array.
1358 * LOG2 of the physical pages size.
1360 * Number of bytes needed to store given number of objects,
1361 * aligned to the given page size.
1362 * If provided memory buffer is not big enough:
1363 * (-1) * actual number of elements that can be stored in that buffer.
1365 ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
1366 const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
1372 #endif /* _RTE_MEMPOOL_H_ */