/*
 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name and uses a ring to store free objects. It
 * provides some other optional services, such as a per-core object
 * cache and an alignment helper that pads objects so that they are
 * spread evenly across all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or an
 * equivalent function, the object data is not modified; the user can
 * save some meta-data in the object data and retrieve it when
 * allocating a new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must
 * not be interrupted by another task that uses the same mempool
 * (because it uses a ring, which is not preemptible). Also, mempool
 * functions must not be used outside the DPDK environment: for
 * example, in the linuxapp environment, a thread that was not created
 * by the EAL must not use mempools. This is because the per-lcore
 * cache will not work, as rte_lcore_id() will not return a correct
 * value for such a thread.
 */
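/*
 * Minimal usage sketch (illustrative only; it assumes the EAL has been
 * initialized, and the pool name and sizes below are arbitrary example
 * values):
 *
 *	struct rte_mempool *mp;
 *	void *obj;
 *
 *	mp = rte_mempool_create("example_pool", 1023, 64,
 *				0, 0,
 *				NULL, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create example mempool\n");
 *
 *	if (rte_mempool_get(mp, &obj) == 0)
 *		rte_mempool_put(mp, obj);
 */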
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>

#ifdef __cplusplus
extern "C" {
#endif
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Number of successful gets. */
	uint64_t get_success_objs; /**< Number of objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Number of failed gets. */
	uint64_t get_fail_objs;    /**< Number of objects that failed to be allocated. */
} __rte_cache_aligned;
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	unsigned len; /**< Current number of objects in the cache. */
	/*
	 * The array is oversized so that the cache can temporarily overflow,
	 * which avoids needlessly emptying it in some cases.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cached objects. */
} __rte_cache_aligned;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool name. */
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	TAILQ_ENTRY(rte_mempool) next;   /**< Next in list. */

	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of the mempool. */
	struct rte_ring *ring;           /**< Ring used to store free objects. */
	phys_addr_t phys_addr;           /**< Physical address of the mempool struct. */
	int flags;                       /**< Flags of the mempool. */
	uint32_t size;                   /**< Number of elements in the mempool. */
	uint32_t cache_size;             /**< Size of the per-lcore local cache. */
	uint32_t cache_flushthresh;      /**< Threshold before excess elements are flushed to the ring. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of the header (before the element). */
	uint32_t trailer_size;           /**< Size of the trailer (after the element). */

	unsigned private_data_size;      /**< Size of the private data. */

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	/** Per-lcore local cache. */
	struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
#endif

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread in memory. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines. */
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer". */
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer". */
/**
 * @internal When debug is enabled, store some statistics.
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the per-object statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		mp->stats[__lcore_id].name##_objs += n;		\
		mp->stats[__lcore_id].name##_bulk += 1;		\
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
 * @internal Get a pointer to the mempool pointer stored in the object header.
 * @param obj
 *   Pointer to the object.
 * @return
 *   The pointer to the mempool from which the object was allocated.
 */
static inline struct rte_mempool **__mempool_from_obj(void *obj)
{
	struct rte_mempool **mpp;
	unsigned off;

	off = sizeof(struct rte_mempool *);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	off += sizeof(uint64_t);
#endif
	mpp = (struct rte_mempool **)((char *)obj - off);
	return mpp;
}
/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool * const *mpp;
	mpp = __mempool_from_obj(obj);
	return *mpp;
}
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* get header cookie value */
static inline uint64_t __mempool_read_header_cookie(const void *obj)
{
	return *(const uint64_t *)((const char *)obj - sizeof(uint64_t));
}

/* get trailer cookie value */
static inline uint64_t __mempool_read_trailer_cookie(void *obj)
{
	struct rte_mempool **mpp = __mempool_from_obj(obj);
	return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
}

/* write header cookie value */
static inline void __mempool_write_header_cookie(void *obj, int free)
{
	uint64_t *cookie_p;
	cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
	if (free == 0)
		*cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
	else
		*cookie_p = RTE_MEMPOOL_HEADER_COOKIE2;
}

/* write trailer cookie value */
static inline void __mempool_write_trailer_cookie(void *obj)
{
	uint64_t *cookie_p;
	struct rte_mempool **mpp = __mempool_from_obj(obj);
	cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
	*cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
}
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
 * @internal Check and update cookies, or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the table.
 * @param free
 *   - 0: the objects are supposed to be allocated; mark them as free
 *   - 1: the objects are supposed to be free; mark them as allocated
 *   - 2: just check that the cookies are valid (free or allocated)
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void __mempool_check_cookies(const struct rte_mempool *mp,
					   void * const *obj_table_const,
					   unsigned n, int free)
{
	uint64_t cookie;
	void *tmp;
	void *obj;
	void **obj_table;

	/* Force to drop the "const" attribute. This is done only when
	 * DEBUG is enabled */
	tmp = (void *) obj_table_const;
	obj_table = (void **) tmp;

	while (n--) {
		obj = obj_table[n];

		if (rte_mempool_from_obj(obj) != mp)
			rte_panic("MEMPOOL: object is owned by another "
				  "mempool\n");

		cookie = __mempool_read_header_cookie(obj);

		if (free == 0) {
			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
				rte_log_set_history(0);
				RTE_LOG(CRIT, MEMPOOL,
					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
					obj, mp, cookie);
				rte_panic("MEMPOOL: bad header cookie (put)\n");
			}
			__mempool_write_header_cookie(obj, 1);
		}
		else if (free == 1) {
			if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
				rte_log_set_history(0);
				RTE_LOG(CRIT, MEMPOOL,
					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
					obj, mp, cookie);
				rte_panic("MEMPOOL: bad header cookie (get)\n");
			}
			__mempool_write_header_cookie(obj, 0);
		}
		else if (free == 2) {
			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
			    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
				rte_log_set_history(0);
				RTE_LOG(CRIT, MEMPOOL,
					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
					obj, mp, cookie);
				rte_panic("MEMPOOL: bad header cookie (audit)\n");
			}
		}
		cookie = __mempool_read_trailer_cookie(obj);
		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
			rte_log_set_history(0);
			RTE_LOG(CRIT, MEMPOOL,
				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
				obj, mp, cookie);
			rte_panic("MEMPOOL: bad trailer cookie\n");
		}
	}
}
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic error "-Wcast-qual"
#endif
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
 * An object constructor callback function for a mempool.
 *
 * Arguments are the mempool, the opaque pointer given by the user in
 * rte_mempool_create(), the pointer to the element and the index of
 * the element in the pool.
 */
typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
				      void *, unsigned);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool by maintaining a
 *   per-lcore object cache. This argument must be less than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
 *   cache_size so that "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. Access to the per-lcore table is of course
 *   faster than access to the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in the cache. Note that even if not used, the
 *   memory space for the cache is always reserved in the mempool structure,
 *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producer".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumer".
 * @return
 *   The pointer to the newly allocated mempool on success; NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
 *   - EINVAL - cache size provided is too large
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);
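/*
 * Creation sketch (illustrative only): the callback names my_pool_init() and
 * my_obj_init() are hypothetical user functions matching rte_mempool_ctor_t
 * and rte_mempool_obj_ctor_t, and the sizes are arbitrary example values
 * chosen so that "n modulo cache_size == 0".
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("sp_sc_pool", 4096, 128,
 *				64, 0,
 *				my_pool_init, NULL,
 *				my_obj_init, NULL,
 *				rte_socket_id(),
 *				MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
 *	if (mp == NULL)
 *		RTE_LOG(ERR, USER1, "mempool creation failed, rte_errno=%d\n",
 *			rte_errno);
 */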
/**
 * Dump the status of the mempool to the console.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(const struct rte_mempool *mp);
/**
 * @internal Put several objects back in the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool; must be strictly
 *   positive.
 * @param is_mp
 *   Single-producer (0) or multi-producer (1).
 */
static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		   unsigned n, int is_mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	struct rte_mempool_cache *cache;
	uint32_t index;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
	uint32_t flushthresh = mp->cache_flushthresh;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

	/* increment stat now; adding to the mempool always succeeds */
	__MEMPOOL_STAT_ADD(mp, put, n);

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	/* cache is not enabled or single producer */
	if (unlikely(cache_size == 0 || is_mp == 0))
		goto ring_enqueue;

	/* Go straight to ring if put would overflow mem allocated for cache */
	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (if it crosses the
	 *      cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	cache->len += n;

	if (cache->len >= flushthresh) {
		rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
					 cache->len - cache_size);
		cache->len = cache_size;
	}

	return;

ring_enqueue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (is_mp) {
		if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	}
	else {
		if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
			rte_panic("cannot put objects in mempool\n");
	}
#else
	if (is_mp)
		rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
	else
		rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
}
/**
 * Put several objects back in the mempool (multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the mempool from the obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 1);
}

/**
 * Put several objects back in the mempool (NOT multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, 0);
}
/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the mempool from obj_table.
 */
static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned n)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
}
/**
 * Put one object back in the mempool (multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_mp_put_bulk(mp, &obj, 1);
}

/**
 * Put one object back in the mempool (NOT multi-producer safe).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_sp_put_bulk(mp, &obj, 1);
}
/**
 * Put one object back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj
 *   A pointer to the object to be added.
 */
static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
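/*
 * Producer-side sketch (illustrative only): a thread that is known to be the
 * only producer for a given mempool can use the single-producer variant
 * explicitly, regardless of the MEMPOOL_F_SP_PUT creation flag. When in
 * doubt, use rte_mempool_put_bulk(), which follows the default behavior
 * selected at creation time.
 *
 *	void *objs[8];
 *
 *	... objs[] holds 8 objects previously obtained from mp ...
 *	rte_mempool_sp_put_bulk(mp, objs, 8);
 */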
/**
 * @internal Get several objects from the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to get; must be strictly positive.
 * @param is_mc
 *   Single-consumer (0) or multi-consumer (1).
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
		   unsigned n, int is_mc)
{
	int ret;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	unsigned n_orig = n;
#endif
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	struct rte_mempool_cache *cache;
	uint32_t index, len;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;

	/* cache is not enabled or single consumer */
	if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
		goto ring_dequeue;

	cache = &mp->local_cache[lcore_id];
	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it */
		uint32_t req = n + (cache_size - cache->len);

		/* How many do we require i.e. number to fill the cache + the request */
		ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out of
			 * buffers.
			 */
			goto ring_dequeue;
		}
		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
	return 0;

ring_dequeue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

	/* get remaining objects from ring */
	if (is_mc)
		ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
	else
		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n_orig);

	return ret;
}
/**
 * Get several objects from the mempool (multi-consumer safe).
 *
 * If the cache is enabled, objects will be retrieved first from the cache,
 * and subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n, 1);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get several objects from the mempool (NOT multi-consumer safe).
 *
 * If the cache is enabled, objects will be retrieved first from the cache,
 * and subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n, 0);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * If the cache is enabled, objects will be retrieved first from the cache,
 * and subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to get from the mempool to obj_table.
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	int ret;
	ret = __mempool_get_bulk(mp, obj_table, n,
				 !(mp->flags & MEMPOOL_F_SC_GET));
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
/**
 * Get one object from the mempool (multi-consumer safe).
 *
 * If the cache is enabled, the object will be retrieved first from the cache,
 * and subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
}

/**
 * Get one object from the mempool (NOT multi-consumer safe).
 *
 * If the cache is enabled, the object will be retrieved first from the cache,
 * and subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
}
/**
 * Get one object from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * If the cache is enabled, the object will be retrieved first from the cache,
 * and subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and the common pool are empty, even if the caches of other
 * lcores are full.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
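/*
 * Allocation sketch with error handling (illustrative only): bulk gets are
 * all-or-nothing, so a failed call leaves obj_table untouched and nothing
 * needs to be returned to the pool.
 *
 *	void *objs[16];
 *
 *	if (rte_mempool_get_bulk(mp, objs, 16) < 0) {
 *		... not enough objects available; none were taken ...
 *	} else {
 *		... use the 16 objects ...
 *		rte_mempool_put_bulk(mp, objs, 16);
 *	}
 */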
/**
 * Return the number of entries in the mempool.
 *
 * When the cache is enabled, this function has to browse the caches of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned rte_mempool_count(const struct rte_mempool *mp);
/**
 * Return the number of free entries in the mempool ring,
 * i.e. how many entries can be freed back to the mempool.
 *
 * NOTE: This corresponds to the number of elements *allocated* from the
 * memory pool, not the number of elements in the pool itself. To count
 * the number of elements currently available in the pool, use
 * rte_mempool_count().
 *
 * When the cache is enabled, this function has to browse the caches of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of free entries in the mempool.
 */
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
	return mp->size - rte_mempool_count(mp);
}
/**
 * Test if the mempool is full.
 *
 * When the cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 *
 * When the cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_count(mp) == 0);
}
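/*
 * Status sketch (illustrative only): these helpers walk the per-lcore caches,
 * so keep them out of the data path and use them for debugging or statistics.
 *
 *	printf("pool %s: %u in use, %u available, full=%d, empty=%d\n",
 *	       mp->name,
 *	       rte_mempool_free_count(mp),
 *	       rte_mempool_count(mp),
 *	       rte_mempool_full(mp),
 *	       rte_mempool_empty(mp));
 */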
/**
 * Return the physical address of elt, which is an element of the pool mp.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The physical address of the elt element.
 */
static inline phys_addr_t rte_mempool_virt2phy(const struct rte_mempool *mp,
					       const void *elt)
{
	uintptr_t off;

	off = (const char *)elt - (const char *)mp;
	return mp->phys_addr + off;
}
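/*
 * Physical address sketch (illustrative only), e.g. to hand a pool element to
 * a DMA-capable device:
 *
 *	void *obj;
 *	phys_addr_t pa;
 *
 *	if (rte_mempool_get(mp, &obj) == 0)
 *		pa = rte_mempool_virt2phy(mp, obj);
 */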
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of the fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(const struct rte_mempool *mp);
/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp + sizeof(struct rte_mempool);
}
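/*
 * Private data sketch (illustrative only; struct my_pool_priv and
 * my_pool_init() are hypothetical): reserve room with the private_data_size
 * argument of rte_mempool_create(), fill it from the pool constructor, and
 * read it back later with rte_mempool_get_priv().
 *
 *	struct my_pool_priv { uint32_t seq; };
 *
 *	static void my_pool_init(struct rte_mempool *mp, void *arg)
 *	{
 *		struct my_pool_priv *priv = rte_mempool_get_priv(mp);
 *		priv->seq = 0;
 *	}
 *
 *	mp = rte_mempool_create("priv_pool", 255, 64, 0,
 *				sizeof(struct my_pool_priv),
 *				my_pool_init, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 */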
/**
 * Dump the status of all mempools on the console.
 */
void rte_mempool_list_dump(void);
/**
 * Search for a mempool by its name.
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - ENOENT - no mempool with the requested name was found
 */
struct rte_mempool *rte_mempool_lookup(const char *name);
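/*
 * Lookup sketch (illustrative only): another part of the application, or a
 * secondary process, can retrieve a pool created elsewhere by its name.
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("example_pool");
 *
 *	if (mp == NULL)
 *		rte_panic("mempool example_pool not found\n");
 */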
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */