diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d4a5c65cf0..331465184b 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
  */
 
 #ifndef _RTE_MEMPOOL_H_
@@ -60,6 +59,7 @@
  * that won't work as rte_lcore_id() will not return a correct value.
  */
 
+#include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <errno.h>
@@ -68,8 +68,8 @@
 
 #include <rte_log.h>
 #include <rte_debug.h>
-#include <rte_memory.h>
 #include <rte_lcore.h>
+#include <rte_memory.h>
 #include <rte_branch_prediction.h>
 #include <rte_ring.h>
 
@@ -101,25 +101,56 @@ struct rte_mempool_debug_stats {
  */
 struct rte_mempool_cache {
	unsigned len; /**< Cache len */
-	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE]; /**< Cache objects */
+	/*
+	 * Cache is allocated to this size to allow it to overflow in certain
+	 * cases to avoid needless emptying of cache.
+	 */
+	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
 } __rte_cache_aligned;
 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
 
+struct rte_mempool_objsz {
+	uint32_t elt_size;     /**< Size of an element. */
+	uint32_t header_size;  /**< Size of header (before elt). */
+	uint32_t trailer_size; /**< Size of trailer (after elt). */
+	uint32_t total_size;
+	/**< Total size of an object (header + elt + trailer). */
+};
+
 #define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool. */
+#define RTE_MEMPOOL_MZ_PREFIX "MP_"
+
+/* "MP_<name>" */
+#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+
+/* "<name>_MP_elt" */
+#define RTE_MEMPOOL_OBJ_NAME	"%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
+
+#else
+
+#define RTE_MEMPOOL_OBJ_NAME	RTE_MEMPOOL_MZ_FORMAT
+
+#endif /* RTE_LIBRTE_XEN_DOM0 */
+
+#define MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)
+
+/** Mempool over one chunk of physically contiguous memory */
+#define MEMPOOL_PG_NUM_DEFAULT	1
 
 /**
  * The RTE mempool structure.
  */
 struct rte_mempool {
-	TAILQ_ENTRY(rte_mempool) next;   /**< Next in list. */
-
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	struct rte_ring *ring;           /**< Ring to store objects. */
	phys_addr_t phys_addr;           /**< Phys. addr. of mempool struct. */
	int flags;                       /**< Flags of the mempool. */
	uint32_t size;                   /**< Size of the mempool. */
-	uint32_t bulk_default;           /**< Default bulk count. */
	uint32_t cache_size;             /**< Size of per-lcore local cache. */
+	uint32_t cache_flushthresh;
+	/**< Threshold before we flush excess elements. */
 
	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
@@ -136,6 +167,20 @@ struct rte_mempool {
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
 #endif
+
+	/* Address translation support, starts from next cache line. */
+
+	/** Number of elements in the elt_pa array. */
+	uint32_t    pg_num __rte_cache_aligned;
+	uint32_t    pg_shift;     /**< LOG2 of the physical page size. */
+	uintptr_t   pg_mask;      /**< Physical page mask value. */
+	uintptr_t   elt_va_start;
+	/**< Virtual address of the first mempool object. */
+	uintptr_t   elt_va_end;
+	/**< Virtual address of the <size + 1> mempool object. */
+	phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
+	/**< Array of physical page addresses for the mempool objects buffer. */
+
 }  __rte_cache_aligned;
 
 #define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread in memory. */
@@ -144,7 +189,7 @@ struct rte_mempool {
 #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
 
 /**
- * When debug is enabled, store some statistics.
+ * @internal When debug is enabled, store some statistics.
  * @param mp
  *   Pointer to the memory pool.
  * @param name
@@ -163,7 +208,25 @@ struct rte_mempool {
 #endif
 
 /**
- * Get a pointer to a mempool pointer in the object header.
+ * Calculates the size of the mempool header.
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param pgn
+ *   Number of pages used to store mempool objects.
+ */
+#define MEMPOOL_HEADER_SIZE(mp, pgn)	(sizeof(*(mp)) + \
+	RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
+	sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
+
+/**
+ * Returns TRUE if the whole mempool is allocated in one contiguous block of memory.
+ */
+#define MEMPOOL_IS_CONTIG(mp)                      \
+	((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
+	(mp)->phys_addr == (mp)->elt_pa[0])
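To make the variable-length header concrete, a hedged illustration of what the macro above computes (illustrative helper name; assumes the declarations above are in scope): the first elt_pa[] slot is part of struct rte_mempool itself, and each additional physical page appends one phys_addr_t slot, rounded up to a whole cache line.

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Illustrative sketch: show how the mempool header grows as more
     * physical pages (and thus more elt_pa[] entries) back the pool. */
    static void
    show_header_growth(const struct rte_mempool *mp)
    {
            uint32_t pgn;

            for (pgn = 1; pgn <= 4; pgn++)
                    printf("pg_num=%u -> header=%zu bytes\n", pgn,
                           (size_t)MEMPOOL_HEADER_SIZE(mp, pgn));
    }

With pg_num == 1 (the MEMPOOL_PG_NUM_DEFAULT case) the macro reduces to plain sizeof(struct rte_mempool).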
+
+/**
+ * @internal Get a pointer to a mempool pointer in the object header.
  * @param obj
  *   Pointer to object.
  * @return
@@ -235,7 +298,7 @@ static inline void __mempool_write_trailer_cookie(void *obj)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 
 /**
- * Check and update cookies or panic.
+ * @internal Check and update cookies or panic.
  *
  * @param mp
  *   Pointer to the memory pool.
@@ -322,6 +385,49 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 
+/**
+ * A mempool object iterator callback function.
+ */
+typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
+	void * /*obj_start*/,
+	void * /*obj_end*/,
+	uint32_t /*obj_index */);
+
+/*
+ * Iterates across objects of the given size and alignment in the
+ * provided chunk of memory. The given memory buffer can consist of
+ * disjoint physical pages.
+ * For each object, calls the provided callback (if any).
+ * Used to populate a mempool, walk through all elements of the mempool,
+ * or estimate how many elements of the given size could be created in the
+ * given memory buffer.
+ * @param vaddr
+ *   Virtual address of the memory buffer.
+ * @param elt_num
+ *   Maximum number of objects to iterate through.
+ * @param elt_sz
+ *   Size of each object.
+ * @param align
+ *   Alignment of each object.
+ * @param paddr
+ *   Array of physical addresses of the pages that comprise the given memory
+ *   buffer.
+ * @param pg_num
+ *   Number of elements in the paddr array.
+ * @param pg_shift
+ *   LOG2 of the physical page size.
+ * @param obj_iter
+ *   Object iterator callback function (could be NULL).
+ * @param obj_iter_arg
+ *   User-defined parameter for the object iterator callback function.
+ *
+ * @return
+ *   Number of objects iterated through.
+ */
+
+uint32_t rte_mempool_obj_iter(void *vaddr,
+	uint32_t elt_num, size_t elt_sz, size_t align,
+	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+	rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);
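As a usage sketch for the iterator just declared (callback and struct names are illustrative, not part of the API), a callback can tally how many objects fit and how many bytes they span:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative rte_mempool_obj_iter_t callback: count objects and
     * accumulate the bytes covered by each [obj_start, obj_end) range. */
    struct obj_tally {
            uint32_t nb_objs;
            size_t bytes;
    };

    static void
    tally_obj(void *obj_iter_arg, void *obj_start, void *obj_end,
              uint32_t obj_index)
    {
            struct obj_tally *t = obj_iter_arg;

            (void)obj_index;
            t->nb_objs++;
            t->bytes += (size_t)((char *)obj_end - (char *)obj_start);
    }

Passing tally_obj and a zeroed struct obj_tally as obj_iter_arg to rte_mempool_obj_iter() yields the same count the function returns, plus the total footprint spanned.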
 
 /**
  * An object constructor callback function for mempool.
  *
@@ -344,10 +450,9 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  * Creates a new mempool named *name* in memory.
  *
  * This function uses ``memzone_reserve()`` to allocate memory. The
- * pool contains n elements of elt_size. Its size is set to n. By
- * default, bulk_default_count (the default number of elements to
- * get/put in the pool) is set to 1. @see rte_mempool_set_bulk_count()
- * to modify this valule.
+ * pool contains n elements of elt_size. Its size is set to n.
+ * All elements of the mempool are allocated together with the mempool header,
+ * in one physically contiguous chunk of memory.
  *
  * @param name
  *   The name of the mempool.
@@ -367,7 +472,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -431,51 +536,207 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 		   int socket_id, unsigned flags);
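A minimal usage sketch for the declaration above (pool name and sizes are illustrative; n is 2^13 - 1, following the sizing advice in the comment):

    #include <stdio.h>
    #include <rte_mempool.h>
    #include <rte_errno.h>

    /* 8191 objects of 128 bytes, a 32-object per-lcore cache, no private
     * data area and no init callbacks. */
    static struct rte_mempool *
    example_pool(void)
    {
            struct rte_mempool *mp;

            mp = rte_mempool_create("example_pool", 8191, 128, 32, 0,
                                    NULL, NULL,  /* mp_init, mp_init_arg */
                                    NULL, NULL,  /* obj_init, obj_init_arg */
                                    SOCKET_ID_ANY, 0);
            if (mp == NULL)
                    printf("pool creation failed: rte_errno=%d\n", rte_errno);
            return mp;
    }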
 
 /**
- * Set the default bulk count for put/get.
+ * Creates a new mempool named *name* in memory.
  *
- * The *count* parameter is the default number of bulk elements to
- * get/put when using ``rte_mempool_*_{en,de}queue_bulk()``. It must
- * be greater than 0 and less than half of the mempool size.
+ * This function uses ``memzone_reserve()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n.
+ * Depending on the input parameters, mempool elements can be either allocated
+ * together with the mempool header, or an externally provided memory buffer
+ * can be used to store mempool objects. In the latter case, that external
+ * memory buffer can consist of a set of disjoint physical pages.
  *
- * @param mp
- *   A pointer to the mempool structure.
- * @param count
- *   A new water mark value.
+ * @param name
+ *   The name of the mempool.
+ * @param n
+ *   The number of elements in the mempool. The optimum size (in terms of
+ *   memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param elt_size
+ *   The size of each element.
+ * @param cache_size
+ *   If cache_size is non-zero, the rte_mempool library will try to
+ *   limit the accesses to the common lockless pool, by maintaining a
+ *   per-lcore object cache. This argument must be lower or equal to
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   cache_size to have "n modulo cache_size == 0": if this is
+ *   not the case, some elements will always stay in the pool and will
+ *   never be used. The access to the per-lcore table is of course
+ *   faster than the multi-producer/consumer pool. The cache can be
+ *   disabled if the cache_size argument is set to 0; it can be useful to
+ *   avoid losing objects in cache. Note that even if not used, the
+ *   memory space for cache is always reserved in a mempool structure,
+ *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ *   The size of the private data appended after the mempool
+ *   structure. This is useful for storing some private data after the
+ *   mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ *   A function pointer that is called for initialization of the pool,
+ *   before object initialization. The user can initialize the private
+ *   data in this function if needed. This parameter can be NULL if
+ *   not needed.
+ * @param mp_init_arg
+ *   An opaque pointer to data that can be used in the mempool
+ *   constructor function.
+ * @param obj_init
+ *   A function pointer that is called for each object at
+ *   initialization of the pool. The user can set some meta data in
+ *   objects if needed. This parameter can be NULL if not needed.
+ *   The obj_init() function takes the mempool pointer, the init_arg,
+ *   the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ *   An opaque pointer to data that can be used as an argument for
+ *   each call to the object constructor function.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in the case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   The *flags* argument is an OR of the following flags:
+ *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ *     between channels in RAM: the pool allocator will add padding
+ *     between objects depending on the hardware configuration. See
+ *     Memory alignment constraints for details. If this flag is set,
+ *     the allocator will just align them to a cache line.
+ *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ *     cache-aligned. This flag removes this constraint, and no
+ *     padding will be present between objects. This flag implies
+ *     MEMPOOL_F_NO_SPREAD.
+ *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ *     when using rte_mempool_put() or rte_mempool_put_bulk() is
+ *     "single-producer". Otherwise, it is "multi-producers".
+ *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ *     when using rte_mempool_get() or rte_mempool_get_bulk() is
+ *     "single-consumer". Otherwise, it is "multi-consumers".
+ * @param vaddr
+ *   Virtual address of the externally allocated memory buffer.
+ *   Will be used to store mempool objects.
+ * @param paddr
+ *   Array of physical addresses of the pages that comprise the given memory
+ *   buffer.
+ * @param pg_num
+ *   Number of elements in the paddr array.
+ * @param pg_shift
+ *   LOG2 of the physical page size.
  * @return
- *   - 0: Success; default_bulk_count changed.
- *   - -EINVAL: Invalid count value.
+ *   The pointer to the newly allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be retrieved for the ring or mempool list
+ *    - EINVAL - cache size provided is too large
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
  */
-static inline int
-rte_mempool_set_bulk_count(struct rte_mempool *mp, unsigned count)
-{
-	if (unlikely(count == 0 || count >= mp->size))
-		return -EINVAL;
-
-	mp->bulk_default = count;
-	return 0;
-}
-
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+		unsigned cache_size, unsigned private_data_size,
+		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+		rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+		int socket_id, unsigned flags, void *vaddr,
+		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
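A hedged sketch of calling the externally backed variant (buffer acquisition is out of scope here; the caller is assumed to already know each page's physical address, and the pg_shift of 12 assumes 4 KB pages):

    #include <rte_mempool.h>

    /* Illustrative wrapper: build a pool of 64-byte objects inside an
     * externally allocated, page-aligned buffer described by paddr[]. */
    static struct rte_mempool *
    wrap_ext_buffer(void *vaddr, const phys_addr_t paddr[], uint32_t pg_num)
    {
            return rte_mempool_xmem_create("ext_pool", 4095, 64, 32, 0,
                            NULL, NULL, NULL, NULL,
                            SOCKET_ID_ANY, 0,
                            vaddr, paddr, pg_num, 12 /* log2(4096) */);
    }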
+
+#ifdef RTE_LIBRTE_XEN_DOM0
 /**
- * Get the default bulk count for put/get.
+ * Creates a new mempool named *name* in memory on Xen Dom0.
  *
- * @param mp
- *   A pointer to the mempool structure.
+ * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n.
+ * All elements of the mempool are allocated together with the mempool header,
+ * and the memory buffer can consist of a set of disjoint physical pages.
+ *
+ * @param name
+ *   The name of the mempool.
+ * @param n
+ *   The number of elements in the mempool. The optimum size (in terms of
+ *   memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param elt_size
+ *   The size of each element.
+ * @param cache_size
+ *   If cache_size is non-zero, the rte_mempool library will try to
+ *   limit the accesses to the common lockless pool, by maintaining a
+ *   per-lcore object cache. This argument must be lower or equal to
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   cache_size to have "n modulo cache_size == 0": if this is
+ *   not the case, some elements will always stay in the pool and will
+ *   never be used. The access to the per-lcore table is of course
+ *   faster than the multi-producer/consumer pool. The cache can be
+ *   disabled if the cache_size argument is set to 0; it can be useful to
+ *   avoid losing objects in cache. Note that even if not used, the
+ *   memory space for cache is always reserved in a mempool structure,
+ *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ *   The size of the private data appended after the mempool
+ *   structure. This is useful for storing some private data after the
+ *   mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ *   A function pointer that is called for initialization of the pool,
+ *   before object initialization. The user can initialize the private
+ *   data in this function if needed. This parameter can be NULL if
+ *   not needed.
+ * @param mp_init_arg
+ *   An opaque pointer to data that can be used in the mempool
+ *   constructor function.
+ * @param obj_init
+ *   A function pointer that is called for each object at
+ *   initialization of the pool. The user can set some meta data in
+ *   objects if needed. This parameter can be NULL if not needed.
+ *   The obj_init() function takes the mempool pointer, the init_arg,
+ *   the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ *   An opaque pointer to data that can be used as an argument for
+ *   each call to the object constructor function.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in the case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   The *flags* argument is an OR of the following flags:
+ *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ *     between channels in RAM: the pool allocator will add padding
+ *     between objects depending on the hardware configuration. See
+ *     Memory alignment constraints for details. If this flag is set,
+ *     the allocator will just align them to a cache line.
+ *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ *     cache-aligned. This flag removes this constraint, and no
+ *     padding will be present between objects. This flag implies
+ *     MEMPOOL_F_NO_SPREAD.
+ *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ *     when using rte_mempool_put() or rte_mempool_put_bulk() is
+ *     "single-producer". Otherwise, it is "multi-producers".
+ *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ *     when using rte_mempool_get() or rte_mempool_get_bulk() is
+ *     "single-consumer". Otherwise, it is "multi-consumers".
  * @return
- *   The default bulk count for enqueue/dequeue.
+ *   The pointer to the newly allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be retrieved for the ring or mempool list
+ *    - EINVAL - cache size provided is too large
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
  */
-static inline unsigned
-rte_mempool_get_bulk_count(struct rte_mempool *mp)
-{
-	return mp->bulk_default;
-}
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
+		unsigned cache_size, unsigned private_data_size,
+		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+		rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+		int socket_id, unsigned flags);
+#endif
 
 /**
  * Dump the status of the mempool to the console.
  *
+ * @param f
+ *   A pointer to a file for output
  * @param mp
  *   A pointer to the mempool structure.
  */
-void rte_mempool_dump(const struct rte_mempool *mp);
+void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
 
 /**
  * @internal Put several objects back in the mempool; used internally.
@@ -489,17 +750,17 @@ void rte_mempool_dump(const struct rte_mempool *mp);
  * @param is_mp
  *   Mono-producer (0) or multi-producers (1).
  */
-static inline void
+static inline void __attribute__((always_inline))
 __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 		   unsigned n, int is_mp)
 {
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	struct rte_mempool_cache *cache;
-	uint32_t cache_len;
+	uint32_t index;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
-	uint32_t cache_add_count;
+	uint32_t flushthresh = mp->cache_flushthresh;
 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
 
	/* increment stat now, adding in mempool always success */
@@ -510,52 +771,35 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
	if (unlikely(cache_size == 0 || is_mp == 0))
		goto ring_enqueue;
 
-	cache = &mp->local_cache[lcore_id];
-	cache_len = cache->len;
-	cache_objs = cache->objs;
-
-	/* cache is full and we add many objects: enqueue in ring */
-	if (unlikely(cache_len == cache_size && n >= cache_size))
+	/* Go straight to ring if put would overflow mem allocated for cache */
+	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;
 
+	cache = &mp->local_cache[lcore_id];
+	cache_objs = &cache->objs[cache->len];
+
	/*
-	 * cache is full and we add few objects: enqueue the content
-	 * of the cache in ring
+	 * The cache uses the following algorithm:
+	 *   1. Add the objects to the cache.
+	 *   2. Anything greater than the cache min value (if it crosses the
+	 *      cache flush threshold) is flushed to the ring.
	 */
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	cache->len += n;
 
-	cache->len = cache_len;
+	if (cache->len >= flushthresh) {
+		rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+				cache->len - cache_size);
+		cache->len = cache_size;
+	}
 
-	/* no more object to add, return */
-	if (likely(n == 0))
-		return;
+	return;
 
- ring_enqueue:
+ring_enqueue:
 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
 
	/* push remaining objects in ring */
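The flush policy implemented above can be modelled in isolation as follows (toy types, not the library code): puts accumulate in the local cache, and once len crosses the flush threshold, everything beyond cache_size is drained to the shared ring in one bulk call.

    #include <stdint.h>
    #include <string.h>

    /* Toy model of the put-side cache; the ring drain is left abstract. */
    struct toy_cache {
            uint32_t len;
            void *objs[96]; /* cache_size * 3, as in the real structure */
    };

    static void
    toy_put(struct toy_cache *c, void * const *tbl, uint32_t n,
            uint32_t cache_size, uint32_t flushthresh)
    {
            memcpy(&c->objs[c->len], tbl, n * sizeof(*tbl));
            c->len += n;
            if (c->len >= flushthresh) {
                    /* drain objs[cache_size .. len) to the shared ring */
                    c->len = cache_size;
            }
    }

Keeping cache_size objects resident after a flush is the point of the 3x overallocation of objs[] seen earlier: a burst can overflow past cache_size without forcing a drain on every put.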
@@ -587,7 +831,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param n
  *   The number of objects to add in the mempool from the obj_table.
  */
-static inline void
+static inline void __attribute__((always_inline))
 rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
@@ -627,7 +871,7 @@ rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param n
  *   The number of objects to add in the mempool from obj_table.
  */
-static inline void
+static inline void __attribute__((always_inline))
 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 		     unsigned n)
 {
@@ -643,7 +887,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param obj
  *   A pointer to the object to be added.
  */
-static inline void
+static inline void __attribute__((always_inline))
 rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
 {
	rte_mempool_mp_put_bulk(mp, &obj, 1);
@@ -657,7 +901,7 @@ rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
  * @param obj
  *   A pointer to the object to be added.
  */
-static inline void
+static inline void __attribute__((always_inline))
 rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
 {
	rte_mempool_sp_put_bulk(mp, &obj, 1);
@@ -675,7 +919,7 @@ rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
  * @param obj
  *   A pointer to the object to be added.
  */
-static inline void
+static inline void __attribute__((always_inline))
 rte_mempool_put(struct rte_mempool *mp, void *obj)
 {
	rte_mempool_put_bulk(mp, &obj, 1);
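A short round-trip usage sketch for the put helpers above and the get helpers that follow (mp is assumed to come from rte_mempool_create()):

    #include <rte_mempool.h>

    /* Take 32 objects from the pool, use them, return them in one call. */
    static int
    round_trip(struct rte_mempool *mp)
    {
            void *objs[32];

            if (rte_mempool_get_bulk(mp, objs, 32) < 0)
                    return -1; /* -ENOENT: nothing was retrieved */

            /* ... work with the objects ... */

            rte_mempool_put_bulk(mp, objs, 32);
            return 0;
    }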
@@ -695,72 +939,56 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   - >=0: Success; number of objects supplied.
  *   - <0: Error; code of ring dequeue function.
  */
-static inline int
+static inline int __attribute__((always_inline))
 __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 		   unsigned n, int is_mc)
 {
	int ret;
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-	unsigned n_orig = n;
-#endif
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	struct rte_mempool_cache *cache;
-	uint32_t cache_len, cache_len_save = 0;
+	uint32_t index, len;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
-	uint32_t cache_del_count;
-
-	cache = &mp->local_cache[lcore_id];
 
	/* cache is not enabled or single consumer */
-	if (unlikely(cache_size == 0 || is_mc == 0))
+	if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
		goto ring_dequeue;
 
-	cache_len = cache->len;
+	cache = &mp->local_cache[lcore_id];
	cache_objs = cache->objs;
 
-	/* cache is empty and we need many objects: dequeue from ring */
-	if (unlikely(cache_len == 0 && n >= cache_size))
-		goto ring_dequeue;
+	/* Can this be satisfied from the cache? */
+	if (cache->len < n) {
+		/* No. Backfill the cache first, and then fill from it */
+		uint32_t req = n + (cache_size - cache->len);
 
-	/* cache is empty and we dequeue few objects: fill the cache first */
-	if (unlikely(cache_len == 0 && n < cache_size)) {
-		ret = rte_ring_mc_dequeue_bulk(mp->ring, cache_objs,
-					       cache_size);
+		/* How many do we require, i.e. the number to fill the cache + the request */
+		ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
-			__MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
-			return ret;
+			/*
+			 * In the off chance that we are buffer constrained,
+			 * where we are not able to allocate cache + n, go to
+			 * the ring directly. If that fails, we are truly out of
+			 * buffers.
+			 */
+			goto ring_dequeue;
		}
 
-		cache_len = cache_size;
+		cache->len += req;
	}
 
-	if (likely(n <= cache_len))
-		cache_del_count = n;
-	else
-		cache_del_count = cache_len;
+	/* Now fill in the response ... */
+	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
 
-	cache_len_save = cache_len;
+	cache->len -= n;
 
-	/* add in cache only while there is enough room */
-	while (cache_del_count > 0) {
-		cache_len--;
-		*obj_table = cache_objs[cache_len];
-		obj_table++;
-		n--;
-		cache_del_count--;
-	}
+	__MEMPOOL_STAT_ADD(mp, get_success, n);
 
-	cache->len = cache_len;
-
-	/* no more object to get, return */
-	if (likely(n == 0)) {
-		__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
-		return 0;
-	}
+	return 0;
 
- ring_dequeue:
+ring_dequeue:
 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
 
	/* get remaining objects from ring */
@@ -769,19 +997,10 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
	else
		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
 
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
-	/*
-	 * bad luck, the ring is empty but we already dequeued some
-	 * entries from cache, we have to restore them
-	 */
-	if (unlikely(ret < 0 && cache_len_save != 0))
-		cache->len = cache_len_save;
-#endif
-
	if (ret < 0)
-		__MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
+		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
-		__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+		__MEMPOOL_STAT_ADD(mp, get_success, n);
 
	return ret;
 }
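Mirroring the put-side sketch earlier, the backfill behaviour above can be modelled standalone (toy code; toy_refill() stands in for rte_ring_mc_dequeue_bulk() and is assumed to be all-or-nothing, like the real ring API):

    #include <stdint.h>

    struct toy_gcache {
            uint32_t len;
            void *objs[96];
    };

    extern int toy_refill(void **slot, uint32_t n); /* ring stand-in */

    static int
    toy_get(struct toy_gcache *c, void **tbl, uint32_t n, uint32_t cache_size)
    {
            uint32_t i, top;

            if (c->len < n) {
                    /* refill to cache_size over and above the request */
                    uint32_t req = n + (cache_size - c->len);

                    if (toy_refill(&c->objs[c->len], req) < 0)
                            return -1; /* real code falls back to the ring */
                    c->len += req;
            }
            /* serve LIFO from the top so the hottest objects go out first */
            for (i = 0, top = c->len - 1; i < n; i++, top--)
                    tbl[i] = c->objs[top];
            c->len -= n;
            return 0;
    }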
@@ -804,7 +1023,7 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int
+static inline int __attribute__((always_inline))
 rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
	int ret;
@@ -833,7 +1052,7 @@ rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - -ENOENT: Not enough entries in the mempool; no object is
  *     retrieved.
  */
-static inline int
+static inline int __attribute__((always_inline))
 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
	int ret;
@@ -865,7 +1084,7 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int
+static inline int __attribute__((always_inline))
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
	int ret;
@@ -892,7 +1111,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int
+static inline int __attribute__((always_inline))
 rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
 {
	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
@@ -914,7 +1133,7 @@ rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int
+static inline int __attribute__((always_inline))
 rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
 {
	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
@@ -940,7 +1159,7 @@ rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int
+static inline int __attribute__((always_inline))
 rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 {
	return rte_mempool_get_bulk(mp, obj_p, 1);
@@ -961,7 +1180,12 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 unsigned rte_mempool_count(const struct rte_mempool *mp);
 
 /**
- * Return the number of free entries in the mempool.
+ * Return the number of free entries in the mempool ring,
+ * i.e. how many entries can be freed back to the mempool.
+ *
+ * NOTE: This corresponds to the number of elements *allocated* from the
+ * memory pool, not the number of elements in the pool itself. To count
+ * the number of elements currently available in the pool, use "rte_mempool_count".
  *
  * When cache is enabled, this function has to browse the length of
  * all lcores, so it should not be used in a data path, but only for
@@ -1026,16 +1250,24 @@ rte_mempool_empty(const struct rte_mempool *mp)
  * @return
  *   The physical address of the elt element.
  */
-static inline phys_addr_t rte_mempool_virt2phy(const struct rte_mempool *mp,
-		const void *elt)
+static inline phys_addr_t
+rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
 {
-	uintptr_t off;
-
-	off = (const char *)elt - (const char *)mp;
-	return mp->phys_addr + off;
+	if (rte_eal_has_hugepages()) {
+		uintptr_t off;
+
+		off = (const char *)elt - (const char *)mp->elt_va_start;
+		return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
+	} else {
+		/*
+		 * If huge pages are disabled, we cannot assume the
+		 * memory region to be physically contiguous.
+		 * Look up each element.
+		 */
+		return rte_mem_virt2phy(elt);
+	}
 }
-
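The hugepage branch above, spelled out as a self-contained helper (illustrative name; same arithmetic as the inline function): the object's offset from elt_va_start selects a page in elt_pa[] and an offset within that page.

    #include <rte_mempool.h>

    static phys_addr_t
    obj_phys_addr(const struct rte_mempool *mp, const void *elt)
    {
            uintptr_t off = (uintptr_t)elt - mp->elt_va_start;

            /* page index via pg_shift, intra-page offset via pg_mask */
            return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
    }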
 /**
  * Check the consistency of mempool objects.
  *
@@ -1058,13 +1290,16 @@ void rte_mempool_audit(const struct rte_mempool *mp);
  */
 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
 {
-	return (char *)mp + sizeof(struct rte_mempool);
+	return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
 }
 
 /**
  * Dump the status of all mempools on the console
+ *
+ * @param f
+ *   A pointer to a file for output
  */
-void rte_mempool_list_dump(void);
+void rte_mempool_list_dump(FILE *f);
 
 /**
  * Search a mempool from its name
@@ -1072,13 +1307,84 @@ void rte_mempool_list_dump(void);
  * @param name
  *   The name of the mempool.
  * @return
- *   The pointer to the mempool matching the name, or NULL if not found.NULL on error
+ *   The pointer to the mempool matching the name, or NULL if not found.
+ *   NULL on error
  *   with rte_errno set appropriately. Possible rte_errno values include:
  *    - ENOENT - required entry not available to return.
  *
  */
 struct rte_mempool *rte_mempool_lookup(const char *name);
 
+/**
+ * Given a desired size of the mempool element and mempool flags,
+ * calculates the header, trailer, body and total sizes of the mempool object.
+ * @param elt_size
+ *   The size of each element.
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Consult rte_mempool_create() for more information about possible values.
+ * @param sz
+ *   The calculated detailed size of the mempool object. May be NULL.
+ * @return
+ *   Total size of the mempool object.
+ */
+uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+	struct rte_mempool_objsz *sz);
+
+/**
+ * Calculate the maximum amount of memory required to store the given number
+ * of objects. Assumes that the memory buffer will be aligned at page boundary.
+ * Note that if the object size is bigger than the page size, then it assumes
+ * that there are subsets of physically contiguous pages big enough to store
+ * at least one object.
+ * @param elt_num
+ *   Number of elements.
+ * @param elt_sz
+ *   The size of each element.
+ * @param pg_shift
+ *   LOG2 of the physical page size.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
+	uint32_t pg_shift);
+
+/**
+ * Calculate how much memory would actually be required with the given
+ * memory footprint to store the required number of objects.
+ * @param vaddr
+ *   Virtual address of the externally allocated memory buffer.
+ *   Will be used to store mempool objects.
+ * @param elt_num
+ *   Number of elements.
+ * @param elt_sz
+ *   The size of each element.
+ * @param paddr
+ *   Array of physical addresses of the pages that comprise the given memory
+ *   buffer.
+ * @param pg_num
+ *   Number of elements in the paddr array.
+ * @param pg_shift
+ *   LOG2 of the physical page size.
+ * @return
+ *   Number of bytes needed to store the given number of objects,
+ *   aligned to the given page size.
+ *   If the provided memory buffer is not big enough:
+ *   (-1) * actual number of elements that can be stored in that buffer.
+ */
+ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
+	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+
+/**
+ * Walk the list of all memory pools.
+ *
+ * @param func
+ *   Iterator function
+ * @param arg
+ *   Argument passed to iterator
+ */
+void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
+		      void *arg);
+
 #ifdef __cplusplus
 }
 #endif
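Finally, a usage sketch for the walker declared above (callback name is illustrative; rte_mempool_count() is declared earlier in this header):

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Print each pool's name and how many entries are still available. */
    static void
    print_pool(const struct rte_mempool *mp, void *arg)
    {
            fprintf((FILE *)arg, "%s: %u entries available\n",
                    mp->name, rte_mempool_count(mp));
    }

    /* typical call: rte_mempool_walk(print_pool, stdout); */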