diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index ded144a852..9e41f595e8 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
  */
 
 #ifndef _RTE_MEMPOOL_H_
@@ -50,27 +22,34 @@
  * meta-data in the object data and retrieve them when allocating a
  * new object.
  *
- * Note: the mempool implementation is not preemptable. A lcore must
- * not be interrupted by another task that uses the same mempool
- * (because it uses a ring which is not preemptable). Also, mempool
- * functions must not be used outside the DPDK environment: for
- * example, in linuxapp environment, a thread that is not created by
- * the EAL must not use mempools. This is due to the per-lcore cache
- * that won't work as rte_lcore_id() will not return a correct value.
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
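+ *
+ * An illustrative sketch for a non-EAL thread (it assumes an existing,
+ * populated mempool *mp*; error handling shortened):
+ *
+ *	struct rte_mempool_cache *cache;
+ *	void *objs[32];
+ *
+ *	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
+ *	if (cache == NULL)
+ *		return;
+ *	if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
+ *		/* ... use the 32 objects ... */
+ *		rte_mempool_generic_put(mp, objs, 32, cache);
+ *	}
+ *	rte_mempool_cache_flush(cache, mp);
+ *	rte_mempool_cache_free(cache);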
 */
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <sys/queue.h>
 
+#include <rte_config.h>
+#include <rte_spinlock.h>
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_lcore.h>
 #include <rte_memory.h>
 #include <rte_branch_prediction.h>
 #include <rte_ring.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -91,23 +70,30 @@ struct rte_mempool_debug_stats {
 	uint64_t get_success_objs; /**< Objects successfully allocated. */
 	uint64_t get_fail_bulk;    /**< Failed allocation number. */
 	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
+	/** Successful allocation number of contiguous blocks. */
+	uint64_t get_success_blks;
+	/** Failed allocation number of contiguous blocks. */
+	uint64_t get_fail_blks;
 } __rte_cache_aligned;
 #endif
 
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
 /**
  * A structure that stores a per-core object cache.
  */
 struct rte_mempool_cache {
-	unsigned len; /**< Cache len */
+	uint32_t size;	      /**< Size of the cache */
+	uint32_t flushthresh; /**< Threshold before we flush excess elements */
+	uint32_t len;	      /**< Current cache count */
 	/*
 	 * Cache is allocated to this size to allow it to overflow in certain
 	 * cases to avoid needless emptying of cache.
 	 */
 	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
 } __rte_cache_aligned;
-#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
 
+/**
+ * A structure that stores the size of mempool elements.
+ */
 struct rte_mempool_objsz {
 	uint32_t elt_size;     /**< Size of an element. */
 	uint32_t header_size;  /**< Size of header (before elt). */
@@ -116,81 +102,172 @@ struct rte_mempool_objsz {
 	/**< Total size of an object (header + elt + trailer). */
 };
 
-#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool. */
+/**< Maximum length of a memory pool's name. */
+#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
+			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
 #define RTE_MEMPOOL_MZ_PREFIX "MP_"
 
 /* "MP_<name>" */
 #define	RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
 
-#ifdef RTE_LIBRTE_XEN_DOM0
+#define	MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)
 
-/* "<name>_MP_elt" */
-#define RTE_MEMPOOL_OBJ_NAME	"%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
+/** Mempool over one chunk of physically contiguous memory */
+#define	MEMPOOL_PG_NUM_DEFAULT	1
 
-#else
+#ifndef RTE_MEMPOOL_ALIGN
+#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
+#endif
+
+#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
+
+/**
+ * Mempool object header structure
+ *
+ * Each object stored in a mempool is prefixed by this header structure;
+ * it makes it possible to retrieve the mempool pointer from the object
+ * and to iterate over all objects attached to a mempool. When debug is
+ * enabled, a cookie is also added in this structure, preventing
+ * corruptions and double-frees.
+ */
+struct rte_mempool_objhdr {
+	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
+	struct rte_mempool *mp;          /**< The mempool owning the object. */
+	RTE_STD_C11
+	union {
+		rte_iova_t iova;         /**< IO address of the object. */
+		phys_addr_t physaddr;    /**< deprecated - Physical address of the object. */
+	};
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	uint64_t cookie;                 /**< Debug cookie. */
+#endif
+};
 
-#define RTE_MEMPOOL_OBJ_NAME	RTE_MEMPOOL_MZ_FORMAT
+/**
+ * A list of object headers type
+ */
+STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
 
-#endif /* RTE_LIBRTE_XEN_DOM0 */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 
-#define	MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)
+/**
+ * Mempool object trailer structure
+ *
+ * In debug mode, each object stored in a mempool is suffixed by this
+ * trailer structure containing a cookie that helps detect memory
+ * corruption.
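+ *
+ * Conceptually, each element in debug mode is laid out as (sizes not to
+ * scale):
+ *
+ *	| rte_mempool_objhdr (with cookie) | object | rte_mempool_objtlr (cookie) |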
+ */ +struct rte_mempool_objtlr { + uint64_t cookie; /**< Debug cookie. */ +}; -/** Mempool over one chunk of physically continuous memory */ -#define MEMPOOL_PG_NUM_DEFAULT 1 +#endif + +/** + * A list of memory where objects are stored + */ +STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr); + +/** + * Callback used to free a memory chunk + */ +typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr, + void *opaque); + +/** + * Mempool objects memory header structure + * + * The memory chunks where objects are stored. Each chunk is virtually + * and physically contiguous. + */ +struct rte_mempool_memhdr { + STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */ + struct rte_mempool *mp; /**< The mempool owning the chunk */ + void *addr; /**< Virtual address of the chunk */ + RTE_STD_C11 + union { + rte_iova_t iova; /**< IO address of the chunk */ + phys_addr_t phys_addr; /**< Physical address of the chunk */ + }; + size_t len; /**< length of the chunk */ + rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */ + void *opaque; /**< Argument passed to the free callback */ +}; + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Additional information about the mempool + * + * The structure is cache-line aligned to avoid ABI breakages in + * a number of cases when something small is added. + */ +struct rte_mempool_info { + /** Number of objects in the contiguous block */ + unsigned int contig_block_size; +} __rte_cache_aligned; /** * The RTE mempool structure. */ struct rte_mempool { - TAILQ_ENTRY(rte_mempool) next; /**< Next in list. */ - - char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */ - struct rte_ring *ring; /**< Ring to store objects. */ - phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */ - int flags; /**< Flags of the mempool. */ - uint32_t size; /**< Size of the mempool. */ - uint32_t cache_size; /**< Size of per-lcore local cache. */ - uint32_t cache_flushthresh; - /**< Threshold before we flush excess elements. */ + /* + * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI + * compatibility requirements, it could be changed to + * RTE_MEMPOOL_NAMESIZE next time the ABI changes + */ + char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */ + RTE_STD_C11 + union { + void *pool_data; /**< Ring or pool to store objects. */ + uint64_t pool_id; /**< External mempool identifier. */ + }; + void *pool_config; /**< optional args for ops alloc. */ + const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */ + unsigned int flags; /**< Flags of the mempool. */ + int socket_id; /**< Socket id passed at create. */ + uint32_t size; /**< Max size of the mempool. */ + uint32_t cache_size; + /**< Size of per-lcore default local cache. */ uint32_t elt_size; /**< Size of an element. */ uint32_t header_size; /**< Size of header (before elt). */ uint32_t trailer_size; /**< Size of trailer (after elt). */ unsigned private_data_size; /**< Size of private data. */ + /** + * Index into rte_mempool_ops_table array of mempool ops + * structs, which contain callback function pointers. + * We're using an index here rather than pointers to the callbacks + * to facilitate any secondary processes that may want to use + * this mempool. + */ + int32_t ops_index; -#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 - /** Per-lcore local cache. 
*/ - struct rte_mempool_cache local_cache[RTE_MAX_LCORE]; -#endif + struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */ + + uint32_t populated_size; /**< Number of populated objects. */ + struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */ + uint32_t nb_mem_chunks; /**< Number of memory chunks */ + struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG /** Per-lcore statistics. */ struct rte_mempool_debug_stats stats[RTE_MAX_LCORE]; #endif - - /* Address translation support, starts from next cache line. */ - - /** Number of elements in the elt_pa array. */ - uint32_t pg_num __rte_cache_aligned; - uint32_t pg_shift; /**< LOG2 of the physical pages. */ - uintptr_t pg_mask; /**< physical page mask value. */ - uintptr_t elt_va_start; - /**< Virtual address of the first mempool object. */ - uintptr_t elt_va_end; - /**< Virtual address of the mempool object. */ - phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT]; - /**< Array of physical pages addresses for the mempool objects buffer. */ - } __rte_cache_aligned; -#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread in memory. */ +#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread among memory channels. */ #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/ #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ +#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */ +#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */ +#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */ /** * @internal When debug is enabled, store some statistics. + * * @param mp * Pointer to the memory pool. * @param name @@ -199,51 +276,42 @@ struct rte_mempool { * Number to add to the object-oriented statistics. */ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG -#define __MEMPOOL_STAT_ADD(mp, name, n) do { \ - unsigned __lcore_id = rte_lcore_id(); \ - mp->stats[__lcore_id].name##_objs += n; \ - mp->stats[__lcore_id].name##_bulk += 1; \ +#define __MEMPOOL_STAT_ADD(mp, name, n) do { \ + unsigned __lcore_id = rte_lcore_id(); \ + if (__lcore_id < RTE_MAX_LCORE) { \ + mp->stats[__lcore_id].name##_objs += n; \ + mp->stats[__lcore_id].name##_bulk += 1; \ + } \ } while(0) +#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do { \ + unsigned int __lcore_id = rte_lcore_id(); \ + if (__lcore_id < RTE_MAX_LCORE) { \ + mp->stats[__lcore_id].name##_blks += n; \ + mp->stats[__lcore_id].name##_bulk += 1; \ + } \ + } while (0) #else #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0) +#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0) #endif /** - * Calculates size of the mempool header. + * Calculate the size of the mempool header. + * * @param mp * Pointer to the memory pool. - * @param pgn - * Number of page used to store mempool objects. + * @param cs + * Size of the per-lcore cache. */ -#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \ - RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \ - sizeof ((mp)->elt_pa[0]), CACHE_LINE_SIZE)) +#define MEMPOOL_HEADER_SIZE(mp, cs) \ + (sizeof(*(mp)) + (((cs) == 0) ? 0 : \ + (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE))) -/** - * Returns TRUE if whole mempool is allocated in one contiguous block of memory. 
- */ -#define MEMPOOL_IS_CONTIG(mp) \ - ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \ - (mp)->phys_addr == (mp)->elt_pa[0]) - -/** - * @internal Get a pointer to a mempool pointer in the object header. - * @param obj - * Pointer to object. - * @return - * The pointer to the mempool from which the object was allocated. - */ -static inline struct rte_mempool **__mempool_from_obj(void *obj) +/* return the header of a mempool object (internal) */ +static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj) { - struct rte_mempool **mpp; - unsigned off; - - off = sizeof(struct rte_mempool *); -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG - off += sizeof(uint64_t); -#endif - mpp = (struct rte_mempool **)((char *)obj - off); - return mpp; + return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, + sizeof(struct rte_mempool_objhdr)); } /** @@ -255,48 +323,18 @@ static inline struct rte_mempool **__mempool_from_obj(void *obj) * @return * A pointer to the mempool structure. */ -static inline const struct rte_mempool *rte_mempool_from_obj(void *obj) +static inline struct rte_mempool *rte_mempool_from_obj(void *obj) { - struct rte_mempool * const *mpp; - mpp = __mempool_from_obj(obj); - return *mpp; -} - -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG -/* get header cookie value */ -static inline uint64_t __mempool_read_header_cookie(const void *obj) -{ - return *(const uint64_t *)((const char *)obj - sizeof(uint64_t)); -} - -/* get trailer cookie value */ -static inline uint64_t __mempool_read_trailer_cookie(void *obj) -{ - struct rte_mempool **mpp = __mempool_from_obj(obj); - return *(uint64_t *)((char *)obj + (*mpp)->elt_size); -} - -/* write header cookie value */ -static inline void __mempool_write_header_cookie(void *obj, int free) -{ - uint64_t *cookie_p; - cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t)); - if (free == 0) - *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1; - else - *cookie_p = RTE_MEMPOOL_HEADER_COOKIE2; - + struct rte_mempool_objhdr *hdr = __mempool_get_header(obj); + return hdr->mp; } -/* write trailer cookie value */ -static inline void __mempool_write_trailer_cookie(void *obj) +/* return the trailer of a mempool object (internal) */ +static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj) { - uint64_t *cookie_p; - struct rte_mempool **mpp = __mempool_from_obj(obj); - cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size); - *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE; + struct rte_mempool *mp = rte_mempool_from_obj(obj); + return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size); } -#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ /** * @internal Check and update cookies or panic. @@ -312,341 +350,522 @@ static inline void __mempool_write_trailer_cookie(void *obj) * - 1: object is supposed to be free, mark it as allocated * - 2: just check that cookie is valid (free or allocated) */ +void rte_mempool_check_cookies(const struct rte_mempool *mp, + void * const *obj_table_const, unsigned n, int free); + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG -#ifndef __INTEL_COMPILER -#pragma GCC push_options -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif -static inline void __mempool_check_cookies(const struct rte_mempool *mp, - void * const *obj_table_const, - unsigned n, int free) -{ - uint64_t cookie; - void *tmp; - void *obj; - void **obj_table; - - /* Force to drop the "const" attribute. 
This is done only when - * DEBUG is enabled */ - tmp = (void *) obj_table_const; - obj_table = (void **) tmp; - - while (n--) { - obj = obj_table[n]; - - if (rte_mempool_from_obj(obj) != mp) - rte_panic("MEMPOOL: object is owned by another " - "mempool\n"); - - cookie = __mempool_read_header_cookie(obj); - - if (free == 0) { - if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) { - rte_log_set_history(0); - RTE_LOG(CRIT, MEMPOOL, - "obj=%p, mempool=%p, cookie=%"PRIx64"\n", - obj, mp, cookie); - rte_panic("MEMPOOL: bad header cookie (put)\n"); - } - __mempool_write_header_cookie(obj, 1); - } - else if (free == 1) { - if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) { - rte_log_set_history(0); - RTE_LOG(CRIT, MEMPOOL, - "obj=%p, mempool=%p, cookie=%"PRIx64"\n", - obj, mp, cookie); - rte_panic("MEMPOOL: bad header cookie (get)\n"); - } - __mempool_write_header_cookie(obj, 0); - } - else if (free == 2) { - if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 && - cookie != RTE_MEMPOOL_HEADER_COOKIE2) { - rte_log_set_history(0); - RTE_LOG(CRIT, MEMPOOL, - "obj=%p, mempool=%p, cookie=%"PRIx64"\n", - obj, mp, cookie); - rte_panic("MEMPOOL: bad header cookie (audit)\n"); - } - } - cookie = __mempool_read_trailer_cookie(obj); - if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) { - rte_log_set_history(0); - RTE_LOG(CRIT, MEMPOOL, - "obj=%p, mempool=%p, cookie=%"PRIx64"\n", - obj, mp, cookie); - rte_panic("MEMPOOL: bad trailer cookie\n"); - } - } -} -#ifndef __INTEL_COMPILER -#pragma GCC pop_options -#endif +#define __mempool_check_cookies(mp, obj_table_const, n, free) \ + rte_mempool_check_cookies(mp, obj_table_const, n, free) #else #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0) #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ /** - * An mempool's object iterator callback function. + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Check contiguous object blocks and update cookies or panic. + * + * @param mp + * Pointer to the memory pool. + * @param first_obj_table_const + * Pointer to a table of void * pointers (first object of the contiguous + * object blocks). + * @param n + * Number of contiguous object blocks. + * @param free + * - 0: object is supposed to be allocated, mark it as free + * - 1: object is supposed to be free, mark it as allocated + * - 2: just check that cookie is valid (free or allocated) */ -typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/, - void * /*obj_start*/, - void * /*obj_end*/, - uint32_t /*obj_index */); +void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, + void * const *first_obj_table_const, unsigned int n, int free); -/* - * Iterates across objects of the given size and alignment in the - * provided chunk of memory. The given memory buffer can consist of - * disjoint physical pages. - * For each object calls the provided callback (if any). - * Used to populate mempool, walk through all elements of the mempool, - * estimate how many elements of the given size could be created in the given - * memory buffer. - * @param vaddr - * Virtual address of the memory buffer. - * @param elt_num - * Maximum number of objects to iterate through. - * @param elt_sz - * Size of each object. - * @param paddr - * Array of phyiscall addresses of the pages that comprises given memory - * buffer. - * @param pg_num - * Number of elements in the paddr array. - * @param pg_shift - * LOG2 of the physical pages size. - * @param obj_iter - * Object iterator callback function (could be NULL). 
- * @param obj_iter_arg
- *   User-defined parameter for the object iterator callback function.
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+					      free) \
+	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+						free)
+#else
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+					      free) \
+	do {} while (0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
+
+/**
+ * Prototype for an implementation-specific data provisioning function.
+ *
+ * The function should provide the implementation-specific memory for
+ * use by the other mempool ops functions in a given mempool ops struct.
+ * E.g. the default ops provides an instance of the rte_ring for this purpose.
+ * For other ops, it will most likely point to a different type of data
+ * structure, and will be transparent to the application programmer.
+ * This function should set mp->pool_data.
+ */
+typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
+
+/**
+ * Free the opaque private data pointed to by mp->pool_data pointer.
+ */
+typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
+
+/**
+ * Enqueue an object into the external pool.
+ */
+typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
+		void * const *obj_table, unsigned int n);
+
+/**
+ * Dequeue an object from the external pool.
+ */
+typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
+		void **obj_table, unsigned int n);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dequeue a number of contiguous object blocks from the external pool.
+ */
+typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
+		void **first_obj_table, unsigned int n);
+
+/**
+ * Return the number of available objects in the external pool.
+ */
+typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+
+/**
+ * Calculate memory size required to store given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * virtually contiguous chunk size. Otherwise, if mempool objects must
+ * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * min_chunk_size defines IOVA-contiguous chunk size.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[in] obj_num
+ *   Number of objects.
+ * @param[in] pg_shift
+ *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param[out] align
+ *   Location for required memory chunk alignment.
 * @return
- *   Number of objects iterated through.
+ *   Required memory size.
 */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
 
-uint32_t rte_mempool_obj_iter(void *vaddr,
-	uint32_t elt_num, size_t elt_sz, size_t align,
-	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
-	rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * If page boundaries may be ignored, it is just the product of the total
+ * object size (including header and trailer) and the number of objects.
+ * Otherwise, it is the number of pages required to store the given
+ * number of objects without crossing page boundaries.
+ *
+ * Note that if object size is bigger than page size, then it assumes
+ * that pages are grouped in subsets of physically contiguous pages big
+ * enough to store at least one object.
+ *
+ * Minimum size of memory chunk is the total element size.
+ * Required memory chunk alignment is the cache line size.
+ */
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
 
 /**
- * An object constructor callback function for mempool.
+ * Function to be called for each populated object.
  *
- * Arguments are the mempool, the opaque pointer given by the user in
- * rte_mempool_create(), the pointer to the element and the index of
- * the element in the pool.
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] opaque
+ *   An opaque pointer passed to the iterator.
+ * @param[in] vaddr
+ *   Object virtual address.
+ * @param[in] iova
+ *   Input/output virtual address of the object or RTE_BAD_IOVA.
  */
-typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
-	void *, unsigned);
+typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
+		void *opaque, void *vaddr, rte_iova_t iova);
 
 /**
- * A mempool constructor callback function.
+ * Populate memory pool objects using provided memory chunk.
  *
- * Arguments are the mempool and the opaque pointer given by the user in
- * rte_mempool_create().
+ * Populated objects should be enqueued to the pool, e.g. using
+ * rte_mempool_ops_enqueue_bulk().
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] max_objs
+ *   Maximum number of objects to be populated.
+ * @param[in] vaddr
+ *   The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address.
+ * @param[in] len
+ *   The length of memory in bytes.
+ * @param[in] obj_cb
+ *   Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ *   An opaque pointer passed to the callback function.
+ * @return
+ *   The number of objects added on success.
+ *   On error, no objects are populated and a negative errno is returned.
  */
-typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
+typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+		unsigned int max_objs,
+		void *vaddr, rte_iova_t iova, size_t len,
+		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
 
 /**
- * Creates a new mempool named *name* in memory.
+ * Default way to populate memory pool objects using provided memory
+ * chunk: just slice objects one by one.
+ */
+int rte_mempool_op_populate_default(struct rte_mempool *mp,
+		unsigned int max_objs,
+		void *vaddr, rte_iova_t iova, size_t len,
+		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
  *
- * This function uses ``memzone_reserve()`` to allocate memory. The
- * pool contains n elements of elt_size. Its size is set to n.
- * All elements of the mempool are allocated together with the mempool header,
- * in one physically contiguous chunk of memory.
+ * Get some additional information about a mempool.
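+ *
+ * A minimal usage sketch, via the rte_mempool_ops_get_info() wrapper
+ * declared below (it assumes the pool's driver implements this
+ * callback; otherwise the wrapper returns -ENOTSUP):
+ *
+ *	struct rte_mempool_info info;
+ *
+ *	if (rte_mempool_ops_get_info(mp, &info) == 0)
+ *		printf("contiguous block size: %u\n",
+ *			info.contig_block_size);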
+ */ +typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp, + struct rte_mempool_info *info); + + +/** Structure defining mempool operations structure */ +struct rte_mempool_ops { + char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */ + rte_mempool_alloc_t alloc; /**< Allocate private data. */ + rte_mempool_free_t free; /**< Free the external pool. */ + rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */ + rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */ + rte_mempool_get_count get_count; /**< Get qty of available objs. */ + /** + * Optional callback to calculate memory size required to + * store specified number of objects. + */ + rte_mempool_calc_mem_size_t calc_mem_size; + /** + * Optional callback to populate mempool objects using + * provided memory chunk. + */ + rte_mempool_populate_t populate; + /** + * Get mempool info + */ + rte_mempool_get_info_t get_info; + /** + * Dequeue a number of contiguous object blocks. + */ + rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks; +} __rte_cache_aligned; + +#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */ + +/** + * Structure storing the table of registered ops structs, each of which contain + * the function pointers for the mempool ops functions. + * Each process has its own storage for this ops struct array so that + * the mempools can be shared across primary and secondary processes. + * The indices used to access the array are valid across processes, whereas + * any function pointers stored directly in the mempool struct would not be. + * This results in us simply having "ops_index" in the mempool struct. + */ +struct rte_mempool_ops_table { + rte_spinlock_t sl; /**< Spinlock for add/delete. */ + uint32_t num_ops; /**< Number of used ops structs in the table. */ + /** + * Storage for all possible ops structs. + */ + struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX]; +} __rte_cache_aligned; + +/** Array of registered ops structs. */ +extern struct rte_mempool_ops_table rte_mempool_ops_table; + +/** + * @internal Get the mempool ops struct from its index. * - * @param name - * The name of the mempool. + * @param ops_index + * The index of the ops struct in the ops struct table. It must be a valid + * index: (0 <= idx < num_ops). + * @return + * The pointer to the ops struct in the table. + */ +static inline struct rte_mempool_ops * +rte_mempool_get_ops(int ops_index) +{ + RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX)); + + return &rte_mempool_ops_table.ops[ops_index]; +} + +/** + * @internal Wrapper for mempool_ops alloc callback. + * + * @param mp + * Pointer to the memory pool. + * @return + * - 0: Success; successfully allocated mempool pool_data. + * - <0: Error; code of alloc function. + */ +int +rte_mempool_ops_alloc(struct rte_mempool *mp); + +/** + * @internal Wrapper for mempool_ops dequeue callback. + * + * @param mp + * Pointer to the memory pool. + * @param obj_table + * Pointer to a table of void * pointers (objects). * @param n - * The number of elements in the mempool. The optimum size (in terms of - * memory usage) for a mempool is when n is a power of two minus one: - * n = (2^q - 1). - * @param elt_size - * The size of each element. - * @param cache_size - * If cache_size is non-zero, the rte_mempool library will try to - * limit the accesses to the common lockless pool, by maintaining a - * per-lcore object cache. This argument must be lower or equal to - * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. 
It is advised to choose - * cache_size to have "n modulo cache_size == 0": if this is - * not the case, some elements will always stay in the pool and will - * never be used. The access to the per-lcore table is of course - * faster than the multi-producer/consumer pool. The cache can be - * disabled if the cache_size argument is set to 0; it can be useful to - * avoid losing objects in cache. Note that even if not used, the - * memory space for cache is always reserved in a mempool structure, - * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. - * @param private_data_size - * The size of the private data appended after the mempool - * structure. This is useful for storing some private data after the - * mempool structure, as is done for rte_mbuf_pool for example. - * @param mp_init - * A function pointer that is called for initialization of the pool, - * before object initialization. The user can initialize the private - * data in this function if needed. This parameter can be NULL if - * not needed. - * @param mp_init_arg - * An opaque pointer to data that can be used in the mempool - * constructor function. - * @param obj_init - * A function pointer that is called for each object at - * initialization of the pool. The user can set some meta data in - * objects if needed. This parameter can be NULL if not needed. - * The obj_init() function takes the mempool pointer, the init_arg, - * the object pointer and the object number as parameters. - * @param obj_init_arg - * An opaque pointer to data that can be used as an argument for - * each call to the object constructor function. - * @param socket_id - * The *socket_id* argument is the socket identifier in the case of - * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA - * constraint for the reserved zone. - * @param flags - * The *flags* arguments is an OR of following flags: - * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread - * between channels in RAM: the pool allocator will add padding - * between objects depending on the hardware configuration. See - * Memory alignment constraints for details. If this flag is set, - * the allocator will just align them to a cache line. - * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are - * cache-aligned. This flag removes this constraint, and no - * padding will be present between objects. This flag implies - * MEMPOOL_F_NO_SPREAD. - * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior - * when using rte_mempool_put() or rte_mempool_put_bulk() is - * "single-producer". Otherwise, it is "multi-producers". - * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior - * when using rte_mempool_get() or rte_mempool_get_bulk() is - * "single-consumer". Otherwise, it is "multi-consumers". + * Number of objects to get. * @return - * The pointer to the new allocated mempool, on success. NULL on error - * with rte_errno set appropriately. Possible rte_errno values include: - * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance - * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list - * - EINVAL - cache size provided is too large - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone + * - 0: Success; got n objects. + * - <0: Error; code of dequeue function. 
*/ -struct rte_mempool * -rte_mempool_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags); +static inline int +rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp, + void **obj_table, unsigned n) +{ + struct rte_mempool_ops *ops; + + ops = rte_mempool_get_ops(mp->ops_index); + return ops->dequeue(mp, obj_table, n); +} /** - * Creates a new mempool named *name* in memory. + * @internal Wrapper for mempool_ops dequeue_contig_blocks callback. * - * This function uses ``memzone_reserve()`` to allocate memory. The - * pool contains n elements of elt_size. Its size is set to n. - * Depending on the input parameters, mempool elements can be either allocated - * together with the mempool header, or an externally provided memory buffer - * could be used to store mempool objects. In later case, that external - * memory buffer can consist of set of disjoint phyiscal pages. + * @param[in] mp + * Pointer to the memory pool. + * @param[out] first_obj_table + * Pointer to a table of void * pointers (first objects). + * @param[in] n + * Number of blocks to get. + * @return + * - 0: Success; got n objects. + * - <0: Error; code of dequeue function. + */ +static inline int +rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp, + void **first_obj_table, unsigned int n) +{ + struct rte_mempool_ops *ops; + + ops = rte_mempool_get_ops(mp->ops_index); + RTE_ASSERT(ops->dequeue_contig_blocks != NULL); + return ops->dequeue_contig_blocks(mp, first_obj_table, n); +} + +/** + * @internal wrapper for mempool_ops enqueue callback. * - * @param name - * The name of the mempool. + * @param mp + * Pointer to the memory pool. + * @param obj_table + * Pointer to a table of void * pointers (objects). * @param n - * The number of elements in the mempool. The optimum size (in terms of - * memory usage) for a mempool is when n is a power of two minus one: - * n = (2^q - 1). - * @param elt_size - * The size of each element. - * @param cache_size - * If cache_size is non-zero, the rte_mempool library will try to - * limit the accesses to the common lockless pool, by maintaining a - * per-lcore object cache. This argument must be lower or equal to - * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose - * cache_size to have "n modulo cache_size == 0": if this is - * not the case, some elements will always stay in the pool and will - * never be used. The access to the per-lcore table is of course - * faster than the multi-producer/consumer pool. The cache can be - * disabled if the cache_size argument is set to 0; it can be useful to - * avoid losing objects in cache. Note that even if not used, the - * memory space for cache is always reserved in a mempool structure, - * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. - * @param private_data_size - * The size of the private data appended after the mempool - * structure. This is useful for storing some private data after the - * mempool structure, as is done for rte_mbuf_pool for example. - * @param mp_init - * A function pointer that is called for initialization of the pool, - * before object initialization. The user can initialize the private - * data in this function if needed. This parameter can be NULL if - * not needed. - * @param mp_init_arg - * An opaque pointer to data that can be used in the mempool - * constructor function. 
- * @param obj_init - * A function pointer that is called for each object at - * initialization of the pool. The user can set some meta data in - * objects if needed. This parameter can be NULL if not needed. - * The obj_init() function takes the mempool pointer, the init_arg, - * the object pointer and the object number as parameters. - * @param obj_init_arg - * An opaque pointer to data that can be used as an argument for - * each call to the object constructor function. - * @param socket_id - * The *socket_id* argument is the socket identifier in the case of - * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA - * constraint for the reserved zone. - * @param flags - * The *flags* arguments is an OR of following flags: - * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread - * between channels in RAM: the pool allocator will add padding - * between objects depending on the hardware configuration. See - * Memory alignment constraints for details. If this flag is set, - * the allocator will just align them to a cache line. - * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are - * cache-aligned. This flag removes this constraint, and no - * padding will be present between objects. This flag implies - * MEMPOOL_F_NO_SPREAD. - * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior - * when using rte_mempool_put() or rte_mempool_put_bulk() is - * "single-producer". Otherwise, it is "multi-producers". - * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior - * when using rte_mempool_get() or rte_mempool_get_bulk() is - * "single-consumer". Otherwise, it is "multi-consumers". - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param paddr - * Array of phyiscall addresses of the pages that comprises given memory - * buffer. - * @param pg_num - * Number of elements in the paddr array. - * @param pg_shift - * LOG2 of the physical pages size. + * Number of objects to put. * @return - * The pointer to the new allocated mempool, on success. NULL on error - * with rte_errno set appropriately. Possible rte_errno values include: - * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance - * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list - * - EINVAL - cache size provided is too large - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone + * - 0: Success; n objects supplied. + * - <0: Error; code of enqueue function. + */ +static inline int +rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table, + unsigned n) +{ + struct rte_mempool_ops *ops; + + ops = rte_mempool_get_ops(mp->ops_index); + return ops->enqueue(mp, obj_table, n); +} + +/** + * @internal wrapper for mempool_ops get_count callback. + * + * @param mp + * Pointer to the memory pool. + * @return + * The number of available objects in the external pool. + */ +unsigned +rte_mempool_ops_get_count(const struct rte_mempool *mp); + +/** + * @internal wrapper for mempool_ops calc_mem_size callback. + * API to calculate size of memory required to store specified number of + * object. + * + * @param[in] mp + * Pointer to the memory pool. + * @param[in] obj_num + * Number of objects. 
+ * @param[in] pg_shift + * LOG2 of the physical pages size. If set to 0, ignore page boundaries. + * @param[out] min_chunk_size + * Location for minimum size of the memory chunk which may be used to + * store memory pool objects. + * @param[out] align + * Location for required memory chunk alignment. + * @return + * Required memory size aligned at page boundary. + */ +ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, + size_t *min_chunk_size, size_t *align); + +/** + * @internal wrapper for mempool_ops populate callback. + * + * Populate memory pool objects using provided memory chunk. + * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] max_objs + * Maximum number of objects to be populated. + * @param[in] vaddr + * The virtual address of memory that should be used to store objects. + * @param[in] iova + * The IO address + * @param[in] len + * The length of memory in bytes. + * @param[in] obj_cb + * Callback function to be executed for each populated object. + * @param[in] obj_cb_arg + * An opaque pointer passed to the callback function. + * @return + * The number of objects added on success. + * On error, no objects are populated and a negative errno is returned. + */ +int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, + void *obj_cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Wrapper for mempool_ops get_info callback. + * + * @param[in] mp + * Pointer to the memory pool. + * @param[out] info + * Pointer to the rte_mempool_info structure + * @return + * - 0: Success; The mempool driver supports retrieving supplementary + * mempool information + * - -ENOTSUP - doesn't support get_info ops (valid case). + */ +__rte_experimental +int rte_mempool_ops_get_info(const struct rte_mempool *mp, + struct rte_mempool_info *info); + +/** + * @internal wrapper for mempool_ops free callback. + * + * @param mp + * Pointer to the memory pool. + */ +void +rte_mempool_ops_free(struct rte_mempool *mp); + +/** + * Set the ops of a mempool. + * + * This can only be done on a mempool that is not populated, i.e. just after + * a call to rte_mempool_create_empty(). + * + * @param mp + * Pointer to the memory pool. + * @param name + * Name of the ops structure to use for this mempool. + * @param pool_config + * Opaque data that can be passed by the application to the ops functions. + * @return + * - 0: Success; the mempool is now using the requested ops functions. + * - -EINVAL - Invalid ops struct name provided. + * - -EEXIST - mempool already has an ops struct assigned. + */ +int +rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name, + void *pool_config); + +/** + * Register mempool operations. + * + * @param ops + * Pointer to an ops structure to register. + * @return + * - >=0: Success; return the index of the ops struct in the table. + * - -EINVAL - some missing callbacks while registering ops struct. + * - -ENOSPC - the maximum number of ops structs has been reached. + */ +int rte_mempool_register_ops(const struct rte_mempool_ops *ops); + +/** + * Macro to statically register the ops of a mempool handler. + * Note that the rte_mempool_register_ops fails silently here when + * more than RTE_MEMPOOL_MAX_OPS_IDX is registered. 
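+ *
+ * A sketch of a handler registration (the my_* callbacks are
+ * hypothetical driver-provided functions, not part of this API):
+ *
+ *	static const struct rte_mempool_ops ops_my_handler = {
+ *		.name = "my_handler",
+ *		.alloc = my_alloc,
+ *		.free = my_free,
+ *		.enqueue = my_enqueue,
+ *		.dequeue = my_dequeue,
+ *		.get_count = my_get_count,
+ *	};
+ *	MEMPOOL_REGISTER_OPS(ops_my_handler);
+ *
+ * An application can then select it on a not-yet-populated mempool with
+ * rte_mempool_set_ops_byname(mp, "my_handler", NULL).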
+ */ +#define MEMPOOL_REGISTER_OPS(ops) \ + RTE_INIT(mp_hdlr_init_##ops) \ + { \ + rte_mempool_register_ops(&ops); \ + } + +/** + * An object callback function for mempool. + * + * Used by rte_mempool_create() and rte_mempool_obj_iter(). + */ +typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp, + void *opaque, void *obj, unsigned obj_idx); +typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */ + +/** + * A memory callback function for mempool. + * + * Used by rte_mempool_mem_iter(). + */ +typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp, + void *opaque, struct rte_mempool_memhdr *memhdr, + unsigned mem_idx); + +/** + * A mempool constructor callback function. + * + * Arguments are the mempool and the opaque pointer given by the user in + * rte_mempool_create(). */ -struct rte_mempool * -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags, void *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift); +typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); -#ifdef RTE_LIBRTE_XEN_DOM0 /** - * Creates a new mempool named *name* in memory on Xen Dom0. + * Create a new mempool named *name* in memory. * - * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The + * This function uses ``rte_memzone_reserve()`` to allocate memory. The * pool contains n elements of elt_size. Its size is set to n. - * All elements of the mempool are allocated together with the mempool header, - * and memory buffer can consist of set of disjoint phyiscal pages. * * @param name * The name of the mempool. @@ -660,15 +879,13 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, * If cache_size is non-zero, the rte_mempool library will try to * limit the accesses to the common lockless pool, by maintaining a * per-lcore object cache. This argument must be lower or equal to - * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose + * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose * cache_size to have "n modulo cache_size == 0": if this is * not the case, some elements will always stay in the pool and will * never be used. The access to the per-lcore table is of course * faster than the multi-producer/consumer pool. The cache can be * disabled if the cache_size argument is set to 0; it can be useful to - * avoid losing objects in cache. Note that even if not used, the - * memory space for cache is always reserved in a mempool structure, - * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. + * avoid losing objects in cache. * @param private_data_size * The size of the private data appended after the mempool * structure. This is useful for storing some private data after the @@ -711,32 +928,278 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior * when using rte_mempool_get() or rte_mempool_get_bulk() is * "single-consumer". Otherwise, it is "multi-consumers". + * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't + * necessarily be contiguous in IO memory. * @return * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. 
Possible rte_errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure * - E_RTE_SECONDARY - function was called from a secondary process instance - * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list * - EINVAL - cache size provided is too large * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists * - ENOMEM - no appropriate memory area found in which to create memzone */ struct rte_mempool * -rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags); -#endif +rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags); + +/** + * Create an empty mempool + * + * The mempool is allocated and initialized, but it is not populated: no + * memory is allocated for the mempool elements. The user has to call + * rte_mempool_populate_*() to add memory chunks to the pool. Once + * populated, the user may also want to initialize each object with + * rte_mempool_obj_iter(). + * + * @param name + * The name of the mempool. + * @param n + * The maximum number of elements that can be added in the mempool. + * The optimum size (in terms of memory usage) for a mempool is when n + * is a power of two minus one: n = (2^q - 1). + * @param elt_size + * The size of each element. + * @param cache_size + * Size of the cache. See rte_mempool_create() for details. + * @param private_data_size + * The size of the private data appended after the mempool + * structure. This is useful for storing some private data after the + * mempool structure, as is done for rte_mbuf_pool for example. + * @param socket_id + * The *socket_id* argument is the socket identifier in the case of + * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA + * constraint for the reserved zone. + * @param flags + * Flags controlling the behavior of the mempool. See + * rte_mempool_create() for details. + * @return + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. See rte_mempool_create() for details. + */ +struct rte_mempool * +rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + int socket_id, unsigned flags); +/** + * Free a mempool + * + * Unlink the mempool from global list, free the memory chunks, and all + * memory referenced by the mempool. The objects must not be used by + * other cores as they will be freed. + * + * @param mp + * A pointer to the mempool structure. + */ +void +rte_mempool_free(struct rte_mempool *mp); + +/** + * Add physically contiguous memory for objects in the pool at init + * + * Add a virtually and physically contiguous memory chunk in the pool + * where objects can be instantiated. + * + * If the given IO address is unknown (iova = RTE_BAD_IOVA), + * the chunk doesn't need to be physically contiguous (only virtually), + * and allocated objects may span two pages. + * + * @param mp + * A pointer to the mempool structure. + * @param vaddr + * The virtual address of memory that should be used to store objects. 
+ * @param iova + * The IO address + * @param len + * The length of memory in bytes. + * @param free_cb + * The callback used to free this chunk when destroying the mempool. + * @param opaque + * An opaque argument passed to free_cb. + * @return + * The number of objects added on success. + * On error, the chunk is not added in the memory list of the + * mempool and a negative errno is returned. + */ +int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque); + +/** + * Add virtually contiguous memory for objects in the pool at init + * + * Add a virtually contiguous memory chunk in the pool where objects can + * be instantiated. + * + * @param mp + * A pointer to the mempool structure. + * @param addr + * The virtual address of memory that should be used to store objects. + * @param len + * The length of memory in bytes. + * @param pg_sz + * The size of memory pages in this virtual area. + * @param free_cb + * The callback used to free this chunk when destroying the mempool. + * @param opaque + * An opaque argument passed to free_cb. + * @return + * The number of objects added on success. + * On error, the chunk is not added in the memory list of the + * mempool and a negative errno is returned. + */ +int +rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, + size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, + void *opaque); + +/** + * Add memory for objects in the pool at init + * + * This is the default function used by rte_mempool_create() to populate + * the mempool. It adds memory allocated using rte_memzone_reserve(). + * + * @param mp + * A pointer to the mempool structure. + * @return + * The number of objects added on success. + * On error, the chunk is not added in the memory list of the + * mempool and a negative errno is returned. + */ +int rte_mempool_populate_default(struct rte_mempool *mp); + +/** + * Add memory from anonymous mapping for objects in the pool at init + * + * This function mmap an anonymous memory zone that is locked in + * memory to store the objects of the mempool. + * + * @param mp + * A pointer to the mempool structure. + * @return + * The number of objects added on success. + * On error, the chunk is not added in the memory list of the + * mempool and a negative errno is returned. + */ +int rte_mempool_populate_anon(struct rte_mempool *mp); + +/** + * Call a function for each mempool element + * + * Iterate across all objects attached to a rte_mempool and call the + * callback function on it. + * + * @param mp + * A pointer to an initialized mempool. + * @param obj_cb + * A function pointer that is called for each object. + * @param obj_cb_arg + * An opaque pointer passed to the callback function. + * @return + * Number of objects iterated. + */ +uint32_t rte_mempool_obj_iter(struct rte_mempool *mp, + rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg); + +/** + * Call a function for each mempool memory chunk + * + * Iterate across all memory chunks attached to a rte_mempool and call + * the callback function on it. + * + * @param mp + * A pointer to an initialized mempool. + * @param mem_cb + * A function pointer that is called for each memory chunk. + * @param mem_cb_arg + * An opaque pointer passed to the callback function. + * @return + * Number of memory chunks iterated. + */ +uint32_t rte_mempool_mem_iter(struct rte_mempool *mp, + rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg); + +/** + * Dump the status of the mempool to a file. 
+ * + * @param f + * A pointer to a file for output + * @param mp + * A pointer to the mempool structure. + */ +void rte_mempool_dump(FILE *f, struct rte_mempool *mp); + +/** + * Create a user-owned mempool cache. + * + * This can be used by non-EAL threads to enable caching when they + * interact with a mempool. + * + * @param size + * The size of the mempool cache. See rte_mempool_create()'s cache_size + * parameter description for more information. The same limits and + * considerations apply here too. + * @param socket_id + * The socket identifier in the case of NUMA. The value can be + * SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone. + */ +struct rte_mempool_cache * +rte_mempool_cache_create(uint32_t size, int socket_id); + +/** + * Free a user-owned mempool cache. + * + * @param cache + * A pointer to the mempool cache. + */ +void +rte_mempool_cache_free(struct rte_mempool_cache *cache); /** - * Dump the status of the mempool to the console. + * Get a pointer to the per-lcore default mempool cache. * * @param mp * A pointer to the mempool structure. + * @param lcore_id + * The logical core id. + * @return + * A pointer to the mempool cache or NULL if disabled or non-EAL thread. + */ +static __rte_always_inline struct rte_mempool_cache * +rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) +{ + if (mp->cache_size == 0) + return NULL; + + if (lcore_id >= RTE_MAX_LCORE) + return NULL; + + return &mp->local_cache[lcore_id]; +} + +/** + * Flush a user-owned mempool cache to the specified mempool. + * + * @param cache + * A pointer to the mempool cache. + * @param mp + * A pointer to the mempool. */ -void rte_mempool_dump(const struct rte_mempool *mp); +static __rte_always_inline void +rte_mempool_cache_flush(struct rte_mempool_cache *cache, + struct rte_mempool *mp) +{ + if (cache == NULL) + cache = rte_mempool_default_cache(mp, rte_lcore_id()); + if (cache == NULL || cache->len == 0) + return; + rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); + cache->len = 0; +} /** * @internal Put several objects back in the mempool; used internally. @@ -747,35 +1210,22 @@ void rte_mempool_dump(const struct rte_mempool *mp); * @param n * The number of objects to store back in the mempool, must be strictly * positive. - * @param is_mp - * Mono-producer (0) or multi-producers (1). + * @param cache + * A pointer to a mempool cache structure. May be NULL if not needed. 
*/ -static inline void __attribute__((always_inline)) -__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n, int is_mp) +static __rte_always_inline void +__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, + unsigned int n, struct rte_mempool_cache *cache) { -#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 - struct rte_mempool_cache *cache; - uint32_t index; void **cache_objs; - unsigned lcore_id = rte_lcore_id(); - uint32_t cache_size = mp->cache_size; - uint32_t flushthresh = mp->cache_flushthresh; -#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ /* increment stat now, adding in mempool always success */ __MEMPOOL_STAT_ADD(mp, put, n); -#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 - /* cache is not enabled or single producer */ - if (unlikely(cache_size == 0 || is_mp == 0)) + /* No cache provided or if put would overflow mem allocated for cache */ + if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE)) goto ring_enqueue; - /* Go straight to ring if put would overflow mem allocated for cache */ - if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE)) - goto ring_enqueue; - - cache = &mp->local_cache[lcore_id]; cache_objs = &cache->objs[cache->len]; /* @@ -786,43 +1236,32 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, */ /* Add elements back into the cache */ - for (index = 0; index < n; ++index, obj_table++) - cache_objs[index] = *obj_table; + rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n); cache->len += n; - if (cache->len >= flushthresh) { - rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size], - cache->len - cache_size); - cache->len = cache_size; + if (cache->len >= cache->flushthresh) { + rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size], + cache->len - cache->size); + cache->len = cache->size; } return; ring_enqueue: -#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ /* push remaining objects in ring */ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - if (is_mp) { - if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0) - rte_panic("cannot put objects in mempool\n"); - } - else { - if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0) - rte_panic("cannot put objects in mempool\n"); - } + if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0) + rte_panic("cannot put objects in mempool\n"); #else - if (is_mp) - rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n); - else - rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n); + rte_mempool_ops_enqueue_bulk(mp, obj_table, n); #endif } /** - * Put several objects back in the mempool (multi-producers safe). + * Put several objects back in the mempool. * * @param mp * A pointer to the mempool structure. @@ -830,31 +1269,15 @@ ring_enqueue: * A pointer to a table of void * pointers (objects). * @param n * The number of objects to add in the mempool from the obj_table. + * @param cache + * A pointer to a mempool cache structure. May be NULL if not needed. */ -static inline void __attribute__((always_inline)) -rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) -{ - __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_put_bulk(mp, obj_table, n, 1); -} - -/** - * Put several objects back in the mempool (NOT multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj_table - * A pointer to a table of void * pointers (objects). - * @param n - * The number of objects to add in the mempool from obj_table. 
- */ -static inline void -rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) +static __rte_always_inline void +rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, + unsigned int n, struct rte_mempool_cache *cache) { __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_put_bulk(mp, obj_table, n, 0); + __mempool_generic_put(mp, obj_table, n, cache); } /** @@ -871,40 +1294,13 @@ rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table, * @param n * The number of objects to add in the mempool from obj_table. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) -{ - __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT)); -} - -/** - * Put one object in the mempool (multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj - * A pointer to the object to be added. - */ -static inline void __attribute__((always_inline)) -rte_mempool_mp_put(struct rte_mempool *mp, void *obj) -{ - rte_mempool_mp_put_bulk(mp, &obj, 1); -} - -/** - * Put one object back in the mempool (NOT multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj - * A pointer to the object to be added. - */ -static inline void __attribute__((always_inline)) -rte_mempool_sp_put(struct rte_mempool *mp, void *obj) + unsigned int n) { - rte_mempool_sp_put_bulk(mp, &obj, 1); + struct rte_mempool_cache *cache; + cache = rte_mempool_default_cache(mp, rte_lcore_id()); + rte_mempool_generic_put(mp, obj_table, n, cache); } /** @@ -919,7 +1315,7 @@ rte_mempool_sp_put(struct rte_mempool *mp, void *obj) * @param obj * A pointer to the object to be added. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj) { rte_mempool_put_bulk(mp, &obj, 1); @@ -933,44 +1329,37 @@ rte_mempool_put(struct rte_mempool *mp, void *obj) * A pointer to a table of void * pointers (objects). * @param n * The number of objects to get, must be strictly positive. - * @param is_mc - * Mono-consumer (0) or multi-consumers (1). + * @param cache + * A pointer to a mempool cache structure. May be NULL if not needed. * @return * - >=0: Success; number of objects supplied. * - <0: Error; code of ring dequeue function. */ -static inline int __attribute__((always_inline)) -__mempool_get_bulk(struct rte_mempool *mp, void **obj_table, - unsigned n, int is_mc) +static __rte_always_inline int +__mempool_generic_get(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { int ret; -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG - unsigned n_orig = n; -#endif -#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 - struct rte_mempool_cache *cache; uint32_t index, len; void **cache_objs; - unsigned lcore_id = rte_lcore_id(); - uint32_t cache_size = mp->cache_size; - /* cache is not enabled or single consumer */ - if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size)) + /* No cache provided or cannot be satisfied from cache */ + if (unlikely(cache == NULL || n >= cache->size)) goto ring_dequeue; - cache = &mp->local_cache[lcore_id]; cache_objs = cache->objs; /* Can this be satisfied from the cache? */ if (cache->len < n) { /* No. 
Backfill the cache first, and then fill from it */ - uint32_t req = n + (cache_size - cache->len); + uint32_t req = n + (cache->size - cache->len); /* How many do we require i.e. number to fill the cache + the request */ - ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req); + ret = rte_mempool_ops_dequeue_bulk(mp, + &cache->objs[cache->len], req); if (unlikely(ret < 0)) { /* - * In the offchance that we are buffer constrained, + * In the off chance that we are buffer constrained, * where we are not able to allocate cache + n, go to * the ring directly. If that fails, we are truly out of * buffers. @@ -987,29 +1376,25 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table, cache->len -= n; - __MEMPOOL_STAT_ADD(mp, get_success, n_orig); + __MEMPOOL_STAT_ADD(mp, get_success, n); return 0; ring_dequeue: -#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ /* get remaining objects from ring */ - if (is_mc) - ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n); - else - ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n); + ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n); if (ret < 0) - __MEMPOOL_STAT_ADD(mp, get_fail, n_orig); + __MEMPOOL_STAT_ADD(mp, get_fail, n); else - __MEMPOOL_STAT_ADD(mp, get_success, n_orig); + __MEMPOOL_STAT_ADD(mp, get_success, n); return ret; } /** - * Get several objects from the mempool (multi-consumers safe). + * Get several objects from the mempool. * * If cache is enabled, objects will be retrieved first from cache, * subsequently from the common pool. Note that it can return -ENOENT when @@ -1022,44 +1407,18 @@ ring_dequeue: * A pointer to a table of void * pointers (objects) that will be filled. * @param n * The number of objects to get from mempool to obj_table. + * @param cache + * A pointer to a mempool cache structure. May be NULL if not needed. * @return * - 0: Success; objects taken. * - -ENOENT: Not enough entries in the mempool; no object is retrieved. */ -static inline int __attribute__((always_inline)) -rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n) -{ - int ret; - ret = __mempool_get_bulk(mp, obj_table, n, 1); - if (ret == 0) - __mempool_check_cookies(mp, obj_table, n, 1); - return ret; -} - -/** - * Get several objects from the mempool (NOT multi-consumers safe). - * - * If cache is enabled, objects will be retrieved first from cache, - * subsequently from the common pool. Note that it can return -ENOENT when - * the local cache and common pool are empty, even if cache from other - * lcores are full. - * - * @param mp - * A pointer to the mempool structure. - * @param obj_table - * A pointer to a table of void * pointers (objects) that will be filled. - * @param n - * The number of objects to get from the mempool to obj_table. - * @return - * - 0: Success; objects taken. - * - -ENOENT: Not enough entries in the mempool; no object is - * retrieved. 
- */
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+static __rte_always_inline int
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+			unsigned int n, struct rte_mempool_cache *cache)
 {
 	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n, 0);
+	ret = __mempool_generic_get(mp, obj_table, n, cache);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
 	return ret;
@@ -1087,41 +1446,20 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
-rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+static __rte_always_inline int
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
 {
-	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n,
-				 !(mp->flags & MEMPOOL_F_SC_GET));
-	if (ret == 0)
-		__mempool_check_cookies(mp, obj_table, n, 1);
-	return ret;
+	struct rte_mempool_cache *cache;
+	cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	return rte_mempool_generic_get(mp, obj_table, n, cache);
 }
 
 /**
- * Get one object from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
+ * Get one object from the mempool.
  *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_p
- *   A pointer to a void * pointer (object) that will be filled.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
-{
-	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
-}
-
-/**
- * Get one object from the mempool (NOT multi-consumers safe).
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation (see flags).
  *
  * If cache is enabled, objects will be retrieved first from cache,
  * subsequently from the common pool. Note that it can return -ENOENT when
@@ -1136,36 +1474,53 @@ rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
+static __rte_always_inline int
+rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+	return rte_mempool_get_bulk(mp, obj_p, 1);
 }
 
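For illustration (not part of this patch), a typical burst allocation from an EAL thread using rte_mempool_get_bulk()/rte_mempool_put_bulk() as declared above; the burst size and error handling are arbitrary:

    #include <rte_mempool.h>

    /* Hypothetical burst: take 32 objects, use them, return them. */
    static void
    burst_example(struct rte_mempool *mp)
    {
            void *objs[32];

            /* All-or-nothing: on -ENOENT, no object is taken. */
            if (rte_mempool_get_bulk(mp, objs, 32) < 0)
                    return;

            /* ... work with objs[0] .. objs[31] ... */

            rte_mempool_put_bulk(mp, objs, 32);
    }

 /**
- * Get one object from the mempool.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
  *
- * This function calls the multi-consumers or the single-consumer
- * version, depending on the default behavior that was specified at
- * mempool creation (see flags).
+ * Get contiguous blocks of objects from the mempool.
  *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
+ * If cache is enabled, consider flushing it first, to reuse objects
+ * as soon as possible.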
+ *
+ * The application should check that the driver supports the operation
+ * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
+ * is not zero.
  *
  * @param mp
  *   A pointer to the mempool structure.
- * @param obj_p
- *   A pointer to a void * pointer (object) that will be filled.
+ * @param first_obj_table
+ *   A pointer to a pointer to the first object in each block.
+ * @param n
+ *   The number of blocks to get from mempool.
  * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ *   - 0: Success; blocks taken.
+ *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
+ *   - -EOPNOTSUPP: The mempool driver does not support block dequeue.
  */
-static inline int __attribute__((always_inline))
-rte_mempool_get(struct rte_mempool *mp, void **obj_p)
+static __rte_always_inline int
+__rte_experimental
+rte_mempool_get_contig_blocks(struct rte_mempool *mp,
+	void **first_obj_table, unsigned int n)
 {
-	return rte_mempool_get_bulk(mp, obj_p, 1);
+	int ret;
+
+	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
+	if (ret == 0) {
+		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
+		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
+			1);
+	} else {
+		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
+	}
+
+	return ret;
 }
 
 /**
@@ -1173,22 +1528,17 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
  *
  * When cache is enabled, this function has to browse the length of
  * all lcores, so it should not be used in a data path, but only for
- * debug purposes.
+ * debug purposes. User-owned mempool caches are not accounted for.
  *
  * @param mp
  *   A pointer to the mempool structure.
  * @return
  *   The number of entries in the mempool.
  */
-unsigned rte_mempool_count(const struct rte_mempool *mp);
+unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
 
 /**
- * Return the number of free entries in the mempool ring.
- * i.e. how many entries can be freed back to the mempool.
- *
- * NOTE: This corresponds to the number of elements *allocated* from the
- * memory pool, not the number of elements in the pool itself. To count
- * the number elements currently available in the pool, use "rte_mempool_count"
+ * Return the number of elements which have been allocated from the mempool
  *
  * When cache is enabled, this function has to browse the length of
  * all lcores, so it should not be used in a data path, but only for
@@ -1199,18 +1549,15 @@ unsigned rte_mempool_count(const struct rte_mempool *mp);
  * @return
- *   The number of free entries in the mempool.
+ *   The number of elements allocated from the mempool.
  */
-static inline unsigned
-rte_mempool_free_count(const struct rte_mempool *mp)
-{
-	return mp->size - rte_mempool_count(mp);
-}
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp);
 
 /**
  * Test if the mempool is full.
  *
  * When cache is enabled, this function has to browse the length of all
  * lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
  *
  * @param mp
  *   A pointer to the mempool structure.
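As a sketch (not part of this patch) of how an application might combine the experimental block API with the capability check described above; the wrapper name is hypothetical:

    #include <errno.h>
    #include <rte_mempool.h>

    /* Hypothetical: dequeue one physically contiguous block of objects. */
    static int
    get_one_block(struct rte_mempool *mp, void **first_obj)
    {
            struct rte_mempool_info info;

            /* Drivers that support the operation report a non-zero
             * contig_block_size. */
            if (rte_mempool_ops_get_info(mp, &info) < 0 ||
                info.contig_block_size == 0)
                    return -EOPNOTSUPP;

            /* On success, *first_obj points at the first of
             * info.contig_block_size adjacent objects. */
            return rte_mempool_get_contig_blocks(mp, first_obj, 1);
    }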
@@ -1221,7 +1568,7 @@ rte_mempool_free_count(const struct rte_mempool *mp)
 static inline int
 rte_mempool_full(const struct rte_mempool *mp)
 {
-	return !!(rte_mempool_count(mp) == mp->size);
+	return !!(rte_mempool_avail_count(mp) == mp->size);
 }
 
 /**
@@ -1229,7 +1576,7 @@ rte_mempool_full(const struct rte_mempool *mp)
  *
  * When cache is enabled, this function has to browse the length of all
  * lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
  *
  * @param mp
  *   A pointer to the mempool structure.
@@ -1240,35 +1587,26 @@ rte_mempool_full(const struct rte_mempool *mp)
 static inline int
 rte_mempool_empty(const struct rte_mempool *mp)
 {
-	return !!(rte_mempool_count(mp) == 0);
+	return !!(rte_mempool_avail_count(mp) == 0);
 }
 
 /**
- * Return the physical address of elt, which is an element of the pool mp.
+ * Return the IO address of elt, which is an element of the pool mp.
  *
- * @param mp
- *   A pointer to the mempool structure.
  * @param elt
  *   A pointer (virtual address) to the element of the pool.
  * @return
- *   The physical address of the elt element.
+ *   The IO address of the elt element.
+ *   If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
+ *   returned value is RTE_BAD_IOVA.
  */
-static inline phys_addr_t
-rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
+static inline rte_iova_t
+rte_mempool_virt2iova(const void *elt)
 {
-	if (rte_eal_has_hugepages()) {
-		uintptr_t off;
-
-		off = (const char *)elt - (const char *)mp->elt_va_start;
-		return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
-	} else {
-		/*
-		 * If huge pages are disabled, we cannot assume the
-		 * memory region to be physically contiguous.
-		 * Lookup for each element.
-		 */
-		return rte_mem_virt2phy(elt);
-	}
+	const struct rte_mempool_objhdr *hdr;
+	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
+		sizeof(*hdr));
+	return hdr->iova;
 }
 
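For illustration (not part of this patch), a sketch of resolving an object's IO address, e.g. to program a DMA descriptor; the helper name is hypothetical:

    #include <rte_memory.h>
    #include <rte_mempool.h>

    /* Hypothetical: fetch an object's bus address for a DMA descriptor. */
    static int
    fill_dma_addr(void *obj, rte_iova_t *dma_addr)
    {
            rte_iova_t iova = rte_mempool_virt2iova(obj);

            /* Pools created with MEMPOOL_F_NO_IOVA_CONTIG have no usable
             * per-object IO address and report RTE_BAD_IOVA. */
            if (iova == RTE_BAD_IOVA)
                    return -1;

            *dma_addr = iova;
            return 0;
    }

 /**
@@ -1281,7 +1619,7 @@ rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
  *   A pointer to the mempool structure.
  */
-void rte_mempool_audit(const struct rte_mempool *mp);
+void rte_mempool_audit(struct rte_mempool *mp);
 
 /**
  * Return a pointer to the private data in an mempool structure.
@@ -1293,13 +1631,17 @@ void rte_mempool_audit(const struct rte_mempool *mp);
  */
 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
 {
-	return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
+	return (char *)mp +
+		MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
 }
 
 /**
  * Dump the status of all mempools on the console
+ *
+ * @param f
+ *   A pointer to a file for output
  */
-void rte_mempool_list_dump(void);
+void rte_mempool_list_dump(FILE *f);
 
 /**
  * Search a mempool from its name
@@ -1316,14 +1658,19 @@ void rte_mempool_list_dump(void);
 struct rte_mempool *rte_mempool_lookup(const char *name);
 
 /**
+ * Get the header, trailer and total size of a mempool element.
+ *
  * Given a desired size of the mempool element and mempool flags,
- * caluclates header, trailer, body and total sizes of the mempool object.
+ * calculates header, trailer, body and total sizes of the mempool object.
+ *
  * @param elt_size
- *   The size of each element.
+ *   The size of each element, without header and trailer.
  * @param flags
  *   The flags used for the mempool creation.
  *   Consult rte_mempool_create() for more information about possible values.
- *   The size of each element.
+ * @param sz
+ *   The calculated detailed size of the mempool object. May be NULL.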
 * @return
 *   Total size of the mempool object.
 */
@@ -1331,48 +1678,26 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	struct rte_mempool_objsz *sz);
 
 /**
- * Calculate maximum amount of memory required to store given number of objects.
- * Assumes that the memory buffer will be aligned at page boundary.
- * Note, that if object size is bigger then page size, then it assumes that
- * we have a subsets of physically continuous pages big enough to store
- * at least one object.
- * @param elt_num
- *   Number of elements.
- * @param elt_sz
- *   The size of each element.
- * @param pg_shift
- *   LOG2 of the physical pages size.
- * @return
- *   Required memory size aligned at page boundary.
+ * Walk the list of all memory pools
+ *
+ * @param func
+ *   Iterator function
+ * @param arg
+ *   Argument passed to iterator
 */
-size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
-	uint32_t pg_shift);
+void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
+	void *arg);
 
 /**
- * Calculate how much memory would be actually required with the given
- * memory footprint to store required number of objects.
- * @param vaddr
- *   Virtual address of the externally allocated memory buffer.
- *   Will be used to store mempool objects.
- * @param elt_num
- *   Number of elements.
- * @param elt_sz
- *   The size of each element.
- * @param paddr
- *   Array of phyiscall addresses of the pages that comprises given memory
- *   buffer.
- * @param pg_num
- *   Number of elements in the paddr array.
- * @param pg_shift
- *   LOG2 of the physical pages size.
- * @return
- *   Number of bytes needed to store given number of objects,
- *   aligned to the given page size.
- *   If provided memory buffer is not big enough:
- *   (-1) * actual number of elemnts that can be stored in that buffer.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Get page size used for mempool object allocation.
+ * This function is internal to the mempool library and to mempool drivers.
 */
-ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
-	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+__rte_experimental
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
 
 #ifdef __cplusplus
 }
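To close, a short sketch (not part of this patch) combining rte_mempool_walk() with the counting functions declared earlier; the helper names are hypothetical:

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Hypothetical per-pool visitor: print a one-line usage summary. */
    static void
    dump_one_pool(struct rte_mempool *mp, void *arg)
    {
            FILE *f = arg;

            fprintf(f, "%s: %u available, %u in use\n", mp->name,
                    rte_mempool_avail_count(mp),
                    rte_mempool_in_use_count(mp));
    }

    static void
    dump_all_pools(void)
    {
            rte_mempool_walk(dump_one_pool, stdout);
    }

As with rte_mempool_avail_count() and rte_mempool_in_use_count() themselves, such a walk browses per-lcore caches and is intended for debugging, not for the data path.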