diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0243f9e4f6..9e41f595e8 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1,35 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   Copyright(c) 2016 6WIND S.A.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
  */
 
 #ifndef _RTE_MEMPOOL_H_
@@ -51,13 +22,15 @@
  * meta-data in the object data and retrieve them when allocating a
  * new object.
  *
- * Note: the mempool implementation is not preemptable. A lcore must
- * not be interrupted by another task that uses the same mempool
- * (because it uses a ring which is not preemptable). Also, mempool
- * functions must not be used outside the DPDK environment: for
- * example, in linuxapp environment, a thread that is not created by
- * the EAL must not use mempools. This is due to the per-lcore cache
- * that won't work as rte_lcore_id() will not return a correct value.
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
*/ #include @@ -67,6 +40,7 @@ #include #include +#include #include #include #include @@ -96,6 +70,10 @@ struct rte_mempool_debug_stats { uint64_t get_success_objs; /**< Objects successfully allocated. */ uint64_t get_fail_bulk; /**< Failed allocation number. */ uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ + /** Successful allocation number of contiguous blocks. */ + uint64_t get_success_blks; + /** Failed allocation number of contiguous blocks. */ + uint64_t get_fail_blks; } __rte_cache_aligned; #endif @@ -155,7 +133,11 @@ struct rte_mempool_objsz { struct rte_mempool_objhdr { STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */ struct rte_mempool *mp; /**< The mempool owning the object. */ - phys_addr_t physaddr; /**< Physical address of the object. */ + RTE_STD_C11 + union { + rte_iova_t iova; /**< IO address of the object. */ + phys_addr_t physaddr; /**< deprecated - Physical address of the object. */ + }; #ifdef RTE_LIBRTE_MEMPOOL_DEBUG uint64_t cookie; /**< Debug cookie. */ #endif @@ -201,12 +183,30 @@ struct rte_mempool_memhdr { STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */ struct rte_mempool *mp; /**< The mempool owning the chunk */ void *addr; /**< Virtual address of the chunk */ - phys_addr_t phys_addr; /**< Physical address of the chunk */ + RTE_STD_C11 + union { + rte_iova_t iova; /**< IO address of the chunk */ + phys_addr_t phys_addr; /**< Physical address of the chunk */ + }; size_t len; /**< length of the chunk */ rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */ void *opaque; /**< Argument passed to the free callback */ }; +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Additional information about the mempool + * + * The structure is cache-line aligned to avoid ABI breakages in + * a number of cases when something small is added. + */ +struct rte_mempool_info { + /** Number of objects in the contiguous block */ + unsigned int contig_block_size; +} __rte_cache_aligned; + /** * The RTE mempool structure. */ @@ -224,7 +224,7 @@ struct rte_mempool { }; void *pool_config; /**< optional args for ops alloc. */ const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */ - int flags; /**< Flags of the mempool. */ + unsigned int flags; /**< Flags of the mempool. */ int socket_id; /**< Socket id passed at create. */ uint32_t size; /**< Max size of the mempool. */ uint32_t cache_size; @@ -262,7 +262,8 @@ struct rte_mempool { #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ #define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */ -#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */ +#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */ +#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */ /** * @internal When debug is enabled, store some statistics. 
@@ -282,8 +283,16 @@ struct rte_mempool { mp->stats[__lcore_id].name##_bulk += 1; \ } \ } while(0) +#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do { \ + unsigned int __lcore_id = rte_lcore_id(); \ + if (__lcore_id < RTE_MAX_LCORE) { \ + mp->stats[__lcore_id].name##_blks += n; \ + mp->stats[__lcore_id].name##_bulk += 1; \ + } \ + } while (0) #else #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0) +#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0) #endif /** @@ -351,13 +360,45 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0) #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Check contiguous object blocks and update cookies or panic. + * + * @param mp + * Pointer to the memory pool. + * @param first_obj_table_const + * Pointer to a table of void * pointers (first object of the contiguous + * object blocks). + * @param n + * Number of contiguous object blocks. + * @param free + * - 0: object is supposed to be allocated, mark it as free + * - 1: object is supposed to be free, mark it as allocated + * - 2: just check that cookie is valid (free or allocated) + */ +void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, + void * const *first_obj_table_const, unsigned int n, int free); + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ + free) \ + rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ + free) +#else +#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ + free) \ + do {} while (0) +#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ + #define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */ /** * Prototype for implementation specific data provisioning function. * * The function should provide the implementation specific memory for - * for use by the other mempool ops functions in a given mempool ops struct. + * use by the other mempool ops functions in a given mempool ops struct. * E.g. the default ops provides an instance of the rte_ring for this purpose. * it will most likely point to a different type of data structure, and * will be transparent to the application programmer. @@ -382,11 +423,134 @@ typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp, typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp, void **obj_table, unsigned int n); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Dequeue a number of contiguous object blocks from the external pool. + */ +typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp, + void **first_obj_table, unsigned int n); + /** * Return the number of available objects in the external pool. */ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp); +/** + * Calculate memory size required to store given number of objects. + * + * If mempool objects are not required to be IOVA-contiguous + * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines + * virtually contiguous chunk size. Otherwise, if mempool objects must + * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear), + * min_chunk_size defines IOVA-contiguous chunk size. + * + * @param[in] mp + * Pointer to the memory pool. + * @param[in] obj_num + * Number of objects. + * @param[in] pg_shift + * LOG2 of the physical pages size. 
If set to 0, ignore page boundaries. + * @param[out] min_chunk_size + * Location for minimum size of the memory chunk which may be used to + * store memory pool objects. + * @param[out] align + * Location for required memory chunk alignment. + * @return + * Required memory size. + */ +typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, + size_t *min_chunk_size, size_t *align); + +/** + * Default way to calculate memory size required to store given number of + * objects. + * + * If page boundaries may be ignored, it is just a product of total + * object size including header and trailer and number of objects. + * Otherwise, it is a number of pages required to store given number of + * objects without crossing page boundary. + * + * Note that if object size is bigger than page size, then it assumes + * that pages are grouped in subsets of physically continuous pages big + * enough to store at least one object. + * + * Minimum size of memory chunk is the total element size. + * Required memory chunk alignment is the cache line size. + */ +ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, + size_t *min_chunk_size, size_t *align); + +/** + * Function to be called for each populated object. + * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] opaque + * An opaque pointer passed to iterator. + * @param[in] vaddr + * Object virtual address. + * @param[in] iova + * Input/output virtual address of the object or RTE_BAD_IOVA. + */ +typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp, + void *opaque, void *vaddr, rte_iova_t iova); + +/** + * Populate memory pool objects using provided memory chunk. + * + * Populated objects should be enqueued to the pool, e.g. using + * rte_mempool_ops_enqueue_bulk(). + * + * If the given IO address is unknown (iova = RTE_BAD_IOVA), + * the chunk doesn't need to be physically contiguous (only virtually), + * and allocated objects may span two pages. + * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] max_objs + * Maximum number of objects to be populated. + * @param[in] vaddr + * The virtual address of memory that should be used to store objects. + * @param[in] iova + * The IO address + * @param[in] len + * The length of memory in bytes. + * @param[in] obj_cb + * Callback function to be executed for each populated object. + * @param[in] obj_cb_arg + * An opaque pointer passed to the callback function. + * @return + * The number of objects added on success. + * On error, no objects are populated and a negative errno is returned. + */ +typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp, + unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); + +/** + * Default way to populate memory pool object using provided memory + * chunk: just slice objects one by one. + */ +int rte_mempool_op_populate_default(struct rte_mempool *mp, + unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Get some additional information about a mempool. 
+ */
+typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
+		struct rte_mempool_info *info);
+
+
 /** Structure defining mempool operations structure */
 struct rte_mempool_ops {
 	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -395,6 +559,24 @@ struct rte_mempool_ops {
 	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
 	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
 	rte_mempool_get_count get_count; /**< Get qty of available objs. */
+	/**
+	 * Optional callback to calculate memory size required to
+	 * store specified number of objects.
+	 */
+	rte_mempool_calc_mem_size_t calc_mem_size;
+	/**
+	 * Optional callback to populate mempool objects using
+	 * provided memory chunk.
+	 */
+	rte_mempool_populate_t populate;
+	/**
+	 * Get mempool info
+	 */
+	rte_mempool_get_info_t get_info;
+	/**
+	 * Dequeue a number of contiguous object blocks.
+	 */
+	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
 } __rte_cache_aligned;
 
 #define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
@@ -472,6 +654,30 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
 	return ops->dequeue(mp, obj_table, n);
 }
 
+/**
+ * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
+ *
+ * @param[in] mp
+ *	Pointer to the memory pool.
+ * @param[out] first_obj_table
+ *	Pointer to a table of void * pointers (first objects).
+ * @param[in] n
+ *	Number of blocks to get.
+ * @return
+ *	- 0: Success; got n objects.
+ *	- <0: Error; code of dequeue function.
+ */
+static inline int
+rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
+		void **first_obj_table, unsigned int n)
+{
+	struct rte_mempool_ops *ops;
+
+	ops = rte_mempool_get_ops(mp->ops_index);
+	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
+}
+
 /**
  * @internal wrapper for mempool_ops enqueue callback.
  *
@@ -506,6 +712,76 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 unsigned
 rte_mempool_ops_get_count(const struct rte_mempool *mp);
 
+/**
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate size of memory required to store specified number of
+ * objects.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[in] obj_num
+ *   Number of objects.
+ * @param[in] pg_shift
+ *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param[out] align
+ *   Location for required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+				      uint32_t obj_num, uint32_t pg_shift,
+				      size_t *min_chunk_size, size_t *align);
+
+/**
+ * @internal wrapper for mempool_ops populate callback.
+ *
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] max_objs
+ *   Maximum number of objects to be populated.
+ * @param[in] vaddr
+ *   The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address
+ * @param[in] len
+ *   The length of memory in bytes.
+ * @param[in] obj_cb
+ *   Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ *   An opaque pointer passed to the callback function.
+ * @return
+ *   The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned. + */ +int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, + void *obj_cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Wrapper for mempool_ops get_info callback. + * + * @param[in] mp + * Pointer to the memory pool. + * @param[out] info + * Pointer to the rte_mempool_info structure + * @return + * - 0: Success; The mempool driver supports retrieving supplementary + * mempool information + * - -ENOTSUP - doesn't support get_info ops (valid case). + */ +__rte_experimental +int rte_mempool_ops_get_info(const struct rte_mempool *mp, + struct rte_mempool_info *info); + /** * @internal wrapper for mempool_ops free callback. * @@ -551,12 +827,11 @@ int rte_mempool_register_ops(const struct rte_mempool_ops *ops); /** * Macro to statically register the ops of a mempool handler. * Note that the rte_mempool_register_ops fails silently here when - * more then RTE_MEMPOOL_MAX_OPS_IDX is registered. + * more than RTE_MEMPOOL_MAX_OPS_IDX is registered. */ -#define MEMPOOL_REGISTER_OPS(ops) \ - void mp_hdlr_init_##ops(void); \ - void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\ - { \ +#define MEMPOOL_REGISTER_OPS(ops) \ + RTE_INIT(mp_hdlr_init_##ops) \ + { \ rte_mempool_register_ops(&ops); \ } @@ -589,10 +864,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); /** * Create a new mempool named *name* in memory. * - * This function uses ``memzone_reserve()`` to allocate memory. The + * This function uses ``rte_memzone_reserve()`` to allocate memory. The * pool contains n elements of elt_size. Its size is set to n. - * All elements of the mempool are allocated together with the mempool header, - * in one physically continuous chunk of memory. * * @param name * The name of the mempool. @@ -612,9 +885,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); * never be used. The access to the per-lcore table is of course * faster than the multi-producer/consumer pool. The cache can be * disabled if the cache_size argument is set to 0; it can be useful to - * avoid losing objects in cache. Note that even if not used, the - * memory space for cache is always reserved in a mempool structure, - * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. + * avoid losing objects in cache. * @param private_data_size * The size of the private data appended after the mempool * structure. This is useful for storing some private data after the @@ -657,8 +928,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior * when using rte_mempool_get() or rte_mempool_get_bulk() is * "single-consumer". Otherwise, it is "multi-consumers". - * - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't - * necessarilly be contiguous in physical memory. + * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't + * necessarily be contiguous in IO memory. * @return * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. Possible rte_errno values include: @@ -676,79 +947,12 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags); -/** - * Create a new mempool named *name* in memory. - * - * The pool contains n elements of elt_size. 
Its size is set to n. - * This function uses ``memzone_reserve()`` to allocate the mempool header - * (and the objects if vaddr is NULL). - * Depending on the input parameters, mempool elements can be either allocated - * together with the mempool header, or an externally provided memory buffer - * could be used to store mempool objects. In later case, that external - * memory buffer can consist of set of disjoint physical pages. - * - * @param name - * The name of the mempool. - * @param n - * The number of elements in the mempool. The optimum size (in terms of - * memory usage) for a mempool is when n is a power of two minus one: - * n = (2^q - 1). - * @param elt_size - * The size of each element. - * @param cache_size - * Size of the cache. See rte_mempool_create() for details. - * @param private_data_size - * The size of the private data appended after the mempool - * structure. This is useful for storing some private data after the - * mempool structure, as is done for rte_mbuf_pool for example. - * @param mp_init - * A function pointer that is called for initialization of the pool, - * before object initialization. The user can initialize the private - * data in this function if needed. This parameter can be NULL if - * not needed. - * @param mp_init_arg - * An opaque pointer to data that can be used in the mempool - * constructor function. - * @param obj_init - * A function called for each object at initialization of the pool. - * See rte_mempool_create() for details. - * @param obj_init_arg - * An opaque pointer passed to the object constructor function. - * @param socket_id - * The *socket_id* argument is the socket identifier in the case of - * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA - * constraint for the reserved zone. - * @param flags - * Flags controlling the behavior of the mempool. See - * rte_mempool_create() for details. - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param paddr - * Array of physical addresses of the pages that comprises given memory - * buffer. - * @param pg_num - * Number of elements in the paddr array. - * @param pg_shift - * LOG2 of the physical pages size. - * @return - * The pointer to the new allocated mempool, on success. NULL on error - * with rte_errno set appropriately. See rte_mempool_create() for details. - */ -struct rte_mempool * -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags, void *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift); - /** * Create an empty mempool * * The mempool is allocated and initialized, but it is not populated: no * memory is allocated for the mempool elements. The user has to call - * rte_mempool_populate_*() or to add memory chunks to the pool. Once + * rte_mempool_populate_*() to add memory chunks to the pool. Once * populated, the user may also want to initialize each object with * rte_mempool_obj_iter(). * @@ -798,14 +1002,18 @@ rte_mempool_free(struct rte_mempool *mp); * Add physically contiguous memory for objects in the pool at init * * Add a virtually and physically contiguous memory chunk in the pool - * where objects can be instanciated. + * where objects can be instantiated. 
+ * + * If the given IO address is unknown (iova = RTE_BAD_IOVA), + * the chunk doesn't need to be physically contiguous (only virtually), + * and allocated objects may span two pages. * * @param mp * A pointer to the mempool structure. * @param vaddr * The virtual address of memory that should be used to store objects. - * @param paddr - * The physical address + * @param iova + * The IO address * @param len * The length of memory in bytes. * @param free_cb @@ -817,54 +1025,22 @@ rte_mempool_free(struct rte_mempool *mp); * On error, the chunk is not added in the memory list of the * mempool and a negative errno is returned. */ -int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr, - phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, +int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, + rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); -/** - * Add physical memory for objects in the pool at init - * - * Add a virtually contiguous memory chunk in the pool where objects can - * be instanciated. The physical addresses corresponding to the virtual - * area are described in paddr[], pg_num, pg_shift. - * - * @param mp - * A pointer to the mempool structure. - * @param vaddr - * The virtual address of memory that should be used to store objects. - * @param paddr - * An array of physical addresses of each page composing the virtual - * area. - * @param pg_num - * Number of elements in the paddr array. - * @param pg_shift - * LOG2 of the physical pages size. - * @param free_cb - * The callback used to free this chunk when destroying the mempool. - * @param opaque - * An opaque argument passed to free_cb. - * @return - * The number of objects added on success. - * On error, the chunks are not added in the memory list of the - * mempool and a negative errno is returned. - */ -int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); - /** * Add virtually contiguous memory for objects in the pool at init * * Add a virtually contiguous memory chunk in the pool where objects can - * be instanciated. + * be instantiated. * * @param mp * A pointer to the mempool structure. * @param addr * The virtual address of memory that should be used to store objects. - * Must be page-aligned. * @param len - * The length of memory in bytes. Must be page-aligned. + * The length of memory in bytes. * @param pg_sz * The size of memory pages in this virtual area. * @param free_cb @@ -948,7 +1124,7 @@ uint32_t rte_mempool_mem_iter(struct rte_mempool *mp, rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg); /** - * Dump the status of the mempool to the console. + * Dump the status of the mempool to a file. * * @param f * A pointer to a file for output @@ -983,22 +1159,6 @@ rte_mempool_cache_create(uint32_t size, int socket_id); void rte_mempool_cache_free(struct rte_mempool_cache *cache); -/** - * Flush a user-owned mempool cache to the specified mempool. - * - * @param cache - * A pointer to the mempool cache. - * @param mp - * A pointer to the mempool. - */ -static inline void __attribute__((always_inline)) -rte_mempool_cache_flush(struct rte_mempool_cache *cache, - struct rte_mempool *mp) -{ - rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); - cache->len = 0; -} - /** * Get a pointer to the per-lcore default mempool cache. 
* @@ -1009,7 +1169,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache, * @return * A pointer to the mempool cache or NULL if disabled or non-EAL thread. */ -static inline struct rte_mempool_cache *__attribute__((always_inline)) +static __rte_always_inline struct rte_mempool_cache * rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) { if (mp->cache_size == 0) @@ -1021,6 +1181,26 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) return &mp->local_cache[lcore_id]; } +/** + * Flush a user-owned mempool cache to the specified mempool. + * + * @param cache + * A pointer to the mempool cache. + * @param mp + * A pointer to the mempool. + */ +static __rte_always_inline void +rte_mempool_cache_flush(struct rte_mempool_cache *cache, + struct rte_mempool *mp) +{ + if (cache == NULL) + cache = rte_mempool_default_cache(mp, rte_lcore_id()); + if (cache == NULL || cache->len == 0) + return; + rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); + cache->len = 0; +} + /** * @internal Put several objects back in the mempool; used internally. * @param mp @@ -1032,25 +1212,18 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) * positive. * @param cache * A pointer to a mempool cache structure. May be NULL if not needed. - * @param flags - * The flags used for the mempool creation. - * Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, - unsigned n, struct rte_mempool_cache *cache, int flags) + unsigned int n, struct rte_mempool_cache *cache) { void **cache_objs; /* increment stat now, adding in mempool always success */ __MEMPOOL_STAT_ADD(mp, put, n); - /* No cache provided or single producer */ - if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT)) - goto ring_enqueue; - - /* Go straight to ring if put would overflow mem allocated for cache */ - if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE)) + /* No cache provided or if put would overflow mem allocated for cache */ + if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE)) goto ring_enqueue; cache_objs = &cache->objs[cache->len]; @@ -1098,56 +1271,13 @@ ring_enqueue: * The number of objects to add in the mempool from the obj_table. * @param cache * A pointer to a mempool cache structure. May be NULL if not needed. - * @param flags - * The flags used for the mempool creation. - * Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, - unsigned n, struct rte_mempool_cache *cache, int flags) + unsigned int n, struct rte_mempool_cache *cache) { __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_generic_put(mp, obj_table, n, cache, flags); -} - -/** - * @deprecated - * Put several objects back in the mempool (multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj_table - * A pointer to a table of void * pointers (objects). - * @param n - * The number of objects to add in the mempool from the obj_table. 
- */ -__rte_deprecated -static inline void __attribute__((always_inline)) -rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) -{ - struct rte_mempool_cache *cache; - cache = rte_mempool_default_cache(mp, rte_lcore_id()); - rte_mempool_generic_put(mp, obj_table, n, cache, 0); -} - -/** - * @deprecated - * Put several objects back in the mempool (NOT multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj_table - * A pointer to a table of void * pointers (objects). - * @param n - * The number of objects to add in the mempool from obj_table. - */ -__rte_deprecated -static inline void __attribute__((always_inline)) -rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) -{ - rte_mempool_generic_put(mp, obj_table, n, NULL, MEMPOOL_F_SP_PUT); + __mempool_generic_put(mp, obj_table, n, cache); } /** @@ -1164,47 +1294,13 @@ rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table, * @param n * The number of objects to add in the mempool from obj_table. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) + unsigned int n) { struct rte_mempool_cache *cache; cache = rte_mempool_default_cache(mp, rte_lcore_id()); - rte_mempool_generic_put(mp, obj_table, n, cache, mp->flags); -} - -/** - * @deprecated - * Put one object in the mempool (multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj - * A pointer to the object to be added. - */ -__rte_deprecated -static inline void __attribute__((always_inline)) -rte_mempool_mp_put(struct rte_mempool *mp, void *obj) -{ - struct rte_mempool_cache *cache; - cache = rte_mempool_default_cache(mp, rte_lcore_id()); - rte_mempool_generic_put(mp, &obj, 1, cache, 0); -} - -/** - * @deprecated - * Put one object back in the mempool (NOT multi-producers safe). - * - * @param mp - * A pointer to the mempool structure. - * @param obj - * A pointer to the object to be added. - */ -__rte_deprecated -static inline void __attribute__((always_inline)) -rte_mempool_sp_put(struct rte_mempool *mp, void *obj) -{ - rte_mempool_generic_put(mp, &obj, 1, NULL, MEMPOOL_F_SP_PUT); + rte_mempool_generic_put(mp, obj_table, n, cache); } /** @@ -1219,7 +1315,7 @@ rte_mempool_sp_put(struct rte_mempool *mp, void *obj) * @param obj * A pointer to the object to be added. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj) { rte_mempool_put_bulk(mp, &obj, 1); @@ -1235,24 +1331,20 @@ rte_mempool_put(struct rte_mempool *mp, void *obj) * The number of objects to get, must be strictly positive. * @param cache * A pointer to a mempool cache structure. May be NULL if not needed. - * @param flags - * The flags used for the mempool creation. - * Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers. * @return * - >=0: Success; number of objects supplied. * - <0: Error; code of ring dequeue function. 
*/ -static inline int __attribute__((always_inline)) +static __rte_always_inline int __mempool_generic_get(struct rte_mempool *mp, void **obj_table, - unsigned n, struct rte_mempool_cache *cache, int flags) + unsigned int n, struct rte_mempool_cache *cache) { int ret; uint32_t index, len; void **cache_objs; - /* No cache provided or single consumer */ - if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET || - n >= cache->size)) + /* No cache provided or cannot be satisfied from cache */ + if (unlikely(cache == NULL || n >= cache->size)) goto ring_dequeue; cache_objs = cache->objs; @@ -1267,7 +1359,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table, &cache->objs[cache->len], req); if (unlikely(ret < 0)) { /* - * In the offchance that we are buffer constrained, + * In the off chance that we are buffer constrained, * where we are not able to allocate cache + n, go to * the ring directly. If that fails, we are truly out of * buffers. @@ -1317,80 +1409,21 @@ ring_dequeue: * The number of objects to get from mempool to obj_table. * @param cache * A pointer to a mempool cache structure. May be NULL if not needed. - * @param flags - * The flags used for the mempool creation. - * Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers. * @return * - 0: Success; objects taken. * - -ENOENT: Not enough entries in the mempool; no object is retrieved. */ -static inline int __attribute__((always_inline)) -rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n, - struct rte_mempool_cache *cache, int flags) +static __rte_always_inline int +rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { int ret; - ret = __mempool_generic_get(mp, obj_table, n, cache, flags); + ret = __mempool_generic_get(mp, obj_table, n, cache); if (ret == 0) __mempool_check_cookies(mp, obj_table, n, 1); return ret; } -/** - * @deprecated - * Get several objects from the mempool (multi-consumers safe). - * - * If cache is enabled, objects will be retrieved first from cache, - * subsequently from the common pool. Note that it can return -ENOENT when - * the local cache and common pool are empty, even if cache from other - * lcores are full. - * - * @param mp - * A pointer to the mempool structure. - * @param obj_table - * A pointer to a table of void * pointers (objects) that will be filled. - * @param n - * The number of objects to get from mempool to obj_table. - * @return - * - 0: Success; objects taken. - * - -ENOENT: Not enough entries in the mempool; no object is retrieved. - */ -__rte_deprecated -static inline int __attribute__((always_inline)) -rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n) -{ - struct rte_mempool_cache *cache; - cache = rte_mempool_default_cache(mp, rte_lcore_id()); - return rte_mempool_generic_get(mp, obj_table, n, cache, 0); -} - -/** - * @deprecated - * Get several objects from the mempool (NOT multi-consumers safe). - * - * If cache is enabled, objects will be retrieved first from cache, - * subsequently from the common pool. Note that it can return -ENOENT when - * the local cache and common pool are empty, even if cache from other - * lcores are full. - * - * @param mp - * A pointer to the mempool structure. - * @param obj_table - * A pointer to a table of void * pointers (objects) that will be filled. - * @param n - * The number of objects to get from the mempool to obj_table. - * @return - * - 0: Success; objects taken. 
- *     - -ENOENT: Not enough entries in the mempool; no object is
- *       retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	return rte_mempool_generic_get(mp, obj_table, n, NULL,
-				       MEMPOOL_F_SC_GET);
-}
-
 /**
  * Get several objects from the mempool.
  *
@@ -1413,62 +1446,12 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
-rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	struct rte_mempool_cache *cache;
-	cache = rte_mempool_default_cache(mp, rte_lcore_id());
-	return rte_mempool_generic_get(mp, obj_table, n, cache, mp->flags);
-}
-
-/**
- * @deprecated
- * Get one object from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_p
- *   A pointer to a void * pointer (object) that will be filled.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
+static __rte_always_inline int
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
 {
 	struct rte_mempool_cache *cache;
 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
-	return rte_mempool_generic_get(mp, obj_p, 1, cache, 0);
-}
-
-/**
- * @deprecated
- * Get one object from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @param obj_p
- *   A pointer to a void * pointer (object) that will be filled.
- * @return
- *   - 0: Success; objects taken.
- *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
-{
-	return rte_mempool_generic_get(mp, obj_p, 1, NULL, MEMPOOL_F_SC_GET);
+	return rte_mempool_generic_get(mp, obj_table, n, cache);
 }
 
 /**
@@ -1491,41 +1474,68 @@ rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 {
 	return rte_mempool_get_bulk(mp, obj_p, 1);
 }
 
 /**
- * Return the number of entries in the mempool.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
  *
- * When cache is enabled, this function has to browse the length of
- * all lcores, so it should not be used in a data path, but only for
- * debug purposes. User-owned mempool caches are not accounted for.
+ * Get contiguous blocks of objects from the mempool.
+ *
+ * If cache is enabled, consider flushing it first, to reuse objects
+ * as soon as possible.
+ *
+ * The application should check that the driver supports the operation
+ * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
+ * is not zero.
  *
  * @param mp
  *   A pointer to the mempool structure.
+ * @param first_obj_table
+ *   A pointer to a pointer to the first object in each block.
+ * @param n
+ *   The number of blocks to get from mempool.
  * @return
- *   The number of entries in the mempool.
- */
-unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
+ *   - 0: Success; blocks taken.
+ *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
+ *   - -EOPNOTSUPP: The mempool driver does not support block dequeue
+ */
+static __rte_always_inline int
+__rte_experimental
+rte_mempool_get_contig_blocks(struct rte_mempool *mp,
+			      void **first_obj_table, unsigned int n)
+{
+	int ret;
+
+	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
+	if (ret == 0) {
+		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
+		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
+						      1);
+	} else {
+		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
+	}
+
+	return ret;
+}
 
 /**
- * @deprecated
  * Return the number of entries in the mempool.
  *
  * When cache is enabled, this function has to browse the length of
  * all lcores, so it should not be used in a data path, but only for
- * debug purposes.
+ * debug purposes. User-owned mempool caches are not accounted for.
  *
  * @param mp
  *   A pointer to the mempool structure.
  * @return
  *   The number of entries in the mempool.
  */
-__rte_deprecated
-unsigned rte_mempool_count(const struct rte_mempool *mp);
+unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
 
 /**
  * Return the number of elements which have been allocated from the mempool
@@ -1542,31 +1552,6 @@ unsigned rte_mempool_count(const struct rte_mempool *mp);
 unsigned int
 rte_mempool_in_use_count(const struct rte_mempool *mp);
 
-/**
- * @deprecated
- * Return the number of free entries in the mempool ring.
- * i.e. how many entries can be freed back to the mempool.
- *
- * NOTE: This corresponds to the number of elements *allocated* from the
- * memory pool, not the number of elements in the pool itself. To count
- * the number elements currently available in the pool, use "rte_mempool_count"
- *
- * When cache is enabled, this function has to browse the length of
- * all lcores, so it should not be used in a data path, but only for
- * debug purposes. User-owned mempool caches are not accounted for.
- *
- * @param mp
- *   A pointer to the mempool structure.
- * @return
- *   The number of free entries in the mempool.
- */
-__rte_deprecated
-static inline unsigned
-rte_mempool_free_count(const struct rte_mempool *mp)
-{
-	return rte_mempool_in_use_count(mp);
-}
-
 /**
  * Test if the mempool is full.
  *
@@ -1606,24 +1591,22 @@ rte_mempool_empty(const struct rte_mempool *mp)
 }
 
 /**
- * Return the physical address of elt, which is an element of the pool mp.
+ * Return the IO address of elt, which is an element of the pool mp.
  *
- * @param mp
- *   A pointer to the mempool structure.
  * @param elt
  *   A pointer (virtual address) to the element of the pool.
  * @return
- *   The physical address of the elt element.
- *   If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
- *   returned value is RTE_BAD_PHYS_ADDR.
+ *   The IO address of the elt element.
+ * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the + * returned value is RTE_BAD_IOVA. */ -static inline phys_addr_t -rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt) +static inline rte_iova_t +rte_mempool_virt2iova(const void *elt) { const struct rte_mempool_objhdr *hdr; hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt, sizeof(*hdr)); - return hdr->physaddr; + return hdr->iova; } /** @@ -1694,61 +1677,6 @@ struct rte_mempool *rte_mempool_lookup(const char *name); uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, struct rte_mempool_objsz *sz); -/** - * Get the size of memory required to store mempool elements. - * - * Calculate the maximum amount of memory required to store given number - * of objects. Assume that the memory buffer will be aligned at page - * boundary. - * - * Note that if object size is bigger then page size, then it assumes - * that pages are grouped in subsets of physically continuous pages big - * enough to store at least one object. - * - * @param elt_num - * Number of elements. - * @param total_elt_sz - * The size of each element, including header and trailer, as returned - * by rte_mempool_calc_obj_size(). - * @param pg_shift - * LOG2 of the physical pages size. If set to 0, ignore page boundaries. - * @return - * Required memory size aligned at page boundary. - */ -size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, - uint32_t pg_shift); - -/** - * Get the size of memory required to store mempool elements. - * - * Calculate how much memory would be actually required with the given - * memory footprint to store required number of objects. - * - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param elt_num - * Number of elements. - * @param total_elt_sz - * The size of each element, including header and trailer, as returned - * by rte_mempool_calc_obj_size(). - * @param paddr - * Array of physical addresses of the pages that comprises given memory - * buffer. - * @param pg_num - * Number of elements in the paddr array. - * @param pg_shift - * LOG2 of the physical pages size. - * @return - * On success, the number of bytes needed to store given number of - * objects, aligned to the given page size. If the provided memory - * buffer is too small, return a negative value whose absolute value - * is the actual number of elements that can be stored in that buffer. - */ -ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, - size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num, - uint32_t pg_shift); - /** * Walk list of all memory pools * @@ -1760,6 +1688,17 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg), void *arg); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Get page size used for mempool object allocation. + * This function is internal to mempool library and mempool drivers. + */ +__rte_experimental +int +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz); + #ifdef __cplusplus } #endif
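Usage note on the rte_mempool_create() contract documented in the diff: the following minimal sketch is illustrative only and not part of the patch; the pool name, element count (8191, i.e. 2^13 - 1, the optimum noted in the docs), element size and cache size are arbitrary example values.

#include <errno.h>
#include <rte_errno.h>
#include <rte_mempool.h>

/* Illustrative sketch: create a pool with a per-lcore cache and
 * round-trip one object through the default get/put path. */
static int
mempool_roundtrip(void)
{
	struct rte_mempool *mp;
	void *obj;

	mp = rte_mempool_create("example_pool", 8191, 2048,
				256,		/* per-lcore cache size */
				0,		/* no private data area */
				NULL, NULL,	/* no pool constructor */
				NULL, NULL,	/* no per-object constructor */
				SOCKET_ID_ANY,
				0);		/* multi-producer, multi-consumer */
	if (mp == NULL)
		return -rte_errno;

	if (rte_mempool_get(mp, &obj) < 0)
		return -ENOENT;		/* pool is empty */

	/* ... use the object ... */

	rte_mempool_put(mp, obj);	/* returns via the lcore cache */
	return 0;
}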
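The rewritten note at the top of the header recommends that non-EAL threads pair rte_mempool_generic_get()/rte_mempool_generic_put() with a user-owned cache from rte_mempool_cache_create(). A sketch of that pattern, with arbitrary cache and burst sizes:

#include <errno.h>
#include <rte_mempool.h>

/* Illustrative sketch: a non-EAL thread uses a user-owned cache with
 * the generic get/put API instead of rte_mempool_get()/rte_mempool_put(),
 * which would bypass caching for such threads. */
static int
non_eal_thread_burst(struct rte_mempool *mp)
{
	struct rte_mempool_cache *cache;
	void *objs[32];
	int ret;

	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
	if (cache == NULL)
		return -ENOMEM;

	ret = rte_mempool_generic_get(mp, objs, 32, cache);
	if (ret == 0) {
		/* ... work on the 32 objects ... */
		rte_mempool_generic_put(mp, objs, 32, cache);
	}

	/* Return any objects still held by the cache, then release it. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return ret;
}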
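For the experimental block API, the comments advise verifying driver support through rte_mempool_ops_get_info() before calling rte_mempool_get_contig_blocks(). An illustrative sketch:

#include <errno.h>
#include <rte_mempool.h>

/* Illustrative sketch: probe for block-dequeue support, then dequeue
 * n contiguous blocks of objects. */
static int
dequeue_blocks(struct rte_mempool *mp, void **first_objs, unsigned int n)
{
	struct rte_mempool_info info;

	if (rte_mempool_ops_get_info(mp, &info) != 0 ||
	    info.contig_block_size == 0)
		return -EOPNOTSUPP;	/* driver has no dequeue_contig_blocks */

	/* first_objs[i] receives the first object of the i-th block;
	 * each block holds info.contig_block_size objects. */
	return rte_mempool_get_contig_blocks(mp, first_objs, n);
}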
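rte_mempool_virt2iova() supersedes rte_mempool_virt2phy() for obtaining a DMA-able address; pools created with MEMPOOL_F_NO_IOVA_CONTIG return RTE_BAD_IOVA. For example:

#include <rte_mempool.h>

/* Illustrative: translate a pool element to its IO address before
 * handing it to hardware. */
static rte_iova_t
obj_dma_addr(void *obj)
{
	rte_iova_t iova = rte_mempool_virt2iova(obj);

	if (iova == RTE_BAD_IOVA) {
		/* The pool was created with MEMPOOL_F_NO_IOVA_CONTIG,
		 * so this object cannot be given to a device directly. */
	}
	return iova;
}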
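Finally, the extended struct rte_mempool_ops and the RTE_INIT-based MEMPOOL_REGISTER_OPS() fit together as in the skeleton below. Every "stub" name is hypothetical; the optional calc_mem_size and populate slots point at the library defaults declared above, which keeps the historical sizing and layout behavior, while get_info and dequeue_contig_blocks may simply be left NULL when unsupported.

#include <errno.h>
#include <rte_mempool.h>

/* Hypothetical skeleton driver showing where the new optional
 * callbacks plug in; the bodies are placeholders, not a working pool. */
static int
stub_alloc(struct rte_mempool *mp)
{
	mp->pool_data = NULL;	/* a real driver allocates its ring/stack here */
	return 0;
}

static void
stub_free(struct rte_mempool *mp)
{
	(void)mp;		/* a real driver frees pool_data here */
}

static int
stub_enqueue(struct rte_mempool *mp, void * const *obj_table, unsigned int n)
{
	(void)mp; (void)obj_table; (void)n;
	return 0;		/* a real driver stores the n objects */
}

static int
stub_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	(void)mp; (void)obj_table; (void)n;
	return -ENOENT;		/* a real driver hands out n objects */
}

static unsigned
stub_get_count(const struct rte_mempool *mp)
{
	(void)mp;
	return 0;
}

static const struct rte_mempool_ops stub_ops = {
	.name = "stub",
	.alloc = stub_alloc,
	.free = stub_free,
	.enqueue = stub_enqueue,
	.dequeue = stub_dequeue,
	.get_count = stub_get_count,
	/* new optional callbacks: defaults preserve legacy behavior */
	.calc_mem_size = rte_mempool_op_calc_mem_size_default,
	.populate = rte_mempool_op_populate_default,
};

MEMPOOL_REGISTER_OPS(stub_ops);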