X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.h;h=0b83d5ec29ccaca64ae0fedf9086cafcc2194f56;hb=ce1f2c61ed135e4133d0429e86e554bfd4d58cb0;hp=41f8b2daf84a144ca5feecba94305bd8818c7aee;hpb=efd785f99489478d14e1c222e243ecbd0b151507;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 41f8b2daf8..0b83d5ec29 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1,35 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   Copyright(c) 2016 6WIND S.A.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
  */
 
 #ifndef _RTE_MEMPOOL_H_
@@ -69,6 +40,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -272,25 +244,8 @@ struct rte_mempool {
 #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
 #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
 #define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
-#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
-/**
- * This capability flag is advertised by a mempool handler, if the whole
- * memory area containing the objects must be physically contiguous.
- * Note: This flag should not be passed by application.
- */
-#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
-/**
- * This capability flag is advertised by a mempool handler. Used for a case
- * where mempool driver wants object start address(vaddr) aligned to block
- * size(/ total element size).
- *
- * Note:
- * - This flag should not be passed by application.
- *   Flag used for mempool driver only.
- * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with
- *   MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS.
- */
-#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
+#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA-contiguous objs. */
+#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
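
The renamed flag keeps the old bit value (0x0020), and the old name remains as a deprecated alias, so existing callers keep compiling. A minimal usage sketch, not part of the patch (pool name and sizing below are illustrative), for an application that only ever dereferences its objects through virtual addresses:

#include <rte_mempool.h>

static struct rte_mempool *
create_example_pool(void)
{
	/* Illustrative sizing; pick values to suit the application. */
	return rte_mempool_create("example_pool",
			4096,       /* number of objects */
			2048,       /* object size in bytes */
			256,        /* per-lcore cache size */
			0,          /* private data size */
			NULL, NULL, /* no pool constructor */
			NULL, NULL, /* no per-object constructor */
			SOCKET_ID_ANY,
			MEMPOOL_F_NO_IOVA_CONTIG); /* objects need not be
						    * IOVA-contiguous */
}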
 
 /**
  * @internal When debug is enabled, store some statistics.
@@ -415,18 +370,119 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
  */
 typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
 
-/**
- * Get the mempool capabilities.
- */
-typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
-		unsigned int *flags);
-
 /**
  * Notify new memory area to mempool.
  */
 typedef int (*rte_mempool_ops_register_memory_area_t)
 (const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
 
+/**
+ * Calculate the memory size required to store a given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * the size of a virtually contiguous chunk. Otherwise, if mempool
+ * objects must be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG
+ * is clear), min_chunk_size defines the size of an IOVA-contiguous chunk.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[in] obj_num
+ *   Number of objects.
+ * @param[in] pg_shift
+ *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ *   Location for the minimum size of the memory chunk which may be used
+ *   to store memory pool objects.
+ * @param[out] align
+ *   Location for the required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate the memory size required to store a given
+ * number of objects.
+ *
+ * If page boundaries may be ignored, it is just the product of the total
+ * object size (including header and trailer) and the number of objects.
+ * Otherwise, it is the number of pages required to store the given number
+ * of objects without crossing a page boundary.
+ *
+ * Note that if the object size is bigger than the page size, it assumes
+ * that pages are grouped in subsets of physically contiguous pages big
+ * enough to store at least one object.
+ *
+ * The minimum size of the memory chunk is the maximum of the page size
+ * and the total element size.
+ *
+ * The required memory chunk alignment is the maximum of the page size
+ * and the cache line size.
+ */
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
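
A sketch of how a mempool driver might implement the new callback; the handler name and the extra alignment constraint are hypothetical, while rte_mempool_op_calc_mem_size_default() and RTE_CACHE_LINE_SIZE are the real symbols:

/* Hypothetical driver callback built on the default calculation. */
static ssize_t
example_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
	ssize_t mem_size;

	/* Reuse the default size/chunk/alignment computation... */
	mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num,
			pg_shift, min_chunk_size, align);
	if (mem_size < 0)
		return mem_size; /* propagate the error */

	/* ...then apply an illustrative driver-specific constraint. */
	if (*align < RTE_CACHE_LINE_SIZE)
		*align = RTE_CACHE_LINE_SIZE;
	return mem_size;
}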
 
+/**
+ * Function to be called for each populated object.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] opaque
+ *   An opaque pointer passed to the iterator.
+ * @param[in] vaddr
+ *   Object virtual address.
+ * @param[in] iova
+ *   IO virtual address of the object, or RTE_BAD_IOVA.
+ */
+typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
+		void *opaque, void *vaddr, rte_iova_t iova);
+
+/**
+ * Populate memory pool objects using the provided memory chunk.
+ *
+ * Populated objects should be enqueued to the pool, e.g. using
+ * rte_mempool_ops_enqueue_bulk().
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] max_objs
+ *   Maximum number of objects to be populated.
+ * @param[in] vaddr
+ *   The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address.
+ * @param[in] len
+ *   The length of memory in bytes.
+ * @param[in] obj_cb
+ *   Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ *   An opaque pointer passed to the callback function.
+ * @return
+ *   The number of objects added on success.
+ *   On error, no objects are populated and a negative errno is returned.
+ */
+typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+		unsigned int max_objs,
+		void *vaddr, rte_iova_t iova, size_t len,
+		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Default way to populate memory pool objects using the provided memory
+ * chunk: just slice objects one by one.
+ */
+int rte_mempool_op_populate_default(struct rte_mempool *mp,
+		unsigned int max_objs,
+		void *vaddr, rte_iova_t iova, size_t len,
+		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
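
Similarly, a driver with no special layout requirements can forward its populate callback to the default slicer. The function below is a hypothetical sketch; a real handler would wire it, together with its calc_mem_size callback, into a struct rte_mempool_ops and register it with MEMPOOL_REGISTER_OPS():

/* Hypothetical driver callback delegating to the default slicer. */
static int
example_populate(struct rte_mempool *mp, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	/* A real driver could set up IO mappings for [vaddr, vaddr + len)
	 * here before handing the chunk to the default implementation,
	 * which slices it into objects and invokes obj_cb on each one.
	 */
	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova,
			len, obj_cb, obj_cb_arg);
}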
 
 /** Structure defining mempool operations structure */
 struct rte_mempool_ops {
 	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -435,14 +491,20 @@ struct rte_mempool_ops {
 	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
 	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
 	rte_mempool_get_count get_count; /**< Get qty of available objs. */
-	/**
-	 * Get the mempool capabilities
-	 */
-	rte_mempool_get_capabilities_t get_capabilities;
 	/**
 	 * Notify new memory area to mempool
 	 */
 	rte_mempool_ops_register_memory_area_t register_memory_area;
+	/**
+	 * Optional callback to calculate the memory size required to
+	 * store the specified number of objects.
+	 */
+	rte_mempool_calc_mem_size_t calc_mem_size;
+	/**
+	 * Optional callback to populate mempool objects using the
+	 * provided memory chunk.
+	 */
+	rte_mempool_populate_t populate;
 } __rte_cache_aligned;
 
 #define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
@@ -554,22 +616,6 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 unsigned
 rte_mempool_ops_get_count(const struct rte_mempool *mp);
 
-/**
- * @internal wrapper for mempool_ops get_capabilities callback.
- *
- * @param mp [in]
- *   Pointer to the memory pool.
- * @param flags [out]
- *   Pointer to the mempool flags.
- * @return
- *   - 0: Success; The mempool driver has advertised his pool capabilities in
- *     flags param.
- *   - -ENOTSUP - doesn't support get_capabilities ops (valid case).
- *   - Otherwise, pool create fails.
- */
-int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
-					unsigned int *flags);
 /**
  * @internal wrapper for mempool_ops register_memory_area callback.
  * API to notify the mempool handler when a new memory area is added to pool.
@@ -591,6 +637,57 @@ int
 rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
 				char *vaddr, rte_iova_t iova, size_t len);
 
+/**
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate the size of memory required to store the specified
+ * number of objects.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[in] obj_num
+ *   Number of objects.
+ * @param[in] pg_shift
+ *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ *   Location for the minimum size of the memory chunk which may be used
+ *   to store memory pool objects.
+ * @param[out] align
+ *   Location for the required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
+
+/**
+ * @internal wrapper for mempool_ops populate callback.
+ *
+ * Populate memory pool objects using the provided memory chunk.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] max_objs
+ *   Maximum number of objects to be populated.
+ * @param[in] vaddr
+ *   The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address.
+ * @param[in] len
+ *   The length of memory in bytes.
+ * @param[in] obj_cb
+ *   Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ *   An opaque pointer passed to the callback function.
+ * @return
+ *   The number of objects added on success.
+ *   On error, no objects are populated and a negative errno is returned.
+ */
+int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+		void *vaddr, rte_iova_t iova, size_t len,
+		rte_mempool_populate_obj_cb_t *obj_cb,
+		void *obj_cb_arg);
+
 /**
  * @internal wrapper for mempool_ops free callback.
  *
@@ -738,8 +835,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
  *     when using rte_mempool_get() or rte_mempool_get_bulk() is
  *     "single-consumer". Otherwise, it is "multi-consumers".
- *   - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
- *     necessarily be contiguous in physical memory.
+ *   - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
+ *     necessarily be contiguous in IO memory.
  * @return
  *   The pointer to the newly allocated mempool, on success. NULL on error
  *   with rte_errno set appropriately. Possible rte_errno values include:
@@ -880,7 +977,7 @@ rte_mempool_free(struct rte_mempool *mp);
  * Add a virtually and physically contiguous memory chunk in the pool
  * where objects can be instantiated.
  *
- * If the given physical address is unknown (paddr = RTE_BAD_PHYS_ADDR),
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
  * the chunk doesn't need to be physically contiguous (only virtually),
  * and allocated objects may span two pages.
  *
@@ -888,8 +985,8 @@ rte_mempool_free(struct rte_mempool *mp);
  *   A pointer to the mempool structure.
  * @param vaddr
  *   The virtual address of memory that should be used to store objects.
- * @param paddr
- *   The physical address
+ * @param iova
+ *   The IO address.
  * @param len
  *   The length of memory in bytes.
  * @param free_cb
@@ -901,6 +998,11 @@
  *   On error, the chunk is not added in the memory list of the
  *   mempool and a negative errno is returned.
  */
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
+__rte_deprecated
 int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque);
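
A sketch of the renamed populate call from the application side, assuming a pool created with rte_mempool_create_empty() and backing memory taken from a memzone; the memzone name and the 2 MiB size are illustrative:

#include <rte_errno.h>
#include <rte_memzone.h>
#include <rte_mempool.h>

static int
populate_from_memzone(struct rte_mempool *mp)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("example_mz", 2 * 1024 * 1024,
			SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return -rte_errno;

	/* The deprecated rte_mempool_populate_phys() takes the same
	 * arguments, with a phys_addr_t in place of the rte_iova_t.
	 */
	return rte_mempool_populate_iova(mp, mz->addr, mz->iova, mz->len,
			NULL, NULL);
}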
@@ -909,18 +1011,17 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  * Add physical memory for objects in the pool at init
  *
  * Add a virtually contiguous memory chunk in the pool where objects can
- * be instantiated. The physical addresses corresponding to the virtual
- * area are described in paddr[], pg_num, pg_shift.
+ * be instantiated. The IO addresses corresponding to the virtual
+ * area are described in iova[], pg_num, pg_shift.
  *
  * @param mp
  *   A pointer to the mempool structure.
  * @param vaddr
  *   The virtual address of memory that should be used to store objects.
- * @param paddr
- *   An array of physical addresses of each page composing the virtual
- *   area.
+ * @param iova
+ *   An array of IO addresses of each page composing the virtual area.
  * @param pg_num
- *   Number of elements in the paddr array.
+ *   Number of elements in the iova array.
  * @param pg_shift
  *   LOG2 of the physical page size.
  * @param free_cb
@@ -932,6 +1033,11 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  *   On error, the chunks are not added in the memory list of the
  *   mempool and a negative errno is returned.
  */
+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
+	const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
+	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
+
+__rte_deprecated
 int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
 	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
@@ -1452,19 +1558,17 @@ rte_mempool_empty(const struct rte_mempool *mp)
 }
 
 /**
- * Return the physical address of elt, which is an element of the pool mp.
+ * Return the IO address of elt, which is an element of the pool mp.
  *
- * @param mp
- *   A pointer to the mempool structure.
  * @param elt
  *   A pointer (virtual address) to the element of the pool.
  * @return
- *   The physical address of the elt element.
- *   If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
- *   returned value is RTE_BAD_PHYS_ADDR.
+ *   The IO address of the elt element.
+ *   If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
+ *   returned value is RTE_BAD_IOVA.
  */
-static inline phys_addr_t
-rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
+static inline rte_iova_t
+rte_mempool_virt2iova(const void *elt)
 {
 	const struct rte_mempool_objhdr *hdr;
 	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
@@ -1472,6 +1576,13 @@ rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
 	return hdr->iova;
 }
 
+__rte_deprecated
+static inline phys_addr_t
+rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
+{
+	return rte_mempool_virt2iova(elt);
+}
+
 /**
  * Check the consistency of mempool objects.
  *
@@ -1547,7 +1658,7 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  * of objects. Assume that the memory buffer will be aligned at page
  * boundary.
  *
- * Note that if object size is bigger then page size, then it assumes
+ * Note that if object size is bigger than page size, then it assumes
  * that pages are grouped in subsets of physically contiguous pages big
  * enough to store at least one object.
 *
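
Finally, a migration sketch for the renamed element-address lookup: rte_mempool_virt2iova() drops the unused pool pointer, so callers of the deprecated rte_mempool_virt2phy() only need to remove that argument. The function name below is illustrative:

#include <rte_mempool.h>

static int
get_obj_iova(struct rte_mempool *mp, rte_iova_t *iova)
{
	void *obj;
	int rc;

	rc = rte_mempool_get(mp, &obj);
	if (rc < 0)
		return rc;

	/* Returns RTE_BAD_IOVA if the pool was created with
	 * MEMPOOL_F_NO_IOVA_CONTIG.
	 */
	*iova = rte_mempool_virt2iova(obj);

	rte_mempool_put(mp, obj);
	return 0;
}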