X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.h;h=225bf9fc94123e541830c08a2dcedabd0cc41075;hb=84626a0d61a624dad11614946accc8eebd52353b;hp=1f59553b38ff2635820bfaf40be7a94b071ed1c7;hpb=8a80fa47233f5e99653253513c1c4209ccee63c6;p=dpdk.git diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h index 1f59553b38..225bf9fc94 100644 --- a/lib/librte_mempool/rte_mempool.h +++ b/lib/librte_mempool/rte_mempool.h @@ -427,7 +427,7 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp, * @warning * @b EXPERIMENTAL: this API may change without prior notice. * - * Dequeue a number of contiquous object blocks from the external pool. + * Dequeue a number of contiguous object blocks from the external pool. */ typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp, void **first_obj_table, unsigned int n); @@ -458,15 +458,20 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp); * @param[out] align * Location for required memory chunk alignment. * @return - * Required memory size aligned at page boundary. + * Required memory size. */ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp, uint32_t obj_num, uint32_t pg_shift, size_t *min_chunk_size, size_t *align); /** - * Default way to calculate memory size required to store given number of - * objects. + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Helper to calculate memory size required to store given + * number of objects. + * + * This function is internal to mempool library and mempool drivers. * * If page boundaries may be ignored, it is just a product of total * object size including header and trailer and number of objects. @@ -477,37 +482,41 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp, * that pages are grouped in subsets of physically continuous pages big * enough to store at least one object. * - * Minimum size of memory chunk is a maximum of the page size and total - * element size. + * Minimum size of memory chunk is the total element size. + * Required memory chunk alignment is the cache line size. * - * Required memory chunk alignment is a maximum of page size and cache - * line size. + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] obj_num + * Number of objects to be added in mempool. + * @param[in] pg_shift + * LOG2 of the physical pages size. If set to 0, ignore page boundaries. + * @param[in] chunk_reserve + * Amount of memory that must be reserved at the beginning of each page, + * or at the beginning of the memory area if pg_shift is 0. + * @param[out] min_chunk_size + * Location for minimum size of the memory chunk which may be used to + * store memory pool objects. + * @param[out] align + * Location for required memory chunk alignment. + * @return + * Required memory size. */ -ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp, - uint32_t obj_num, uint32_t pg_shift, +__rte_experimental +ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve, size_t *min_chunk_size, size_t *align); /** - * @internal Helper function to calculate memory size required to store - * specified number of objects in assumption that the memory buffer will - * be aligned at page boundary. 
- * - * Note that if object size is bigger than page size, then it assumes - * that pages are grouped in subsets of physically continuous pages big - * enough to store at least one object. + * Default way to calculate memory size required to store given number of + * objects. * - * @param elt_num - * Number of elements. - * @param total_elt_sz - * The size of each element, including header and trailer, as returned - * by rte_mempool_calc_obj_size(). - * @param pg_shift - * LOG2 of the physical pages size. If set to 0, ignore page boundaries. - * @return - * Required memory size aligned at page boundary. + * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift, + * 0, min_chunk_size, align). */ -size_t rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz, - uint32_t pg_shift); +ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp, + uint32_t obj_num, uint32_t pg_shift, + size_t *min_chunk_size, size_t *align); /** * Function to be called for each populated object. @@ -558,8 +567,56 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp, rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); /** - * Default way to populate memory pool object using provided memory - * chunk: just slice objects one by one. + * Align objects on addresses multiple of total_elt_sz. + */ +#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001 + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Helper to populate memory pool object using provided memory + * chunk: just slice objects one by one, taking care of not + * crossing page boundaries. + * + * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses + * of object headers will be aligned on a multiple of total_elt_sz. + * This feature is used by octeontx hardware. + * + * This function is internal to mempool library and mempool drivers. + * + * @param[in] mp + * A pointer to the mempool structure. + * @param[in] flags + * Logical OR of following flags: + * - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses + * multiple of total_elt_sz. + * @param[in] max_objs + * Maximum number of objects to be added in mempool. + * @param[in] vaddr + * The virtual address of memory that should be used to store objects. + * @param[in] iova + * The IO address corresponding to vaddr, or RTE_BAD_IOVA. + * @param[in] len + * The length of memory in bytes. + * @param[in] obj_cb + * Callback function to be executed for each populated object. + * @param[in] obj_cb_arg + * An opaque pointer passed to the callback function. + * @return + * The number of objects added in mempool. + */ +__rte_experimental +int rte_mempool_op_populate_helper(struct rte_mempool *mp, + unsigned int flags, unsigned int max_objs, + void *vaddr, rte_iova_t iova, size_t len, + rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); + +/** + * Default way to populate memory pool object using provided memory chunk. + * + * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova, + * len, obj_cb, obj_cb_arg). */ int rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs, @@ -854,10 +911,9 @@ int rte_mempool_register_ops(const struct rte_mempool_ops *ops); * Note that the rte_mempool_register_ops fails silently here when * more than RTE_MEMPOOL_MAX_OPS_IDX is registered. 
*/ -#define MEMPOOL_REGISTER_OPS(ops) \ - void mp_hdlr_init_##ops(void); \ - void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\ - { \ +#define MEMPOOL_REGISTER_OPS(ops) \ + RTE_INIT(mp_hdlr_init_##ops) \ + { \ rte_mempool_register_ops(&ops); \ } @@ -973,74 +1029,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags); -/** - * @deprecated - * Create a new mempool named *name* in memory. - * - * The pool contains n elements of elt_size. Its size is set to n. - * This function uses ``memzone_reserve()`` to allocate the mempool header - * (and the objects if vaddr is NULL). - * Depending on the input parameters, mempool elements can be either allocated - * together with the mempool header, or an externally provided memory buffer - * could be used to store mempool objects. In later case, that external - * memory buffer can consist of set of disjoint physical pages. - * - * @param name - * The name of the mempool. - * @param n - * The number of elements in the mempool. The optimum size (in terms of - * memory usage) for a mempool is when n is a power of two minus one: - * n = (2^q - 1). - * @param elt_size - * The size of each element. - * @param cache_size - * Size of the cache. See rte_mempool_create() for details. - * @param private_data_size - * The size of the private data appended after the mempool - * structure. This is useful for storing some private data after the - * mempool structure, as is done for rte_mbuf_pool for example. - * @param mp_init - * A function pointer that is called for initialization of the pool, - * before object initialization. The user can initialize the private - * data in this function if needed. This parameter can be NULL if - * not needed. - * @param mp_init_arg - * An opaque pointer to data that can be used in the mempool - * constructor function. - * @param obj_init - * A function called for each object at initialization of the pool. - * See rte_mempool_create() for details. - * @param obj_init_arg - * An opaque pointer passed to the object constructor function. - * @param socket_id - * The *socket_id* argument is the socket identifier in the case of - * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA - * constraint for the reserved zone. - * @param flags - * Flags controlling the behavior of the mempool. See - * rte_mempool_create() for details. - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param iova - * Array of IO addresses of the pages that comprises given memory buffer. - * @param pg_num - * Number of elements in the iova array. - * @param pg_shift - * LOG2 of the physical pages size. - * @return - * The pointer to the new allocated mempool, on success. NULL on error - * with rte_errno set appropriately. See rte_mempool_create() for details. 
- */ -__rte_deprecated -struct rte_mempool * -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - unsigned cache_size, unsigned private_data_size, - rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, - int socket_id, unsigned flags, void *vaddr, - const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift); - /** * Create an empty mempool * @@ -1123,48 +1111,6 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); -__rte_deprecated -int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr, - phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, - void *opaque); - -/** - * @deprecated - * Add physical memory for objects in the pool at init - * - * Add a virtually contiguous memory chunk in the pool where objects can - * be instantiated. The IO addresses corresponding to the virtual - * area are described in iova[], pg_num, pg_shift. - * - * @param mp - * A pointer to the mempool structure. - * @param vaddr - * The virtual address of memory that should be used to store objects. - * @param iova - * An array of IO addresses of each page composing the virtual area. - * @param pg_num - * Number of elements in the iova array. - * @param pg_shift - * LOG2 of the physical pages size. - * @param free_cb - * The callback used to free this chunk when destroying the mempool. - * @param opaque - * An opaque argument passed to free_cb. - * @return - * The number of objects added on success. - * On error, the chunks are not added in the memory list of the - * mempool and a negative errno is returned. - */ -__rte_deprecated -int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr, - const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); - -__rte_deprecated -int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr, - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, - rte_mempool_memchunk_free_cb_t *free_cb, void *opaque); - /** * Add virtually contiguous memory for objects in the pool at init * @@ -1175,9 +1121,8 @@ int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr, * A pointer to the mempool structure. * @param addr * The virtual address of memory that should be used to store objects. - * Must be page-aligned. * @param len - * The length of memory in bytes. Must be page-aligned. + * The length of memory in bytes. * @param pg_sz * The size of memory pages in this virtual area. * @param free_cb @@ -1496,7 +1441,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table, &cache->objs[cache->len], req); if (unlikely(ret < 0)) { /* - * In the offchance that we are buffer constrained, + * In the off chance that we are buffer constrained, * where we are not able to allocate cache + n, go to * the ring directly. If that fails, we are truly out of * buffers. @@ -1746,13 +1691,6 @@ rte_mempool_virt2iova(const void *elt) return hdr->iova; } -__rte_deprecated -static inline phys_addr_t -rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt) -{ - return rte_mempool_virt2iova(elt); -} - /** * Check the consistency of mempool objects. 
* @@ -1821,68 +1759,6 @@ struct rte_mempool *rte_mempool_lookup(const char *name); uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, struct rte_mempool_objsz *sz); -/** - * @deprecated - * Get the size of memory required to store mempool elements. - * - * Calculate the maximum amount of memory required to store given number - * of objects. Assume that the memory buffer will be aligned at page - * boundary. - * - * Note that if object size is bigger than page size, then it assumes - * that pages are grouped in subsets of physically continuous pages big - * enough to store at least one object. - * - * @param elt_num - * Number of elements. - * @param total_elt_sz - * The size of each element, including header and trailer, as returned - * by rte_mempool_calc_obj_size(). - * @param pg_shift - * LOG2 of the physical pages size. If set to 0, ignore page boundaries. - * @param flags - * The mempool flags. - * @return - * Required memory size aligned at page boundary. - */ -__rte_deprecated -size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, - uint32_t pg_shift, unsigned int flags); - -/** - * @deprecated - * Get the size of memory required to store mempool elements. - * - * Calculate how much memory would be actually required with the given - * memory footprint to store required number of objects. - * - * @param vaddr - * Virtual address of the externally allocated memory buffer. - * Will be used to store mempool objects. - * @param elt_num - * Number of elements. - * @param total_elt_sz - * The size of each element, including header and trailer, as returned - * by rte_mempool_calc_obj_size(). - * @param iova - * Array of IO addresses of the pages that comprises given memory buffer. - * @param pg_num - * Number of elements in the iova array. - * @param pg_shift - * LOG2 of the physical pages size. - * @param flags - * The mempool flags. - * @return - * On success, the number of bytes needed to store given number of - * objects, aligned to the given page size. If the provided memory - * buffer is too small, return a negative value whose absolute value - * is the actual number of elements that can be stored in that buffer. - */ -__rte_deprecated -ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, - size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num, - uint32_t pg_shift, unsigned int flags); - /** * Walk list of all memory pools * @@ -1894,6 +1770,17 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg), void *arg); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Get page size used for mempool object allocation. + * This function is internal to mempool library and mempool drivers. + */ +__rte_experimental +int +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz); + #ifdef __cplusplus } #endif
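
The hunks above turn rte_mempool_op_calc_mem_size_default() into a thin wrapper
around the new rte_mempool_op_calc_mem_size_helper(), which additionally takes a
per-page chunk_reserve. When page boundaries are ignored (pg_shift is 0) the
required size is essentially the number of objects times the total element size,
header and trailer included: 512 objects of 2176 bytes each need
512 * 2176 = 1114112 bytes, plus whatever chunk_reserve asks for at the start of
the area. The sketch below shows how a mempool driver might build its
calc_mem_size callback on top of the helper; it is illustrative only, the driver
name and the 64-byte reserve are invented for the example, and since the helper
is experimental the driver has to be built with ALLOW_EXPERIMENTAL_API defined.

#include <rte_mempool.h>

/*
 * Illustrative sketch, not part of the patch: a driver-specific calc_mem_size
 * callback that keeps a hypothetical 64-byte reserve at the beginning of each
 * page for driver metadata and lets the new helper do the size and alignment
 * computation.
 */
static ssize_t
example_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
	/* The 64-byte reserve is an assumption made for this example. */
	return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
			64, min_chunk_size, align);
}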
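
In the same way, rte_mempool_op_populate_default() now delegates to
rte_mempool_op_populate_helper(), which slices objects one by one without
crossing page boundaries and, when RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is passed,
aligns each object header on a multiple of total_elt_sz (the octeontx-style
requirement mentioned in the comment). A minimal sketch of a driver populate
callback using that flag follows; the driver name is again invented and the
callback signature is the rte_mempool_populate_t prototype from elsewhere in
this header.

#include <rte_mempool.h>

/*
 * Illustrative sketch, not part of the patch: a populate callback for hardware
 * that wants object headers aligned on total_elt_sz, delegating the actual
 * slicing to the new helper.
 */
static int
example_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
		rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	return rte_mempool_op_populate_helper(mp,
			RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ, max_objs,
			vaddr, iova, len, obj_cb, obj_cb_arg);
}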
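
The MEMPOOL_REGISTER_OPS() macro reworked above now expands to an RTE_INIT
constructor, so registration still happens automatically before main(). The
sketch below plugs the two callbacks from the previous examples into an ops
table; the field names of struct rte_mempool_ops are taken from the rest of
this header, the ops name is invented, and the remaining mandatory callbacks
(alloc, free, enqueue, dequeue, get_count) are omitted to keep it short.

#include <rte_mempool.h>

/*
 * Illustrative sketch, not part of the patch. A real driver must also provide
 * alloc/free/enqueue/dequeue/get_count callbacks; they are left NULL here.
 */
static const struct rte_mempool_ops example_ops = {
	.name = "example",
	.calc_mem_size = example_calc_mem_size,
	.populate = example_populate,
};

/* Expands to an RTE_INIT constructor that calls rte_mempool_register_ops(). */
MEMPOOL_REGISTER_OPS(example_ops);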
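
The documentation change to rte_mempool_populate_virt() above means the buffer
and its length no longer have to be page-aligned. The sketch below populates an
empty mempool from an ordinary rte_malloc() allocation; the free callback, the
allocation strategy and the error handling are assumptions chosen for the
example, not requirements stated by the patch.

#include <errno.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

/* Free callback returning the chunk to the rte_malloc heap. */
static void
example_chunk_free(struct rte_mempool_memhdr *memhdr __rte_unused, void *opaque)
{
	rte_free(opaque);
}

/*
 * Illustrative sketch, not part of the patch: add one virtually contiguous,
 * not necessarily page-aligned, memory chunk to a mempool created with
 * rte_mempool_create_empty().
 */
static int
example_populate_from_heap(struct rte_mempool *mp, size_t len, size_t pg_sz)
{
	char *buf;
	int ret;

	buf = rte_malloc(NULL, len, RTE_CACHE_LINE_SIZE);
	if (buf == NULL)
		return -ENOMEM;

	ret = rte_mempool_populate_virt(mp, buf, len, pg_sz,
			example_chunk_free, buf);
	if (ret < 0)
		rte_free(buf);

	return ret;
}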
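
The comment fixed in __mempool_generic_get() above describes the slow path taken
when the per-lcore cache cannot be refilled: the request falls through to the
ring and may still fail once the pool is exhausted. From the caller's point of
view that simply means checking the return value of the get functions, as in the
sketch below (burst size handling and the retry policy are left to the caller
and are not part of the patch).

#include <rte_mempool.h>

/*
 * Illustrative sketch, not part of the patch: a bulk allocation that tolerates
 * the pool running dry, the "truly out of buffers" case from the comment.
 */
static int
example_get_burst(struct rte_mempool *mp, void **objs, unsigned int n)
{
	/* Returns 0 on success, a negative value when not enough objects. */
	if (rte_mempool_get_bulk(mp, objs, n) < 0)
		return -1; /* caller retries later or drops the request */

	return 0;
}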
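
Finally, the new rte_mempool_get_page_size() gives mempool drivers the page size
the library uses for object allocation. The diff only documents it as internal
and experimental; the sketch below assumes the usual DPDK convention of
returning 0 on success and a negative errno otherwise, which is not spelled out
in the patch.

#include <rte_mempool.h>

/*
 * Illustrative sketch, not part of the patch: a driver querying the page size
 * before doing its own per-page bookkeeping, for instance to pass a consistent
 * pg_sz to rte_mempool_populate_virt().
 */
static ssize_t
example_query_page_size(struct rte_mempool *mp)
{
	size_t pg_sz;
	int ret;

	ret = rte_mempool_get_page_size(mp, &pg_sz);
	if (ret < 0)
		return ret;

	return (ssize_t)pg_sz;
}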