diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0fe8aa7b89..9e0ee052b3 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -28,9 +28,9 @@
 * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
 * thread due to the internal per-lcore cache. Due to the lack of caching,
 * rte_mempool_get() or rte_mempool_put() performance will suffer when called
- * by non-EAL threads. Instead, non-EAL threads should call
- * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
- * created with rte_mempool_cache_create().
+ * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
+ * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
+ * user cache created with rte_mempool_cache_create().
 */

#include

@@ -51,6 +51,8 @@
#include
#include

+#include "rte_mempool_trace_fp.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -116,6 +118,9 @@ struct rte_mempool_objsz {
#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
+/**
+ * Alignment of elements inside mempool.
+ */
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

@@ -257,7 +262,8 @@ struct rte_mempool {
#endif
} __rte_cache_aligned;

-#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread among memory channels. */
+#define MEMPOOL_F_NO_SPREAD 0x0001
+ /**< Spreading among memory channels not required. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
@@ -458,15 +464,20 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
 * @param[out] align
 * Location for required memory chunk alignment.
 * @return
- * Required memory size aligned at page boundary.
+ * Required memory size.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
 uint32_t obj_num, uint32_t pg_shift,
 size_t *min_chunk_size, size_t *align);

/**
- * Default way to calculate memory size required to store given number of
- * objects.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to calculate memory size required to store given
+ * number of objects.
+ *
+ * This function is internal to mempool library and mempool drivers.
 *
 * If page boundaries may be ignored, it is just a product of total
 * object size including header and trailer and number of objects.
@@ -477,11 +488,37 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
 * that pages are grouped in subsets of physically continuous pages big
 * enough to store at least one object.
 *
- * Minimum size of memory chunk is a maximum of the page size and total
- * element size.
+ * Minimum size of memory chunk is the total element size.
+ * Required memory chunk alignment is the cache line size.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] obj_num
+ * Number of objects to be added in mempool.
+ * @param[in] pg_shift
+ * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[in] chunk_reserve
+ * Amount of memory that must be reserved at the beginning of each page,
+ * or at the beginning of the memory area if pg_shift is 0.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size.
+ */
+__rte_experimental
+ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
 *
- * Required memory chunk alignment is a maximum of page size and cache
- * line size.
+ * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ * 0, min_chunk_size, align).
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 uint32_t obj_num, uint32_t pg_shift,
@@ -536,8 +573,56 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
- * Default way to populate memory pool object using provided memory
- * chunk: just slice objects one by one.
+ * Align objects on addresses multiple of total_elt_sz.
+ */
+#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to populate memory pool object using provided memory
+ * chunk: just slice objects one by one, taking care of not
+ * crossing page boundaries.
+ *
+ * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
+ * of object headers will be aligned on a multiple of total_elt_sz.
+ * This feature is used by octeontx hardware.
+ *
+ * This function is internal to mempool library and mempool drivers.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] flags
+ * Logical OR of following flags:
+ * - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses
+ * multiple of total_elt_sz.
+ * @param[in] max_objs
+ * Maximum number of objects to be added in mempool.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ * The IO address corresponding to vaddr, or RTE_BAD_IOVA.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added in mempool.
+ */
+__rte_experimental
+int rte_mempool_op_populate_helper(struct rte_mempool *mp,
+ unsigned int flags, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Default way to populate memory pool object using provided memory chunk.
+ *
+ * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
+ * len, obj_cb, obj_cb_arg).
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
 unsigned int max_objs,
@@ -653,6 +738,7 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
{
 struct rte_mempool_ops *ops;

+ rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
 ops = rte_mempool_get_ops(mp->ops_index);
 return ops->dequeue(mp, obj_table, n);
}
@@ -678,6 +764,7 @@ rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,

 ops = rte_mempool_get_ops(mp->ops_index);
 RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+ rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
 return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}

@@ -700,6 +787,7 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
{
 struct rte_mempool_ops *ops;

+ rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
 ops = rte_mempool_get_ops(mp->ops_index);
 return ops->enqueue(mp, obj_table, n);
}
@@ -1024,9 +1112,12 @@ rte_mempool_free(struct rte_mempool *mp);
 * @param opaque
 * An opaque argument passed to free_cb.
 * @return
- * The number of objects added on success.
+ * The number of objects added on success (strictly positive).
 * On error, the chunk is not added in the memory list of the
- * mempool and a negative errno is returned.
+ * mempool the following code is returned:
+ * (0): not enough room in chunk for one object.
+ * (-ENOSPC): mempool is already populated.
+ * (-ENOMEM): allocation failure.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
@@ -1051,9 +1142,12 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 * @param opaque
 * An opaque argument passed to free_cb.
 * @return
- * The number of objects added on success.
+ * The number of objects added on success (strictly positive).
 * On error, the chunk is not added in the memory list of the
- * mempool and a negative errno is returned.
+ * mempool the following code is returned:
+ * (0): not enough room in chunk for one object.
+ * (-ENOSPC): mempool is already populated.
+ * (-ENOMEM): allocation failure.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
@@ -1085,8 +1179,8 @@ int rte_mempool_populate_default(struct rte_mempool *mp);
 * A pointer to the mempool structure.
 * @return
 * The number of objects added on success.
- * On error, the chunk is not added in the memory list of the
- * mempool and a negative errno is returned.
+ * On error, 0 is returned, rte_errno is set, and the chunk is not added in
+ * the memory list of the mempool.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);

@@ -1139,7 +1233,7 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
/**
 * Create a user-owned mempool cache.
 *
- * This can be used by non-EAL threads to enable caching when they
+ * This can be used by unregistered non-EAL threads to enable caching when they
 * interact with a mempool.
 *
 * @param size
@@ -1170,7 +1264,8 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
 * @param lcore_id
 * The logical core id.
 * @return
- * A pointer to the mempool cache or NULL if disabled or non-EAL thread.
+ * A pointer to the mempool cache or NULL if disabled or unregistered non-EAL
+ * thread.
*/ static __rte_always_inline struct rte_mempool_cache * rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) @@ -1181,6 +1276,8 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) if (lcore_id >= RTE_MAX_LCORE) return NULL; + rte_mempool_trace_default_cache(mp, lcore_id, + &mp->local_cache[lcore_id]); return &mp->local_cache[lcore_id]; } @@ -1200,6 +1297,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache, cache = rte_mempool_default_cache(mp, rte_lcore_id()); if (cache == NULL || cache->len == 0) return; + rte_mempool_trace_cache_flush(cache, mp); rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); cache->len = 0; } @@ -1279,6 +1377,7 @@ static __rte_always_inline void rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, unsigned int n, struct rte_mempool_cache *cache) { + rte_mempool_trace_generic_put(mp, obj_table, n, cache); __mempool_check_cookies(mp, obj_table, n, 0); __mempool_generic_put(mp, obj_table, n, cache); } @@ -1303,6 +1402,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, { struct rte_mempool_cache *cache; cache = rte_mempool_default_cache(mp, rte_lcore_id()); + rte_mempool_trace_put_bulk(mp, obj_table, n, cache); rte_mempool_generic_put(mp, obj_table, n, cache); } @@ -1424,6 +1524,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, ret = __mempool_generic_get(mp, obj_table, n, cache); if (ret == 0) __mempool_check_cookies(mp, obj_table, n, 1); + rte_mempool_trace_generic_get(mp, obj_table, n, cache); return ret; } @@ -1454,6 +1555,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) { struct rte_mempool_cache *cache; cache = rte_mempool_default_cache(mp, rte_lcore_id()); + rte_mempool_trace_get_bulk(mp, obj_table, n, cache); return rte_mempool_generic_get(mp, obj_table, n, cache); } @@ -1523,6 +1625,7 @@ rte_mempool_get_contig_blocks(struct rte_mempool *mp, __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n); } + rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n); return ret; } @@ -1571,7 +1674,7 @@ rte_mempool_in_use_count(const struct rte_mempool *mp); static inline int rte_mempool_full(const struct rte_mempool *mp) { - return !!(rte_mempool_avail_count(mp) == mp->size); + return rte_mempool_avail_count(mp) == mp->size; } /** @@ -1590,7 +1693,7 @@ rte_mempool_full(const struct rte_mempool *mp) static inline int rte_mempool_empty(const struct rte_mempool *mp) { - return !!(rte_mempool_avail_count(mp) == 0); + return rte_mempool_avail_count(mp) == 0; } /** @@ -1691,6 +1794,17 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg), void *arg); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @internal Get page size used for mempool object allocation. + * This function is internal to mempool library and mempool drivers. + */ +__rte_experimental +int +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz); + #ifdef __cplusplus } #endif
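
Below is a minimal usage sketch (not part of the patch) illustrating the pattern recommended by the updated comment at the top of this diff: an unregistered non-EAL thread creates a user-owned cache with rte_mempool_cache_create() and passes it to rte_mempool_generic_get()/rte_mempool_generic_put() instead of relying on the per-lcore cache. The mempool argument, the cache and burst size of 32, and the use of SOCKET_ID_ANY are illustrative assumptions, not part of the change above.

#include <rte_mempool.h> /* also pulls in the EAL headers providing SOCKET_ID_ANY */

#define BURST 32 /* illustrative burst size */

static int
unregistered_thread_work(struct rte_mempool *mp)
{
	struct rte_mempool_cache *cache;
	void *objs[BURST];

	/* This thread has no per-lcore cache, so create a user-owned one. */
	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	if (rte_mempool_generic_get(mp, objs, BURST, cache) == 0) {
		/* ... use the objects ... */
		rte_mempool_generic_put(mp, objs, BURST, cache);
	}

	/* Return any objects still held in the cache, then free it. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return 0;
}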