From 23fb3b460fdccf232c74a6115171e035d1f56435 Mon Sep 17 00:00:00 2001
From: Olivier Matz
Date: Wed, 18 May 2016 13:04:22 +0200
Subject: [PATCH] mempool: rework comments and style

No functional change, just fix some comments and styling issues.
Also avoid duplicating comments between rte_mempool_create() and
rte_mempool_xmem_create().

Signed-off-by: Olivier Matz
Acked-by: Keith Wiles
---
 lib/librte_mempool/rte_mempool.c | 17 +++++++--
 lib/librte_mempool/rte_mempool.h | 59 +++++++-------------------------
 2 files changed, 27 insertions(+), 49 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 0724942390..daf4d06864 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -152,6 +152,14 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
 	rte_ring_sp_enqueue(mp->ring, obj);
 }

+/* Iterate through objects at the given address
+ *
+ * Given the pointer to the memory, and its topology in physical memory
+ * (the physical addresses table), iterate through the "elt_num" objects
+ * of size "elt_sz" aligned at "align". For each object in this memory
+ * chunk, invoke a callback. It returns the effective number of objects
+ * in this memory.
+ */
 uint32_t
 rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
 	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
@@ -341,9 +349,8 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
 	return sz;
 }

-/*
- * Calculate how much memory would be actually required with the
- * given memory footprint to store required number of elements.
+/* Callback used by rte_mempool_xmem_usage(): it sets the opaque
+ * argument to the end of the object.
  */
 static void
 mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
@@ -352,6 +359,10 @@ mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
 	*(uintptr_t *)arg = (uintptr_t)end;
 }

+/*
+ * Calculate how much memory would be actually required with the
+ * given memory footprint to store required number of elements.
+ */
 ssize_t
 rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
 	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0f3ef4a03f..dd70469ebb 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -214,7 +214,7 @@ struct rte_mempool {
 } __rte_cache_aligned;

-#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread in memory. */
+#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
 #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
 #define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
 #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/

@@ -272,7 +272,8 @@ struct rte_mempool {
 /* return the header of a mempool object (internal) */
 static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
 {
-	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, sizeof(struct rte_mempool_objhdr));
+	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
+		sizeof(struct rte_mempool_objhdr));
 }

 /**
@@ -546,8 +547,9 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 /**
  * Create a new mempool named *name* in memory.
  *
- * This function uses ``memzone_reserve()`` to allocate memory. The
- * pool contains n elements of elt_size. Its size is set to n.
+ * The pool contains n elements of elt_size. Its size is set to n.
+ * This function uses ``memzone_reserve()`` to allocate the mempool header
+ * (and the objects if vaddr is NULL).
  * Depending on the input parameters, mempool elements can be either allocated
  * together with the mempool header, or an externally provided memory buffer
  * could be used to store mempool objects. In later case, that external
@@ -562,18 +564,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
  * @param elt_size
  *   The size of each element.
  * @param cache_size
- *   If cache_size is non-zero, the rte_mempool library will try to
- *   limit the accesses to the common lockless pool, by maintaining a
- *   per-lcore object cache. This argument must be lower or equal to
- *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
- *   cache_size to have "n modulo cache_size == 0": if this is
- *   not the case, some elements will always stay in the pool and will
- *   never be used. The access to the per-lcore table is of course
- *   faster than the multi-producer/consumer pool. The cache can be
- *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid losing objects in cache. Note that even if not used, the
- *   memory space for cache is always reserved in a mempool structure,
- *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ *   Size of the cache. See rte_mempool_create() for details.
  * @param private_data_size
  *   The size of the private data appended after the mempool
  *   structure. This is useful for storing some private data after the
@@ -587,35 +578,17 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
  *   An opaque pointer to data that can be used in the mempool
  *   constructor function.
  * @param obj_init
- *   A function pointer that is called for each object at
- *   initialization of the pool. The user can set some meta data in
- *   objects if needed. This parameter can be NULL if not needed.
- *   The obj_init() function takes the mempool pointer, the init_arg,
- *   the object pointer and the object number as parameters.
+ *   A function called for each object at initialization of the pool.
+ *   See rte_mempool_create() for details.
  * @param obj_init_arg
- *   An opaque pointer to data that can be used as an argument for
- *   each call to the object constructor function.
+ *   An opaque pointer passed to the object constructor function.
  * @param socket_id
  *   The *socket_id* argument is the socket identifier in the case of
  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
  *   constraint for the reserved zone.
  * @param flags
- *   The *flags* arguments is an OR of following flags:
- *   - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
- *     between channels in RAM: the pool allocator will add padding
- *     between objects depending on the hardware configuration. See
- *     Memory alignment constraints for details. If this flag is set,
- *     the allocator will just align them to a cache line.
- *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
- *     cache-aligned. This flag removes this constraint, and no
- *     padding will be present between objects. This flag implies
- *     MEMPOOL_F_NO_SPREAD.
- *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
- *     when using rte_mempool_put() or rte_mempool_put_bulk() is
- *     "single-producer". Otherwise, it is "multi-producers".
- *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
- *     when using rte_mempool_get() or rte_mempool_get_bulk() is
- *     "single-consumer". Otherwise, it is "multi-consumers".
+ *   Flags controlling the behavior of the mempool. See
+ *   rte_mempool_create() for details.
  * @param vaddr
  *   Virtual address of the externally allocated memory buffer.
  *   Will be used to store mempool objects.
@@ -628,13 +601,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
  *   LOG2 of the physical pages size.
  * @return
  *   The pointer to the new allocated mempool, on success. NULL on error
- *   with rte_errno set appropriately. Possible rte_errno values include:
- *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
- *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - EINVAL - cache size provided is too large
- *    - ENOSPC - the maximum number of memzones has already been allocated
- *    - EEXIST - a memzone with the same name already exists
- *    - ENOMEM - no appropriate memory area found in which to create memzone
+ *   with rte_errno set appropriately. See rte_mempool_create() for details.
  */
 struct rte_mempool *
 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
-- 
2.20.1
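
The reworked comments on rte_mempool_xmem_size() and rte_mempool_xmem_usage() describe how to estimate the footprint of a pool backed by externally allocated memory. As an illustration only (not part of the patch), a minimal sketch of that sizing step against the API documented above; the object count, element size, flags and the 2 MB page shift are hypothetical:

#include <stdint.h>
#include <stddef.h>
#include <rte_mempool.h>

/* Estimate how many bytes of external memory a pool of "n" objects of
 * "elt_size" bytes would need, assuming pages of 2^pg_shift bytes and
 * that no object may span a page boundary. */
static size_t
example_xmem_footprint(uint32_t n, uint32_t elt_size, unsigned flags)
{
        struct rte_mempool_objsz objsz;
        uint32_t total_elt_sz;
        uint32_t pg_shift = 21;         /* hypothetical 2 MB pages */

        /* Per-object size, including the mempool header and trailer. */
        total_elt_sz = rte_mempool_calc_obj_size(elt_size, flags, &objsz);

        /* Worst-case amount of memory needed to store the n objects. */
        return rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
}

The resulting size indicates how much external memory to reserve before handing the buffer, its physical address table, pg_num and pg_shift to rte_mempool_xmem_create().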
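
Likewise, the cache_size, obj_init and MEMPOOL_F_* parameters that the shortened doxygen blocks now defer to rte_mempool_create() can be exercised as in the following sketch; the pool name, object count, element size and cache size are made-up values for illustration:

#include <stdio.h>
#include <rte_mempool.h>
#include <rte_errno.h>

static struct rte_mempool *
example_pool_create(void)
{
        /* Single-producer put / single-consumer get, and no spreading of
         * objects across memory channels (cache-line alignment only). */
        unsigned flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET |
                         MEMPOOL_F_NO_SPREAD;

        /* 8192 % 32 == 0, as advised for the per-lcore cache. */
        struct rte_mempool *mp = rte_mempool_create("example_pool",
                        8192, 2048,     /* n objects of elt_size bytes */
                        32, 0,          /* cache_size, private_data_size */
                        NULL, NULL,     /* no pool constructor */
                        NULL, NULL,     /* no per-object constructor */
                        SOCKET_ID_ANY, flags);

        if (mp == NULL)
                printf("mempool creation failed: %s\n",
                                rte_strerror(rte_errno));
        return mp;
}

rte_mempool_xmem_create() accepts the same parameters, plus vaddr, paddr[], pg_num and pg_shift describing the externally provided memory.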