X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fmempool%2Frte_mempool.h;h=3ada37cb867830ab923c050dc61f27a2370c1677;hb=151e828f6427667faf3fdfaa00d14a65c7f57cd6;hp=b2e20c88558846de7641d44ce7171b3ce7cd5de3;hpb=11541c5c81dd2b286132dbff8a8c324d7e9bc945;p=dpdk.git diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h index b2e20c8855..3ada37cb86 100644 --- a/lib/mempool/rte_mempool.h +++ b/lib/mempool/rte_mempool.h @@ -34,17 +34,13 @@ */ #include -#include #include -#include #include #include #include -#include #include #include -#include #include #include #include @@ -116,10 +112,11 @@ struct rte_mempool_objsz { /* "MP_" */ #define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s" -#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1) +#define MEMPOOL_PG_SHIFT_MAX \ + RTE_DEPRECATED(MEMPOOL_PG_SHIFT_MAX) (sizeof(uintptr_t) * CHAR_BIT - 1) -/** Mempool over one chunk of physically continuous memory */ -#define MEMPOOL_PG_NUM_DEFAULT 1 +/** Deprecated. Mempool over one chunk of physically continuous memory */ +#define MEMPOOL_PG_NUM_DEFAULT RTE_DEPRECATED(MEMPOOL_PG_NUM_DEFAULT) 1 #ifndef RTE_MEMPOOL_ALIGN /** @@ -250,16 +247,55 @@ struct rte_mempool { #endif } __rte_cache_aligned; -#define MEMPOOL_F_NO_SPREAD 0x0001 - /**< Spreading among memory channels not required. */ -#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/ -#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ -#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ -#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */ -#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */ +/** Spreading among memory channels not required. */ +#define RTE_MEMPOOL_F_NO_SPREAD 0x0001 +/** + * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD. + * To be deprecated. + */ +#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD +/** Do not align objects on cache lines. */ +#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002 +/** + * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN. + * To be deprecated. + */ +#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN +/** Default put is "single-producer". */ +#define RTE_MEMPOOL_F_SP_PUT 0x0004 +/** + * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT. + * To be deprecated. + */ +#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT +/** Default get is "single-consumer". */ +#define RTE_MEMPOOL_F_SC_GET 0x0008 +/** + * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET. + * To be deprecated. + */ +#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET +/** Internal: pool is created. */ +#define RTE_MEMPOOL_F_POOL_CREATED 0x0010 +/** Don't need IOVA contiguous objects. */ +#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020 +/** + * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG. + * To be deprecated. + */ +#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG /** Internal: no object from the pool can be used for device IO (DMA). */ -#define MEMPOOL_F_NON_IO 0x0040 +#define RTE_MEMPOOL_F_NON_IO 0x0040 +/** + * This macro lists all the mempool flags an application may request. + */ +#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \ + | RTE_MEMPOOL_F_NO_CACHE_ALIGN \ + | RTE_MEMPOOL_F_SP_PUT \ + | RTE_MEMPOOL_F_SC_GET \ + | RTE_MEMPOOL_F_NO_IOVA_CONTIG \ + ) /** * @internal When debug is enabled, store some statistics. * @@ -271,30 +307,35 @@ struct rte_mempool { * Number to add to the object-oriented statistics. 
*/ #ifdef RTE_LIBRTE_MEMPOOL_DEBUG -#define __MEMPOOL_STAT_ADD(mp, name, n) do { \ +#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \ unsigned __lcore_id = rte_lcore_id(); \ if (__lcore_id < RTE_MAX_LCORE) { \ mp->stats[__lcore_id].name += n; \ } \ - } while(0) + } while (0) #else -#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0) +#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0) #endif /** - * Calculate the size of the mempool header. + * @internal Calculate the size of the mempool header. * * @param mp * Pointer to the memory pool. * @param cs * Size of the per-lcore cache. */ -#define MEMPOOL_HEADER_SIZE(mp, cs) \ +#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \ (sizeof(*(mp)) + (((cs) == 0) ? 0 : \ (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE))) +/** Deprecated. Use RTE_MEMPOOL_HEADER_SIZE() for internal purposes only. */ +#define MEMPOOL_HEADER_SIZE(mp, cs) \ + RTE_DEPRECATED(MEMPOOL_HEADER_SIZE) RTE_MEMPOOL_HEADER_SIZE(mp, cs) + /* return the header of a mempool object (internal) */ -static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj) +static inline struct rte_mempool_objhdr * +rte_mempool_get_header(void *obj) { return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, sizeof(struct rte_mempool_objhdr)); @@ -311,12 +352,12 @@ static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj) */ static inline struct rte_mempool *rte_mempool_from_obj(void *obj) { - struct rte_mempool_objhdr *hdr = __mempool_get_header(obj); + struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj); return hdr->mp; } /* return the trailer of a mempool object (internal) */ -static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj) +static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj) { struct rte_mempool *mp = rte_mempool_from_obj(obj); return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size); @@ -340,10 +381,10 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, void * const *obj_table_const, unsigned n, int free); #ifdef RTE_LIBRTE_MEMPOOL_DEBUG -#define __mempool_check_cookies(mp, obj_table_const, n, free) \ +#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \ rte_mempool_check_cookies(mp, obj_table_const, n, free) #else -#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0) +#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0) #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ /** @@ -365,13 +406,13 @@ void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, void * const *first_obj_table_const, unsigned int n, int free); #ifdef RTE_LIBRTE_MEMPOOL_DEBUG -#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ - free) \ +#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \ + free) \ rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ free) #else -#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ - free) \ +#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \ + free) \ do {} while (0) #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ @@ -421,9 +462,9 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp); * Calculate memory size required to store given number of objects. 
* * If mempool objects are not required to be IOVA-contiguous - * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines + * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines * virtually contiguous chunk size. Otherwise, if mempool objects must - * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear), + * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear), * min_chunk_size defines IOVA-contiguous chunk size. * * @param[in] mp @@ -706,8 +747,8 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp, ops = rte_mempool_get_ops(mp->ops_index); ret = ops->dequeue(mp, obj_table, n); if (ret == 0) { - __MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1); - __MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n); + RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n); } return ret; } @@ -756,8 +797,8 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table, { struct rte_mempool_ops *ops; - __MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1); - __MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n); + RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n); rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n); ops = rte_mempool_get_ops(mp->ops_index); return ops->enqueue(mp, obj_table, n); @@ -887,12 +928,16 @@ int rte_mempool_register_ops(const struct rte_mempool_ops *ops); * Note that the rte_mempool_register_ops fails silently here when * more than RTE_MEMPOOL_MAX_OPS_IDX is registered. */ -#define MEMPOOL_REGISTER_OPS(ops) \ +#define RTE_MEMPOOL_REGISTER_OPS(ops) \ RTE_INIT(mp_hdlr_init_##ops) \ { \ rte_mempool_register_ops(&ops); \ } +/** Deprecated. Use RTE_MEMPOOL_REGISTER_OPS() instead. */ +#define MEMPOOL_REGISTER_OPS(ops) \ + RTE_DEPRECATED(MEMPOOL_REGISTER_OPS) RTE_MEMPOOL_REGISTER_OPS(ops) + /** * An object callback function for mempool. * @@ -971,22 +1016,22 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); * constraint for the reserved zone. * @param flags * The *flags* arguments is an OR of following flags: - * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread + * - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread * between channels in RAM: the pool allocator will add padding * between objects depending on the hardware configuration. See * Memory alignment constraints for details. If this flag is set, * the allocator will just align them to a cache line. - * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are + * - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are * cache-aligned. This flag removes this constraint, and no * padding will be present between objects. This flag implies - * MEMPOOL_F_NO_SPREAD. - * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior + * RTE_MEMPOOL_F_NO_SPREAD. + * - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior * when using rte_mempool_put() or rte_mempool_put_bulk() is * "single-producer". Otherwise, it is "multi-producers". - * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior + * - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior * when using rte_mempool_get() or rte_mempool_get_bulk() is * "single-consumer". Otherwise, it is "multi-consumers". - * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't + * - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't * necessarily be contiguous in IO memory. 
* @return * The pointer to the new allocated mempool, on success. NULL on error @@ -1052,6 +1097,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, * * @param mp * A pointer to the mempool structure. + * If NULL then, the function does nothing. */ void rte_mempool_free(struct rte_mempool *mp); @@ -1282,14 +1328,14 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache, * A pointer to a mempool cache structure. May be NULL if not needed. */ static __rte_always_inline void -__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, - unsigned int n, struct rte_mempool_cache *cache) +rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table, + unsigned int n, struct rte_mempool_cache *cache) { void **cache_objs; /* increment stat now, adding in mempool always success */ - __MEMPOOL_STAT_ADD(mp, put_bulk, 1); - __MEMPOOL_STAT_ADD(mp, put_objs, n); + RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, put_objs, n); /* No cache provided or if put would overflow mem allocated for cache */ if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE)) @@ -1346,8 +1392,8 @@ rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, unsigned int n, struct rte_mempool_cache *cache) { rte_mempool_trace_generic_put(mp, obj_table, n, cache); - __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_generic_put(mp, obj_table, n, cache); + RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0); + rte_mempool_do_generic_put(mp, obj_table, n, cache); } /** @@ -1407,8 +1453,8 @@ rte_mempool_put(struct rte_mempool *mp, void *obj) * - <0: Error; code of ring dequeue function. */ static __rte_always_inline int -__mempool_generic_get(struct rte_mempool *mp, void **obj_table, - unsigned int n, struct rte_mempool_cache *cache) +rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { int ret; uint32_t index, len; @@ -1447,8 +1493,8 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table, cache->len -= n; - __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); - __MEMPOOL_STAT_ADD(mp, get_success_objs, n); + RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n); return 0; @@ -1458,11 +1504,11 @@ ring_dequeue: ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n); if (ret < 0) { - __MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1); - __MEMPOOL_STAT_ADD(mp, get_fail_objs, n); + RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n); } else { - __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); - __MEMPOOL_STAT_ADD(mp, get_success_objs, n); + RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n); } return ret; @@ -1493,9 +1539,9 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned int n, struct rte_mempool_cache *cache) { int ret; - ret = __mempool_generic_get(mp, obj_table, n, cache); + ret = rte_mempool_do_generic_get(mp, obj_table, n, cache); if (ret == 0) - __mempool_check_cookies(mp, obj_table, n, 1); + RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1); rte_mempool_trace_generic_get(mp, obj_table, n, cache); return ret; } @@ -1586,13 +1632,13 @@ rte_mempool_get_contig_blocks(struct rte_mempool *mp, ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); if (ret == 0) { - __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); - __MEMPOOL_STAT_ADD(mp, get_success_blks, n); - __mempool_contig_blocks_check_cookies(mp, first_obj_table, n, - 1); + 
RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n); + RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n, + 1); } else { - __MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1); - __MEMPOOL_STAT_ADD(mp, get_fail_blks, n); + RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1); + RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n); } rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n); @@ -1673,7 +1719,7 @@ rte_mempool_empty(const struct rte_mempool *mp) * A pointer (virtual address) to the element of the pool. * @return * The IO address of the elt element. - * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the + * If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the * returned value is RTE_BAD_IOVA. */ static inline rte_iova_t @@ -1708,7 +1754,7 @@ void rte_mempool_audit(struct rte_mempool *mp); static inline void *rte_mempool_get_priv(struct rte_mempool *mp) { return (char *)mp + - MEMPOOL_HEADER_SIZE(mp, mp->cache_size); + RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size); } /**
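
Usage note (not part of the patch above): the hunks rename the public mempool flags and helper macros to RTE_-prefixed forms (RTE_MEMPOOL_F_*, RTE_MEMPOOL_REGISTER_OPS, RTE_MEMPOOL_HEADER_SIZE) and keep the old names as deprecated synonyms via RTE_DEPRECATED, while rte_mempool_free() is documented as a no-op when passed NULL. A minimal application-side sketch using the new names is shown below; the pool name, object count, sizes and the main() harness are illustrative assumptions, not taken from the patch.

/* Illustrative sketch: create a pool with the renamed flags, get and put one
 * object, then free the pool. rte_mempool_free(NULL) is safe after this change. */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_mempool.h>

int main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Single-producer/single-consumer pool of 8191 objects of 2 KiB each,
	 * with no IOVA-contiguity requirement (new RTE_MEMPOOL_F_* names). */
	struct rte_mempool *mp = rte_mempool_create("example_pool",
			8191, 2048,
			256,		/* per-lcore cache size */
			0,		/* private data size */
			NULL, NULL,	/* pool constructor and its argument */
			NULL, NULL,	/* object constructor and its argument */
			SOCKET_ID_ANY,
			RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET |
			RTE_MEMPOOL_F_NO_IOVA_CONTIG);
	if (mp == NULL) {
		printf("mempool creation failed: %s\n", rte_strerror(rte_errno));
		rte_eal_cleanup();
		return -1;
	}

	void *obj;
	if (rte_mempool_get(mp, &obj) == 0) {
		/* ... use the object ... */
		rte_mempool_put(mp, obj);
	}

	rte_mempool_free(mp);	/* also safe to call with NULL now */
	rte_eal_cleanup();
	return 0;
}

Driver-side code would similarly move from MEMPOOL_REGISTER_OPS(ops) to RTE_MEMPOOL_REGISTER_OPS(ops); the old macro still expands correctly but triggers a build-time deprecation warning through RTE_DEPRECATED.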