X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.h;h=9ea7ff934caa4a085068f782003e67bfb4c11e08;hb=a622f2d44180035ba5ae649f10ad30d24cbbca98;hp=a2c92727a6e1d289e15587dc5706068ec1e043b4;hpb=3f2d6766e3a620a166ee868dfde324e6a1e4b7f3;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index a2c92727a6..9ea7ff934c 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -28,9 +28,9 @@
  * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
  * thread due to the internal per-lcore cache. Due to the lack of caching,
  * rte_mempool_get() or rte_mempool_put() performance will suffer when called
- * by non-EAL threads. Instead, non-EAL threads should call
- * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
- * created with rte_mempool_cache_create().
+ * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
+ * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
+ * user cache created with rte_mempool_cache_create().
  */
 
 #include <stdio.h>
@@ -51,6 +51,8 @@
 #include <rte_memcpy.h>
 #include <rte_common.h>
 
+#include "rte_mempool_trace_fp.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -136,11 +138,7 @@ struct rte_mempool_objsz {
 struct rte_mempool_objhdr {
 	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
 	struct rte_mempool *mp;          /**< The mempool owning the object. */
-	RTE_STD_C11
-	union {
-		rte_iova_t iova;         /**< IO address of the object. */
-		phys_addr_t physaddr;    /**< deprecated - Physical address of the object. */
-	};
+	rte_iova_t iova;                 /**< IO address of the object. */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	uint64_t cookie;                 /**< Debug cookie. */
 #endif
@@ -186,11 +184,7 @@ struct rte_mempool_memhdr {
 	STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
 	struct rte_mempool *mp;  /**< The mempool owning the chunk */
 	void *addr;              /**< Virtual address of the chunk */
-	RTE_STD_C11
-	union {
-		rte_iova_t iova;     /**< IO address of the chunk */
-		phys_addr_t phys_addr; /**< Physical address of the chunk */
-	};
+	rte_iova_t iova;         /**< IO address of the chunk */
 	size_t len;              /**< length of the chunk */
 	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
 	void *opaque;            /**< Argument passed to the free callback */
@@ -267,7 +261,6 @@ struct rte_mempool {
 #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
 #define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
 #define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
-#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
 
 /**
  * @internal When debug is enabled, store some statistics.
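The first hunk above rewords the guidance for unregistered non-EAL threads. A minimal sketch of the pattern it describes, using only APIs from this header; the helper name worker, the pool mp, and the burst size of 32 are illustrative assumptions, not part of the patch:

#include <rte_mempool.h>

/* Illustrative helper run from an unregistered non-EAL thread. */
static int
worker(struct rte_mempool *mp)
{
	struct rte_mempool_cache *cache;
	void *objs[32];

	/* Private user-owned cache; SOCKET_ID_ANY leaves NUMA placement open. */
	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	/* Pass the cache explicitly; the per-lcore default cache does not
	 * exist for an unregistered thread. */
	if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
		/* ... use the objects ... */
		rte_mempool_generic_put(mp, objs, 32, cache);
	}

	/* Return any cached objects to the pool, then free the cache. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return 0;
}
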
@@ -736,6 +729,7 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
 {
 	struct rte_mempool_ops *ops;
 
+	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->dequeue(mp, obj_table, n);
 }
@@ -761,6 +755,7 @@ rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
 
 	ops = rte_mempool_get_ops(mp->ops_index);
 	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
 	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
 }
 
@@ -783,6 +778,7 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_ops *ops;
 
+	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->enqueue(mp, obj_table, n);
 }
@@ -965,7 +961,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   If cache_size is non-zero, the rte_mempool library will try to
  *   limit the accesses to the common lockless pool, by maintaining a
  *   per-lcore object cache. This argument must be lower or equal to
- *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
+ *   RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
  *   cache_size to have "n modulo cache_size == 0": if this is
  *   not the case, some elements will always stay in the pool and will
  *   never be used. The access to the per-lcore table is of course
@@ -1107,9 +1103,12 @@ rte_mempool_free(struct rte_mempool *mp);
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool the following code is returned:
+ *   (0): not enough room in chunk for one object.
+ *   (-ENOSPC): mempool is already populated.
+ *   (-ENOMEM): allocation failure.
  */
 int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
@@ -1134,9 +1133,12 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool the following code is returned:
+ *   (0): not enough room in chunk for one object.
+ *   (-ENOSPC): mempool is already populated.
+ *   (-ENOMEM): allocation failure.
  */
 int
 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
@@ -1222,7 +1224,7 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
 /**
  * Create a user-owned mempool cache.
  *
- * This can be used by non-EAL threads to enable caching when they
+ * This can be used by unregistered non-EAL threads to enable caching when they
  * interact with a mempool.
  *
  * @param size
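The two populate hunks above spell out the new return convention: strictly positive is the number of objects added, while 0, -ENOSPC and -ENOMEM are distinct failures. A hedged sketch of a caller distinguishing them; the helper populate_chunk is hypothetical, its arguments are assumed to come from the caller, and a NULL free_cb is assumed acceptable when the caller manages the chunk's lifetime:

#include <errno.h>
#include <rte_mempool.h>

/* Hypothetical wrapper showing how to act on each documented code. */
static int
populate_chunk(struct rte_mempool *mp, char *vaddr, rte_iova_t iova,
	       size_t len)
{
	int ret = rte_mempool_populate_iova(mp, vaddr, iova, len,
					    NULL, NULL);

	if (ret > 0)
		return ret;       /* ret objects were added to the pool */
	if (ret == 0)
		return -EINVAL;   /* chunk too small for even one object;
				   * mapping 0 to an errno is this caller's
				   * choice, not part of the API */
	/* ret is -ENOSPC (pool already fully populated) or -ENOMEM. */
	return ret;
}
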
@@ -1253,7 +1255,8 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
  * @param lcore_id
  *   The logical core id.
  * @return
- *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
+ *   A pointer to the mempool cache or NULL if disabled or unregistered non-EAL
+ *   thread.
  */
 static __rte_always_inline struct rte_mempool_cache *
 rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
@@ -1264,6 +1267,8 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
 	if (lcore_id >= RTE_MAX_LCORE)
 		return NULL;
 
+	rte_mempool_trace_default_cache(mp, lcore_id,
+		&mp->local_cache[lcore_id]);
 	return &mp->local_cache[lcore_id];
 }
 
@@ -1283,6 +1288,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
 		cache = rte_mempool_default_cache(mp, rte_lcore_id());
 	if (cache == NULL || cache->len == 0)
 		return;
+	rte_mempool_trace_cache_flush(cache, mp);
 	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
 	cache->len = 0;
 }
@@ -1362,6 +1368,7 @@ static __rte_always_inline void
 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 			unsigned int n, struct rte_mempool_cache *cache)
 {
+	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
 	__mempool_check_cookies(mp, obj_table, n, 0);
 	__mempool_generic_put(mp, obj_table, n, cache);
 }
@@ -1386,6 +1393,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_cache *cache;
 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
 	rte_mempool_generic_put(mp, obj_table, n, cache);
 }
 
@@ -1507,6 +1515,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	ret = __mempool_generic_get(mp, obj_table, n, cache);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
+	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
 	return ret;
 }
 
@@ -1537,6 +1546,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
 {
 	struct rte_mempool_cache *cache;
 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
 	return rte_mempool_generic_get(mp, obj_table, n, cache);
 }
 
@@ -1606,6 +1616,7 @@ rte_mempool_get_contig_blocks(struct rte_mempool *mp,
 		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
 	}
 
+	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
 	return ret;
 }
 
@@ -1654,7 +1665,7 @@ rte_mempool_in_use_count(const struct rte_mempool *mp);
 static inline int
 rte_mempool_full(const struct rte_mempool *mp)
 {
-	return !!(rte_mempool_avail_count(mp) == mp->size);
+	return rte_mempool_avail_count(mp) == mp->size;
 }
 
 /**
@@ -1673,7 +1684,7 @@ rte_mempool_full(const struct rte_mempool *mp)
 static inline int
 rte_mempool_empty(const struct rte_mempool *mp)
 {
-	return !!(rte_mempool_avail_count(mp) == 0);
+	return rte_mempool_avail_count(mp) == 0;
 }
 
 /**
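The last two hunks drop a redundant double negation: in C the == comparison already yields 0 or 1, so the !! added nothing. For contrast with the generic functions above, a sketch of the plain bulk API from a registered EAL thread, where rte_mempool_default_cache() resolves the per-lcore cache implicitly; eal_thread_burst, the burst size, and the assumption that no other thread holds objects are all illustrative:

#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Illustrative helper run from a registered (EAL) thread. */
static void
eal_thread_burst(struct rte_mempool *mp)
{
	void *objs[8];

	/* Returns 0 on success; on failure nothing is dequeued. */
	if (rte_mempool_get_bulk(mp, objs, 8) != 0)
		return;

	/* ... work with objs[0..7] ... */

	rte_mempool_put_bulk(mp, objs, 8);

	/* rte_mempool_avail_count() includes the per-lcore caches, so if
	 * this thread was the only user, the pool is full again here. */
	RTE_ASSERT(rte_mempool_full(mp));
}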