X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.h;h=a4a961042394c2ca9dd14366ce78c72f36f1a65d;hb=f161fb6ad5125e8286ea6aaa01e8903dacf21161;hp=7668671c6078e00ed9730c073507b568a48ec6e9;hpb=1896b4ec5e7ad5089fa17120bebf17d5dea8f476;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7668671c60..a4a9610423 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *   * Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *   * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *   * Neither the name of Intel Corporation nor the names of its
  *     contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -59,6 +59,7 @@
  * that won't work as rte_lcore_id() will not return a correct value.
  */
 
+#include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <errno.h>
@@ -142,8 +143,6 @@ struct rte_mempool_objsz {
  * The RTE mempool structure.
  */
 struct rte_mempool {
-	TAILQ_ENTRY(rte_mempool) next;   /**< Next in list. */
-
 	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
 	struct rte_ring *ring;           /**< Ring to store objects. */
 	phys_addr_t phys_addr;           /**< Phys. addr. of mempool struct. */
@@ -199,10 +198,12 @@ struct rte_mempool {
  *   Number to add to the object-oriented statistics.
  */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
-		unsigned __lcore_id = rte_lcore_id();		\
-		mp->stats[__lcore_id].name##_objs += n;		\
-		mp->stats[__lcore_id].name##_bulk += 1;		\
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
+		unsigned __lcore_id = rte_lcore_id();		\
+		if (__lcore_id < RTE_MAX_LCORE) {		\
+			mp->stats[__lcore_id].name##_objs += n;	\
+			mp->stats[__lcore_id].name##_bulk += 1;	\
+		}						\
 	} while(0)
 #else
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
@@ -212,12 +213,12 @@ struct rte_mempool {
  * Calculates size of the mempool header.
  * @param mp
  *   Pointer to the memory pool.
- * @param pgn 
+ * @param pgn
  *   Number of page used to store mempool objects.
  */
 #define MEMPOOL_HEADER_SIZE(mp, pgn)	(sizeof(*(mp)) + \
 	RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
-	sizeof ((mp)->elt_pa[0]), CACHE_LINE_SIZE))
+	sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
 
 /**
  * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
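The hunk above bounds-checks __lcore_id before the per-lcore stats arrays are touched, and later hunks in this diff add the same lcore_id >= RTE_MAX_LCORE test to the put/get fast paths, so that non-EAL threads (where rte_lcore_id() does not return a valid lcore index) never index a per-lcore slot. A minimal sketch of that guard pattern, outside the context of the patch; the helper name and the counters array are made up for illustration, and RTE_MAX_LCORE comes from the DPDK build configuration:

#include <stdint.h>
#include <rte_lcore.h>

/* Hypothetical per-lcore event counters, one slot per possible EAL lcore. */
static uint64_t event_count[RTE_MAX_LCORE];

static inline void
count_event(void)
{
	unsigned lcore_id = rte_lcore_id();

	/* A non-EAL thread gets an id >= RTE_MAX_LCORE, so skip the update
	 * instead of writing past the end of the per-lcore array. */
	if (lcore_id < RTE_MAX_LCORE)
		event_count[lcore_id]++;
}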
@@ -343,8 +344,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 		if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
 			rte_log_set_history(0);
 			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
 			rte_panic("MEMPOOL: bad header cookie (put)\n");
 		}
 		__mempool_write_header_cookie(obj, 1);
@@ -353,8 +354,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 		if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
 			rte_log_set_history(0);
 			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
 			rte_panic("MEMPOOL: bad header cookie (get)\n");
 		}
 		__mempool_write_header_cookie(obj, 0);
@@ -364,8 +365,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 		    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
 			rte_log_set_history(0);
 			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
 			rte_panic("MEMPOOL: bad header cookie (audit)\n");
 		}
 	}
@@ -373,8 +374,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
 			rte_log_set_history(0);
 			RTE_LOG(CRIT, MEMPOOL,
-				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-				obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+				obj, (const void *) mp, cookie);
 			rte_panic("MEMPOOL: bad trailer cookie\n");
 		}
 	}
@@ -467,13 +468,13 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   If cache_size is non-zero, the rte_mempool library will try to
  *   limit the accesses to the common lockless pool, by maintaining a
  *   per-lcore object cache. This argument must be lower or equal to
- *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
  *   cache_size to have "n modulo cache_size == 0": if this is
  *   not the case, some elements will always stay in the pool and will
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -523,7 +524,6 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   with rte_errno set appropriately. Possible rte_errno values include:
  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
  *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
  *    - EINVAL - cache size provided is too large
  *    - ENOSPC - the maximum number of memzones has already been allocated
  *    - EEXIST - a memzone with the same name already exists
@@ -564,7 +564,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -624,7 +624,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
  *   with rte_errno set appropriately. Possible rte_errno values include:
  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
  *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
  *    - EINVAL - cache size provided is too large
  *    - ENOSPC - the maximum number of memzones has already been allocated
  *    - EEXIST - a memzone with the same name already exists
@@ -665,7 +664,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -715,7 +714,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
  *   with rte_errno set appropriately. Possible rte_errno values include:
  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
  *    - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
  *    - EINVAL - cache size provided is too large
  *    - ENOSPC - the maximum number of memzones has already been allocated
  *    - EEXIST - a memzone with the same name already exists
@@ -732,10 +730,12 @@ rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
 /**
  * Dump the status of the mempool to the console.
  *
+ * @param f
+ *   A pointer to a file for output
  * @param mp
  *   A pointer to the mempool structure.
  */
-void rte_mempool_dump(const struct rte_mempool *mp);
+void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
 
 /**
  * @internal Put several objects back in the mempool; used internally.
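The hunk above changes rte_mempool_dump() to take an explicit output stream rather than always writing to the console. A small caller-side sketch of the adaptation; the wrapper name is hypothetical and the pool pointer is assumed to come from an earlier rte_mempool_create() call:

#include <stdio.h>
#include <rte_mempool.h>

/* Hypothetical helper: dump one pool to a stream chosen by the caller.
 * Before this diff the call was rte_mempool_dump(mp) and the output
 * always went to the console. */
static void
dump_pool(const struct rte_mempool *mp, FILE *f)
{
	rte_mempool_dump(f, mp);
}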
@@ -766,8 +766,9 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 	__MEMPOOL_STAT_ADD(mp, put, n);
 
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
-	/* cache is not enabled or single producer */
-	if (unlikely(cache_size == 0 || is_mp == 0))
+	/* cache is not enabled or single producer or non-EAL thread */
+	if (unlikely(cache_size == 0 || is_mp == 0 ||
+		     lcore_id >= RTE_MAX_LCORE))
 		goto ring_enqueue;
 
 	/* Go straight to ring if put would overflow mem allocated for cache */
@@ -943,9 +944,6 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 		   unsigned n, int is_mc)
 {
 	int ret;
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-	unsigned n_orig = n;
-#endif
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
 	struct rte_mempool_cache *cache;
 	uint32_t index, len;
@@ -954,7 +952,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 	uint32_t cache_size = mp->cache_size;
 
 	/* cache is not enabled or single consumer */
-	if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
+	if (unlikely(cache_size == 0 || is_mc == 0 ||
+		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
 		goto ring_dequeue;
 
 	cache = &mp->local_cache[lcore_id];
@@ -986,7 +985,7 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 
 	cache->len -= n;
 
-	__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+	__MEMPOOL_STAT_ADD(mp, get_success, n);
 
 	return 0;
 
@@ -1000,9 +999,9 @@ ring_dequeue:
 		ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
 
 	if (ret < 0)
-		__MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
+		__MEMPOOL_STAT_ADD(mp, get_fail, n);
 	else
-		__MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+		__MEMPOOL_STAT_ADD(mp, get_success, n);
 
 	return ret;
 }
@@ -1297,8 +1296,11 @@ static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
 
 /**
  * Dump the status of all mempools on the console
+ *
+ * @param f
+ *   A pointer to a file for output
  */
-void rte_mempool_list_dump(void);
+void rte_mempool_list_dump(FILE *f);
 
 /**
  * Search a mempool from its name
@@ -1331,7 +1333,7 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 
 /**
  * Calculate maximum amount of memory required to store given number of objects.
- * Assumes that the memory buffer will be alligned at page boundary.
+ * Assumes that the memory buffer will be aligned at page boundary.
  * Note, that if object size is bigger then page size, then it assumes that
  *  we have a subsets of physically continuous pages big enough to store
  *  at least one object.
@@ -1373,6 +1375,17 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
 ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
 	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
 
+/**
+ * Walk list of all memory pools
+ *
+ * @param func
+ *      Iterator function
+ * @param arg
+ *      Argument passed to iterator
+ */
+void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
+		      void *arg);
+
 #ifdef __cplusplus
 }
 #endif
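The final hunk adds rte_mempool_walk(), which calls a user-supplied function once per registered mempool, and the earlier hunk gives rte_mempool_list_dump() an explicit output stream. A minimal sketch of a callback against the prototype declared above; the function names are hypothetical, and the printed fields (name, size, elt_size, cache_size) are assumed to be the public members of struct rte_mempool as defined in this header:

#include <stdio.h>
#include <rte_mempool.h>

/* Hypothetical iterator: print one summary line per pool to the FILE
 * passed through the opaque arg pointer. */
static void
print_pool(const struct rte_mempool *mp, void *arg)
{
	FILE *f = arg;

	fprintf(f, "mempool %s: %u objects of %u bytes, cache_size=%u\n",
		mp->name, mp->size, mp->elt_size, mp->cache_size);
}

/* Called at some point after rte_eal_init(): list every pool known to
 * this process, then dump the full built-in status report. */
static void
list_pools(void)
{
	rte_mempool_walk(print_pool, stdout);
	rte_mempool_list_dump(stdout);
}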