X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool.h;h=76b5b3b15232b44f0e91a03afad39dcf1b7d98fb;hb=92ac1d5a13859e5dc65c94aaa94559680f2e26c9;hp=991feaa795966c0afa9e341cab8f7f2c591f5596;hpb=93092a56101b5ce10679004d07ce434a0eabcd97;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 991feaa795..76b5b3b152 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -51,13 +51,15 @@
  * meta-data in the object data and retrieve them when allocating a
  * new object.
  *
- * Note: the mempool implementation is not preemptable. A lcore must
- * not be interrupted by another task that uses the same mempool
- * (because it uses a ring which is not preemptable). Also, mempool
- * functions must not be used outside the DPDK environment: for
- * example, in linuxapp environment, a thread that is not created by
- * the EAL must not use mempools. This is due to the per-lcore cache
- * that won't work as rte_lcore_id() will not return a correct value.
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
  */
 
 #include <stdio.h>
@@ -654,7 +656,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *     when using rte_mempool_get() or rte_mempool_get_bulk() is
  *     "single-consumer". Otherwise, it is "multi-consumers".
  *   - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
- *     necessarilly be contiguous in physical memory.
+ *     necessarily be contiguous in physical memory.
  * @return
  *   The pointer to the new allocated mempool, on success. NULL on error
  *   with rte_errno set appropriately. Possible rte_errno values include:
@@ -794,7 +796,7 @@ rte_mempool_free(struct rte_mempool *mp);
  * Add physically contiguous memory for objects in the pool at init
  *
  * Add a virtually and physically contiguous memory chunk in the pool
- * where objects can be instanciated.
+ * where objects can be instantiated.
  *
  * If the given physical address is unknown (paddr = RTE_BAD_PHYS_ADDR),
  * the chunk doesn't need to be physically contiguous (only virtually),
@@ -825,7 +827,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  * Add physical memory for objects in the pool at init
  *
  * Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated. The physical addresses corresponding to the virtual
+ * be instantiated. The physical addresses corresponding to the virtual
  * area are described in paddr[], pg_num, pg_shift.
  *
  * @param mp
@@ -856,7 +858,7 @@ int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
  * Add virtually contiguous memory for objects in the pool at init
  *
  * Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated.
+ * be instantiated.
  *
  * @param mp
  *   A pointer to the mempool structure.
@@ -991,7 +993,7 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
  * @param mp
  *   A pointer to the mempool.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 rte_mempool_cache_flush(struct rte_mempool_cache *cache,
 			struct rte_mempool *mp)
 {
@@ -1009,7 +1011,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
  * @return
  *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
  */
-static inline struct rte_mempool_cache *__attribute__((always_inline))
+static __rte_always_inline struct rte_mempool_cache *
 rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
 {
 	if (mp->cache_size == 0)
@@ -1036,7 +1038,7 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
  *   The flags used for the mempool creation.
  *   Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 		      unsigned n, struct rte_mempool_cache *cache)
 {
@@ -1098,7 +1100,7 @@ ring_enqueue:
  *   The flags used for the mempool creation.
  *   Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n, struct rte_mempool_cache *cache,
 			__rte_unused int flags)
@@ -1121,7 +1123,7 @@ rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
  * @param n
  *   The number of objects to add in the mempool from obj_table.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 		     unsigned n)
 {
@@ -1142,7 +1144,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param obj
  *   A pointer to the object to be added.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 rte_mempool_put(struct rte_mempool *mp, void *obj)
 {
 	rte_mempool_put_bulk(mp, &obj, 1);
@@ -1165,7 +1167,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   - >=0: Success; number of objects supplied.
  *   - <0: Error; code of ring dequeue function.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 		      unsigned n, struct rte_mempool_cache *cache)
 {
@@ -1246,7 +1248,7 @@ ring_dequeue:
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
 			struct rte_mempool_cache *cache, __rte_unused int flags)
 {
@@ -1279,7 +1281,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
  *   - 0: Success; objects taken
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
 	struct rte_mempool_cache *cache;
@@ -1307,7 +1309,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 {
 	return rte_mempool_get_bulk(mp, obj_p, 1);
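
The updated header comment in the first hunk recommends that non-EAL threads use rte_mempool_generic_get()/rte_mempool_generic_put() with a user-owned cache created by rte_mempool_cache_create(). A minimal sketch of that pattern follows; it is not part of the patch, and the worker() wrapper, the mempool argument and the cache size of 32 are illustrative assumptions only.

/*
 * Sketch (not from the patch): a non-EAL thread using the generic
 * get/put API with its own cache, as recommended by the updated note.
 */
#include <rte_mempool.h>

static int
worker(void *arg)
{
	struct rte_mempool *mp = arg;	/* assumed: pool created elsewhere */
	struct rte_mempool_cache *cache;
	void *obj;

	/* Per-thread cache owned by this (non-EAL) thread; 32 is arbitrary. */
	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	/* Get one object through the user-owned cache... */
	if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) == 0) {
		/* ... use the object ... */

		/* ... and return it through the same cache. */
		rte_mempool_generic_put(mp, &obj, 1, cache, 0);
	}

	/* Return any objects still held in the cache before freeing it. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return 0;
}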