-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
*/
#ifndef _RTE_MEMPOOL_H_
* meta-data in the object data and retrieve them when allocating a
* new object.
*
- * Note: the mempool implementation is not preemptable. A lcore must
- * not be interrupted by another task that uses the same mempool
- * (because it uses a ring which is not preemptable). Also, mempool
- * functions must not be used outside the DPDK environment: for
- * example, in linuxapp environment, a thread that is not created by
- * the EAL must not use mempools. This is due to the per-lcore cache
- * that won't work as rte_lcore_id() will not return a correct value.
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
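+ *
+ * As a sketch only (assuming an existing mempool pointer "mp" and a batch
+ * size of 8 that suits the caller), a non-EAL thread could use a
+ * user-owned cache like this:
+ *
+ * @code{.c}
+ * struct rte_mempool_cache *uc;
+ * void *objs[8];
+ *
+ * uc = rte_mempool_cache_create(32, SOCKET_ID_ANY);
+ * if (uc != NULL && rte_mempool_generic_get(mp, objs, 8, uc) == 0) {
+ *         // use the objects, then return them through the same cache
+ *         rte_mempool_generic_put(mp, objs, 8, uc);
+ *         rte_mempool_cache_flush(uc, mp);
+ * }
+ * rte_mempool_cache_free(uc);
+ * @endcode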
*/
#include <stdio.h>
#include <inttypes.h>
#include <sys/queue.h>
+#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
#ifdef __cplusplus
extern "C" {
* A structure that stores a per-core object cache.
*/
struct rte_mempool_cache {
- unsigned len; /**< Cache len */
+ uint32_t size; /**< Size of the cache */
+ uint32_t flushthresh; /**< Threshold before we flush excess elements */
+ uint32_t len; /**< Current cache count */
/*
* Cache is allocated to this size to allow it to overflow in certain
* cases to avoid needless emptying of cache.
/**< Total size of an object (header + elt + trailer). */
};
-#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool. */
+/** Maximum length of a memory pool's name. */
+#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
+ sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"
/* "MP_<name>" */
struct rte_mempool_objhdr {
STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the object. */
- phys_addr_t physaddr; /**< Physical address of the object. */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the object. */
+ phys_addr_t physaddr; /**< deprecated - Physical address of the object. */
+ };
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
uint64_t cookie; /**< Debug cookie. */
#endif
*/
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+
/**
* Mempool object trailer structure
*
* trailer structure containing a cookie preventing memory corruptions.
*/
struct rte_mempool_objtlr {
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
uint64_t cookie; /**< Debug cookie. */
-#endif
};
+#endif
+
/**
* A list of memory where objects are stored
*/
STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the chunk */
void *addr; /**< Virtual address of the chunk */
- phys_addr_t phys_addr; /**< Physical address of the chunk */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the chunk */
+ phys_addr_t phys_addr; /**< Physical address of the chunk */
+ };
size_t len; /**< length of the chunk */
rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
void *opaque; /**< Argument passed to the free callback */
* The RTE mempool structure.
*/
struct rte_mempool {
- char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
+ /*
+	 * Note: this field keeps the RTE_MEMZONE_NAMESIZE size due to ABI
+	 * compatibility requirements; it could be changed to
+	 * RTE_MEMPOOL_NAMESIZE the next time the ABI changes.
+ */
+ char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
+ RTE_STD_C11
union {
void *pool_data; /**< Ring or pool to store objects. */
uint64_t pool_id; /**< External mempool identifier. */
};
void *pool_config; /**< optional args for ops alloc. */
const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
- int flags; /**< Flags of the mempool. */
+ unsigned int flags; /**< Flags of the mempool. */
int socket_id; /**< Socket id passed at create. */
uint32_t size; /**< Max size of the mempool. */
- uint32_t cache_size; /**< Size of per-lcore local cache. */
- uint32_t cache_flushthresh;
- /**< Threshold before we flush excess elements. */
+ uint32_t cache_size;
+ /**< Size of per-lcore default local cache. */
uint32_t elt_size; /**< Size of an element. */
uint32_t header_size; /**< Size of header (before elt). */
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
-#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
+#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
+#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
+/**
+ * This capability flag is advertised by a mempool handler if the whole
+ * memory area containing the objects must be physically contiguous.
+ * Note: This flag should not be passed by the application.
+ */
+#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
+/**
+ * This capability flag is advertised by a mempool handler when the driver
+ * wants the object start address (vaddr) to be aligned to the block size
+ * (i.e. the total element size).
+ *
+ * Note:
+ * - This flag should not be passed by the application; it is meant for
+ *   mempool drivers only.
+ * - A mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG along with
+ *   MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS.
+ */
+#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
/**
* @internal When debug is enabled, store some statistics.
* Prototype for implementation specific data provisioning function.
*
* The function should provide the implementation specific memory for
- * for use by the other mempool ops functions in a given mempool ops struct.
+ * use by the other mempool ops functions in a given mempool ops struct.
* E.g. the default ops provides an instance of the rte_ring for this purpose.
* it will most likely point to a different type of data structure, and
* will be transparent to the application programmer.
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+/**
+ * Get the mempool capabilities.
+ */
+typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
+ unsigned int *flags);
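+
+/*
+ * Illustrative sketch only, not part of any existing driver: a handler
+ * implementing this callback might advertise the capability flags defined
+ * above like so ("my_get_capabilities" is a hypothetical name).
+ *
+ *	static int
+ *	my_get_capabilities(const struct rte_mempool *mp __rte_unused,
+ *			unsigned int *flags)
+ *	{
+ *		*flags |= MEMPOOL_F_CAPA_PHYS_CONTIG |
+ *			MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS;
+ *		return 0;
+ *	}
+ */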
+
+/**
+ * Notify the mempool of a new memory area.
+ */
+typedef int (*rte_mempool_ops_register_memory_area_t)
+(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
+ /**
+ * Get the mempool capabilities
+ */
+ rte_mempool_get_capabilities_t get_capabilities;
+ /**
+	 * Notify the mempool of a new memory area.
+ */
+ rte_mempool_ops_register_memory_area_t register_memory_area;
} __rte_cache_aligned;
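+
+/*
+ * A minimal sketch (all "my_*" names are hypothetical, not from this patch)
+ * of how a handler could wire up the two new callbacks and register itself
+ * with MEMPOOL_REGISTER_OPS():
+ *
+ *	static const struct rte_mempool_ops my_ops = {
+ *		.name = "my_handler",
+ *		.alloc = my_alloc,
+ *		.free = my_free,
+ *		.enqueue = my_enqueue,
+ *		.dequeue = my_dequeue,
+ *		.get_count = my_get_count,
+ *		.get_capabilities = my_get_capabilities,
+ *		.register_memory_area = my_register_memory_area,
+ *	};
+ *	MEMPOOL_REGISTER_OPS(my_ops);
+ */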
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
+/**
+ * @internal wrapper for mempool_ops get_capabilities callback.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[out] flags
+ *   Pointer to the mempool flags.
+ * @return
+ *   - 0: Success; the mempool driver has advertised its pool capabilities in
+ *     the flags parameter.
+ *   - -ENOTSUP: the driver does not support the get_capabilities op
+ *     (valid case).
+ *   - Otherwise, pool creation fails.
+ */
+int
+rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
+ unsigned int *flags);
+/**
+ * @internal wrapper for mempool_ops register_memory_area callback.
+ * API to notify the mempool handler when a new memory area is added to
+ * the pool.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param vaddr
+ * Pointer to the buffer virtual address.
+ * @param iova
+ *   IO address of the buffer.
+ * @param len
+ *   Length of the memory area.
+ * @return
+ *   - 0: Success;
+ *   - -ENOTSUP: the driver does not support the register_memory_area op
+ *     (valid error case).
+ *   - Otherwise, rte_mempool_populate_iova() fails, thus pool creation fails.
+ */
+int
+rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
+ char *vaddr, rte_iova_t iova, size_t len);
+
/**
* @internal wrapper for mempool_ops free callback.
*
/**
* Macro to statically register the ops of a mempool handler.
* Note that the rte_mempool_register_ops fails silently here when
- * more then RTE_MEMPOOL_MAX_OPS_IDX is registered.
+ * more than RTE_MEMPOOL_MAX_OPS_IDX ops are registered.
*/
#define MEMPOOL_REGISTER_OPS(ops) \
void mp_hdlr_init_##ops(void); \
/**
* Create a new mempool named *name* in memory.
*
- * This function uses ``memzone_reserve()`` to allocate memory. The
+ * This function uses ``rte_memzone_reserve()`` to allocate memory. The
* pool contains n elements of elt_size. Its size is set to n.
- * All elements of the mempool are allocated together with the mempool header,
- * in one physically continuous chunk of memory.
*
* @param name
* The name of the mempool.
* never be used. The access to the per-lcore table is of course
* faster than the multi-producer/consumer pool. The cache can be
* disabled if the cache_size argument is set to 0; it can be useful to
- * avoid losing objects in cache. Note that even if not used, the
- * memory space for cache is always reserved in a mempool structure,
- * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * avoid losing objects in cache.
* @param private_data_size
* The size of the private data appended after the mempool
* structure. This is useful for storing some private data after the
* - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
* when using rte_mempool_get() or rte_mempool_get_bulk() is
* "single-consumer". Otherwise, it is "multi-consumers".
- * - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
- * necessarilly be contiguous in physical memory.
+ * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
+ * necessarily be contiguous in IO memory.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
* with rte_errno set appropriately. Possible rte_errno values include:
* @param vaddr
* Virtual address of the externally allocated memory buffer.
* Will be used to store mempool objects.
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
+ * @param iova
+ *   Array of IO addresses of the pages that comprise the given memory
+ *   buffer.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
* @return
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
/**
* Create an empty mempool
*
* The mempool is allocated and initialized, but it is not populated: no
* memory is allocated for the mempool elements. The user has to call
- * rte_mempool_populate_*() or to add memory chunks to the pool. Once
+ * rte_mempool_populate_*() to add memory chunks to the pool. Once
* populated, the user may also want to initialize each object with
* rte_mempool_obj_iter().
*
* Add physically contiguous memory for objects in the pool at init
*
* Add a virtually and physically contiguous memory chunk in the pool
- * where objects can be instanciated.
+ * where objects can be instantiated.
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
*
* @param mp
* A pointer to the mempool structure.
* @param vaddr
* The virtual address of memory that should be used to store objects.
- * @param paddr
- * The physical address
+ * @param iova
+ * The IO address
* @param len
* The length of memory in bytes.
* @param free_cb
* On error, the chunk is not added in the memory list of the
* mempool and a negative errno is returned.
*/
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
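+
+/*
+ * Sketch only (the buffer, "chunk_len" and "mp" are assumptions, and freeing
+ * of the buffer is omitted): when the IO address of a chunk is unknown, it
+ * can still be added with RTE_BAD_IOVA, e.g.
+ *
+ *	char *buf = rte_zmalloc(NULL, chunk_len, RTE_CACHE_LINE_SIZE);
+ *	int ret = rte_mempool_populate_iova(mp, buf, RTE_BAD_IOVA, chunk_len,
+ *		NULL, NULL);
+ */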
+
+__rte_deprecated
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque);
* Add physical memory for objects in the pool at init
*
* Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated. The physical addresses corresponding to the virtual
- * area are described in paddr[], pg_num, pg_shift.
+ * be instantiated. The IO addresses corresponding to the virtual
+ * area are described in iova[], pg_num, pg_shift.
*
* @param mp
* A pointer to the mempool structure.
* @param vaddr
* The virtual address of memory that should be used to store objects.
- * @param paddr
- * An array of physical addresses of each page composing the virtual
- * area.
+ * @param iova
+ * An array of IO addresses of each page composing the virtual area.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
* @param free_cb
* On error, the chunks are not added in the memory list of the
* mempool and a negative errno is returned.
*/
+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
+
+__rte_deprecated
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
* Add virtually contiguous memory for objects in the pool at init
*
* Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated.
+ * be instantiated.
*
* @param mp
* A pointer to the mempool structure.
rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
/**
- * Dump the status of the mempool to the console.
+ * Dump the status of the mempool to a file.
*
* @param f
* A pointer to a file for output
*/
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
+/**
+ * Create a user-owned mempool cache.
+ *
+ * This can be used by non-EAL threads to enable caching when they
+ * interact with a mempool.
+ *
+ * @param size
+ * The size of the mempool cache. See rte_mempool_create()'s cache_size
+ * parameter description for more information. The same limits and
+ * considerations apply here too.
+ * @param socket_id
+ * The socket identifier in the case of NUMA. The value can be
+ * SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
+ * @return
+ *   A pointer to the mempool cache or NULL on error (e.g. allocation failure).
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id);
+
+/**
+ * Free a user-owned mempool cache.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ */
+void
+rte_mempool_cache_free(struct rte_mempool_cache *cache);
+
+/**
+ * Flush a user-owned mempool cache to the specified mempool.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ * @param mp
+ * A pointer to the mempool.
+ */
+static __rte_always_inline void
+rte_mempool_cache_flush(struct rte_mempool_cache *cache,
+ struct rte_mempool *mp)
+{
+ rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
+ cache->len = 0;
+}
+
+/**
+ * Get a pointer to the per-lcore default mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param lcore_id
+ * The logical core id.
+ * @return
+ * A pointer to the mempool cache or NULL if disabled or non-EAL thread.
+ */
+static __rte_always_inline struct rte_mempool_cache *
+rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
+{
+ if (mp->cache_size == 0)
+ return NULL;
+
+ if (lcore_id >= RTE_MAX_LCORE)
+ return NULL;
+
+ return &mp->local_cache[lcore_id];
+}
+
/**
* @internal Put several objects back in the mempool; used internally.
* @param mp
* @param n
* The number of objects to store back in the mempool, must be strictly
* positive.
- * @param is_mp
- * Mono-producer (0) or multi-producers (1).
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
*/
-static inline void __attribute__((always_inline))
-__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+static __rte_always_inline void
+__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
- struct rte_mempool_cache *cache;
- uint32_t index;
void **cache_objs;
- unsigned lcore_id = rte_lcore_id();
- uint32_t cache_size = mp->cache_size;
- uint32_t flushthresh = mp->cache_flushthresh;
/* increment stat now, adding in mempool always success */
__MEMPOOL_STAT_ADD(mp, put, n);
- /* cache is not enabled or single producer or non-EAL thread */
- if (unlikely(cache_size == 0 || is_mp == 0 ||
- lcore_id >= RTE_MAX_LCORE))
+	/* No cache provided, or the put would overflow the cache memory */
+ if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
goto ring_enqueue;
- /* Go straight to ring if put would overflow mem allocated for cache */
- if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
- goto ring_enqueue;
-
- cache = &mp->local_cache[lcore_id];
cache_objs = &cache->objs[cache->len];
/*
*/
/* Add elements back into the cache */
- for (index = 0; index < n; ++index, obj_table++)
- cache_objs[index] = *obj_table;
+ rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);
cache->len += n;
- if (cache->len >= flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache_size],
- cache->len - cache_size);
- cache->len = cache_size;
+ if (cache->len >= cache->flushthresh) {
+ rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
+ cache->len - cache->size);
+ cache->len = cache->size;
}
return;
/**
- * Put several objects back in the mempool (multi-producers safe).
+ * Put several objects back in the mempool.
*
* @param mp
* A pointer to the mempool structure.
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the mempool from the obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
*/
-static inline void __attribute__((always_inline))
-rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
-{
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, 1);
-}
-
-/**
- * Put several objects back in the mempool (NOT multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the mempool from obj_table.
- */
-static inline void
-rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
+static __rte_always_inline void
+rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
__mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, 0);
+ __mempool_generic_put(mp, obj_table, n, cache);
}
/**
* @param n
* The number of objects to add in the mempool from obj_table.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
+ unsigned int n)
{
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
-}
-
-/**
- * Put one object in the mempool (multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj
- * A pointer to the object to be added.
- */
-static inline void __attribute__((always_inline))
-rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
-{
- rte_mempool_mp_put_bulk(mp, &obj, 1);
-}
-
-/**
- * Put one object back in the mempool (NOT multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj
- * A pointer to the object to be added.
- */
-static inline void __attribute__((always_inline))
-rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
-{
- rte_mempool_sp_put_bulk(mp, &obj, 1);
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_generic_put(mp, obj_table, n, cache);
}
/**
* @param obj
* A pointer to the object to be added.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_put_bulk(mp, &obj, 1);
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to get, must be strictly positive.
- * @param is_mc
- * Mono-consumer (0) or multi-consumers (1).
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
* @return
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
-static inline int __attribute__((always_inline))
-__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
- unsigned n, int is_mc)
+static __rte_always_inline int
+__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
int ret;
- struct rte_mempool_cache *cache;
uint32_t index, len;
void **cache_objs;
- unsigned lcore_id = rte_lcore_id();
- uint32_t cache_size = mp->cache_size;
- /* cache is not enabled or single consumer */
- if (unlikely(cache_size == 0 || is_mc == 0 ||
- n >= cache_size || lcore_id >= RTE_MAX_LCORE))
+ /* No cache provided or cannot be satisfied from cache */
+ if (unlikely(cache == NULL || n >= cache->size))
goto ring_dequeue;
- cache = &mp->local_cache[lcore_id];
cache_objs = cache->objs;
/* Can this be satisfied from the cache? */
if (cache->len < n) {
/* No. Backfill the cache first, and then fill from it */
- uint32_t req = n + (cache_size - cache->len);
+ uint32_t req = n + (cache->size - cache->len);
/* How many do we require i.e. number to fill the cache + the request */
ret = rte_mempool_ops_dequeue_bulk(mp,
}
/**
- * Get several objects from the mempool (multi-consumers safe).
+ * Get several objects from the mempool.
*
* If cache is enabled, objects will be retrieved first from cache,
* subsequently from the common pool. Note that it can return -ENOENT when
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to get from mempool to obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
* @return
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+static __rte_always_inline int
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
int ret;
- ret = __mempool_get_bulk(mp, obj_table, n, 1);
- if (ret == 0)
- __mempool_check_cookies(mp, obj_table, n, 1);
- return ret;
-}
-
-/**
- * Get several objects from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to get from the mempool to obj_table.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is
- * retrieved.
- */
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
- int ret;
- ret = __mempool_get_bulk(mp, obj_table, n, 0);
+ ret = __mempool_generic_get(mp, obj_table, n, cache);
if (ret == 0)
__mempool_check_cookies(mp, obj_table, n, 1);
return ret;
* - 0: Success; objects taken
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
-rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
- int ret;
- ret = __mempool_get_bulk(mp, obj_table, n,
- !(mp->flags & MEMPOOL_F_SC_GET));
- if (ret == 0)
- __mempool_check_cookies(mp, obj_table, n, 1);
- return ret;
-}
-
-/**
- * Get one object from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
+static __rte_always_inline int
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
- return rte_mempool_mc_get_bulk(mp, obj_p, 1);
-}
-
-/**
- * Get one object from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
-{
- return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ return rte_mempool_generic_get(mp, obj_table, n, cache);
}
/**
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_get_bulk(mp, obj_p, 1);
*
* When cache is enabled, this function has to browse the length of
* all lcores, so it should not be used in a data path, but only for
- * debug purposes.
+ * debug purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
* @return
* The number of entries in the mempool.
*/
-unsigned rte_mempool_count(const struct rte_mempool *mp);
+unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
/**
- * Return the number of free entries in the mempool ring.
- * i.e. how many entries can be freed back to the mempool.
- *
- * NOTE: This corresponds to the number of elements *allocated* from the
- * memory pool, not the number of elements in the pool itself. To count
- * the number elements currently available in the pool, use "rte_mempool_count"
+ * Return the number of elements which have been allocated from the mempool
*
* When cache is enabled, this function has to browse the length of
* all lcores, so it should not be used in a data path, but only for
* @return
- *   The number of free entries in the mempool.
+ *   The number of elements which have been allocated from the mempool.
*/
-static inline unsigned
-rte_mempool_free_count(const struct rte_mempool *mp)
-{
- return mp->size - rte_mempool_count(mp);
-}
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp);
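+
+/*
+ * Sketch of a debug check, not part of this patch: before tearing a pool
+ * down, leaked objects can be spotted with
+ *
+ *	unsigned int in_use = rte_mempool_in_use_count(mp);
+ *
+ *	if (in_use != 0)
+ *		RTE_LOG(WARNING, MEMPOOL, "%u objects still in use\n", in_use);
+ */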
/**
* Test if the mempool is full.
*
* When cache is enabled, this function has to browse the length of all
* lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
- return !!(rte_mempool_count(mp) == mp->size);
+ return !!(rte_mempool_avail_count(mp) == mp->size);
}
/**
*
* When cache is enabled, this function has to browse the length of all
* lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
- return !!(rte_mempool_count(mp) == 0);
+ return !!(rte_mempool_avail_count(mp) == 0);
}
/**
- * Return the physical address of elt, which is an element of the pool mp.
+ * Return the IO address of elt, which is an element of the pool mp.
*
- * @param mp
- * A pointer to the mempool structure.
* @param elt
* A pointer (virtual address) to the element of the pool.
* @return
- * The physical address of the elt element.
- * If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
- * returned value is RTE_BAD_PHYS_ADDR.
+ * The IO address of the elt element.
+ * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
+ * returned value is RTE_BAD_IOVA.
*/
-static inline phys_addr_t
-rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
+static inline rte_iova_t
+rte_mempool_virt2iova(const void *elt)
{
const struct rte_mempool_objhdr *hdr;
hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
sizeof(*hdr));
- return hdr->physaddr;
+ return hdr->iova;
+}
+
+__rte_deprecated
+static inline phys_addr_t
+rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
+{
+ return rte_mempool_virt2iova(elt);
}
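+
+/*
+ * Sketch only ("mp" is an assumed, existing mempool): a typical use is to
+ * translate an object obtained from the pool into an address a device can
+ * DMA to/from, e.g.
+ *
+ *	void *obj;
+ *
+ *	if (rte_mempool_get(mp, &obj) == 0) {
+ *		rte_iova_t io = rte_mempool_virt2iova(obj);
+ *		// ... program "io" into a device descriptor ...
+ *		rte_mempool_put(mp, obj);
+ *	}
+ */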
/**
* by rte_mempool_calc_obj_size().
* @param pg_shift
* LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param flags
+ * The mempool flags.
* @return
* Required memory size aligned at page boundary.
*/
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
- uint32_t pg_shift);
+ uint32_t pg_shift, unsigned int flags);
/**
* Get the size of memory required to store mempool elements.
* @param total_elt_sz
* The size of each element, including header and trailer, as returned
* by rte_mempool_calc_obj_size().
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
+ * @param iova
+ *   Array of IO addresses of the pages that comprise the given memory
+ *   buffer.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
+ * @param flags
+ * The mempool flags.
* @return
* On success, the number of bytes needed to store given number of
* objects, aligned to the given page size. If the provided memory
* is the actual number of elements that can be stored in that buffer.
*/
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
- uint32_t pg_shift);
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
+ uint32_t pg_shift, unsigned int flags);
/**
* Walk list of all memory pools