/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _RTE_MEMPOOL_H_
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_memory.h>
#include <rte_lcore.h>
+#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
*/
struct rte_mempool_cache {
unsigned len; /**< Cache len */
- void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE]; /**< Cache objects */
+ /*
+ * Cache is allocated to this size to allow it to overflow in certain
+ * cases, to avoid needlessly emptying the cache.
+ */
+ void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
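The tripled allocation follows from the new flush scheme: between flushes the cache may legally hold up to flushthresh - 1 objects, and a single put can add up to RTE_MEMPOOL_CACHE_MAX_SIZE more before the threshold check runs. Below is a standalone sanity check of that bound, assuming the create-time threshold is roughly cache_size * 1.5 (the flush-threshold multiplier from the companion rte_mempool.c change, which is not shown in this hunk) and an illustrative RTE_MEMPOOL_CACHE_MAX_SIZE:

#include <assert.h>

#define RTE_MEMPOOL_CACHE_MAX_SIZE 512	/* illustrative build-time value */

int main(void)
{
	unsigned cache_size, flushthresh, worst;

	for (cache_size = 1; cache_size <= RTE_MEMPOOL_CACHE_MAX_SIZE; cache_size++) {
		flushthresh = (cache_size * 3 + 1) / 2;	/* ~1.5x, rounded up */
		/*
		 * A put may find len == flushthresh - 1 and still add up to
		 * RTE_MEMPOOL_CACHE_MAX_SIZE objects before flushing.
		 */
		worst = (flushthresh - 1) + RTE_MEMPOOL_CACHE_MAX_SIZE;
		assert(worst <= RTE_MEMPOOL_CACHE_MAX_SIZE * 3);
	}
	return 0;
}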
phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
int flags; /**< Flags of the mempool. */
uint32_t size; /**< Size of the mempool. */
- uint32_t bulk_default; /**< Default bulk count. */
uint32_t cache_size; /**< Size of per-lcore local cache. */
+ uint32_t cache_flushthresh; /**< Threshold before we flush excess elements. */
uint32_t elt_size; /**< Size of an element. */
uint32_t header_size; /**< Size of header (before elt). */
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
/**
- * When debug is enabled, store some statistics.
+ * @internal When debug is enabled, store some statistics.
* @param mp
* Pointer to the memory pool.
* @param name
#endif
/**
- * Get a pointer to a mempool pointer in the object header.
+ * @internal Get a pointer to a mempool pointer in the object header.
* @param obj
* Pointer to object.
* @return
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
- * Check and update cookies or panic.
+ * @internal Check and update cookies or panic.
*
* @param mp
* Pointer to the memory pool.
* Creates a new mempool named *name* in memory.
*
* This function uses ``memzone_reserve()`` to allocate memory. The
- * pool contains n elements of elt_size. Its size is set to n. By
- * default, bulk_default_count (the default number of elements to
- * get/put in the pool) is set to 1. @see rte_mempool_set_bulk_count()
- * to modify this valule.
+ * pool contains n elements of elt_size. Its size is set to n.
*
* @param name
* The name of the mempool.
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
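For reference, a minimal usage sketch of the creation path. The prototype is only partially visible in this hunk; the sketch assumes the usual parameter order (name, n, elt_size, cache_size, private_data_size, mp_init, mp_init_arg, obj_init, obj_init_arg, socket_id, flags), and all names and sizes are illustrative:

#include <rte_mempool.h>
#include <rte_lcore.h>
#include <rte_debug.h>

#define NUM_ELEMS 8192
#define ELEM_SIZE 2048
#define CACHE_SZ  256	/* per-lcore cache; 0 disables caching */

static struct rte_mempool *example_pool;

static void create_example_pool(void)
{
	/* No per-pool private area and no constructors in this sketch. */
	example_pool = rte_mempool_create("example_pool", NUM_ELEMS, ELEM_SIZE,
					  CACHE_SZ, 0,
					  NULL, NULL,	/* mp_init, mp_init_arg */
					  NULL, NULL,	/* obj_init, obj_init_arg */
					  rte_socket_id(), 0);
	if (example_pool == NULL)
		rte_panic("cannot create example_pool\n");
}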
-/**
- * Set the default bulk count for put/get.
- *
- * The *count* parameter is the default number of bulk elements to
- * get/put when using ``rte_mempool_*_{en,de}queue_bulk()``. It must
- * be greater than 0 and less than half of the mempool size.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param count
- * A new water mark value.
- * @return
- * - 0: Success; default_bulk_count changed.
- * - -EINVAL: Invalid count value.
- */
-static inline int
-rte_mempool_set_bulk_count(struct rte_mempool *mp, unsigned count)
-{
- if (unlikely(count == 0 || count >= mp->size))
- return -EINVAL;
-
- mp->bulk_default = count;
- return 0;
-}
-
-/**
- * Get the default bulk count for put/get.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @return
- * The default bulk count for enqueue/dequeue.
- */
-static inline unsigned
-rte_mempool_get_bulk_count(struct rte_mempool *mp)
-{
- return mp->bulk_default;
-}
-
/**
* Dump the status of the mempool to the console.
*
* @param is_mp
* Mono-producer (0) or multi-producers (1).
*/
-static inline void
+static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n, int is_mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
struct rte_mempool_cache *cache;
- uint32_t cache_len;
+ uint32_t index;
void **cache_objs;
unsigned lcore_id = rte_lcore_id();
uint32_t cache_size = mp->cache_size;
- uint32_t cache_add_count;
+ uint32_t flushthresh = mp->cache_flushthresh;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* increment stat now, adding in mempool always success */
if (unlikely(cache_size == 0 || is_mp == 0))
goto ring_enqueue;
- cache = &mp->local_cache[lcore_id];
- cache_len = cache->len;
- cache_objs = cache->objs;
-
- /* cache is full and we add many objects: enqueue in ring */
- if (unlikely(cache_len == cache_size && n >= cache_size))
+ /* Go straight to ring if put would overflow mem allocated for cache */
+ if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
goto ring_enqueue;
+ cache = &mp->local_cache[lcore_id];
+ cache_objs = &cache->objs[cache->len];
+
/*
- * cache is full and we add few objects: enqueue the content
- * of the cache in ring
+ * The cache follows this algorithm:
+ * 1. Add the objects to the cache.
+ * 2. If the cache length then crosses the flush threshold, everything
+ * above cache_size is flushed back to the ring.
*/
- if (unlikely(cache_len == cache_size)) {
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- if (rte_ring_mp_enqueue_bulk(mp->ring, cache->objs,
- cache_size) < 0)
- rte_panic("cannot put objects in mempool\n");
-#else
- rte_ring_mp_enqueue_bulk(mp->ring, cache->objs,
- cache_size);
-#endif
- cache_len = 0;
- }
- /* determine how many objects we can add in cache */
- if (likely(n <= cache_size - cache_len))
- cache_add_count = n;
- else
- cache_add_count = cache_size - cache_len;
-
- /* add in cache while there is enough room */
- while (cache_add_count > 0) {
- cache_objs[cache_len] = *obj_table;
- obj_table++;
- cache_len++;
- n--;
- cache_add_count--;
- }
+ /* Add elements back into the cache */
+ for (index = 0; index < n; ++index, obj_table++)
+ cache_objs[index] = *obj_table;
- cache->len = cache_len;
+ cache->len += n;
- /* no more object to add, return */
- if (likely(n == 0))
- return;
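+ /*
+ * The ring was sized at create time to hold every object in the
+ * pool, so this spill-back enqueue cannot overflow it; the return
+ * value therefore does not need to be checked.
+ */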
+ if (cache->len >= flushthresh) {
+ rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+ cache->len - cache_size);
+ cache->len = cache_size;
+ }
+
+ return;
- ring_enqueue:
+ring_enqueue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* push remaining objects in ring */
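To make the steady-state behaviour concrete, here is a small user-space simulation of the threshold arithmetic above. It is not DPDK code; the 1.5x threshold is an assumption carried over from the create-time multiplier, and the lengths are illustrative:

#include <stdio.h>

#define CACHE_SIZE  256
#define FLUSHTHRESH ((CACHE_SIZE * 3) / 2)	/* 384, assuming a 1.5x multiplier */

int main(void)
{
	unsigned len = 380;	/* objects already in the cache */
	unsigned n = 32;	/* objects being put back */

	len += n;		/* step 1: always add to the cache first */
	if (len >= FLUSHTHRESH) {
		/* step 2: spill everything above CACHE_SIZE to the ring */
		printf("flush %u objects to the ring\n", len - CACHE_SIZE);	/* 156 */
		len = CACHE_SIZE;
	}
	printf("cache length is now %u\n", len);	/* 256 */
	return 0;
}

The cache thus oscillates between cache_size and the flush threshold instead of being emptied on every overflow, which is what the oversized objs[] array makes safe.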
* @param n
* The number of objects to add in the mempool from the obj_table.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
* @param n
* The number of objects to add in the mempool from obj_table.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
* @param obj
* A pointer to the object to be added.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_mp_put_bulk(mp, &obj, 1);
* @param obj
* A pointer to the object to be added.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_sp_put_bulk(mp, &obj, 1);
* @param obj
* A pointer to the object to be added.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_put_bulk(mp, &obj, 1);
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
-static inline int
+static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
unsigned n, int is_mc)
{
#endif
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
struct rte_mempool_cache *cache;
- uint32_t cache_len, cache_len_save = 0;
+ uint32_t index, len;
void **cache_objs;
unsigned lcore_id = rte_lcore_id();
uint32_t cache_size = mp->cache_size;
- uint32_t cache_del_count;
-
- cache = &mp->local_cache[lcore_id];
/* cache is not enabled or single consumer */
- if (unlikely(cache_size == 0 || is_mc == 0))
+ if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
goto ring_dequeue;
- cache_len = cache->len;
+ cache = &mp->local_cache[lcore_id];
cache_objs = cache->objs;
- /* cache is empty and we need many objects: dequeue from ring */
- if (unlikely(cache_len == 0 && n >= cache_size))
- goto ring_dequeue;
+ /* Can this be satisfied from the cache? */
+ if (cache->len < n) {
+ /* No. Backfill the cache first, and then fill from it */
+ uint32_t req = n + (cache_size - cache->len);
- /* cache is empty and we dequeue few objects: fill the cache first */
- if (unlikely(cache_len == 0 && n < cache_size)) {
- ret = rte_ring_mc_dequeue_bulk(mp->ring, cache_objs,
- cache_size);
+ /* How many do we require, i.e. enough to refill the cache plus the request */
+ ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
- __MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
- return ret;
+ /*
+ * In the off chance that we are buffer constrained and cannot
+ * dequeue cache + n objects, go to the ring directly. If that
+ * fails, we are truly out of buffers.
+ */
+ goto ring_dequeue;
}
- cache_len = cache_size;
+ cache->len += req;
}
- if (likely(n <= cache_len))
- cache_del_count = n;
- else
- cache_del_count = cache_len;
+ /* Now fill in the request, taking objects from the top of the cache */
+ for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+ *obj_table = cache_objs[len];
- cache_len_save = cache_len;
+ cache->len -= n;
- /* add in cache only while there is enough room */
- while (cache_del_count > 0) {
- cache_len--;
- *obj_table = cache_objs[cache_len];
- obj_table++;
- n--;
- cache_del_count--;
- }
-
- cache->len = cache_len;
+ __MEMPOOL_STAT_ADD(mp, get_success, n_orig);
- /* no more object to get, return */
- if (likely(n == 0)) {
- __MEMPOOL_STAT_ADD(mp, get_success, n_orig);
- return 0;
- }
+ return 0;
- ring_dequeue:
+ring_dequeue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* get remaining objects from ring */
else
ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- /*
- * bad luck, the ring is empty but we already dequeued some
- * entries from cache, we have to restore them
- */
- if (unlikely(ret < 0 && cache_len_save != 0))
- cache->len = cache_len_save;
-#endif
-
if (ret < 0)
__MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
else
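A companion simulation for the get path: when the cache cannot satisfy a request smaller than cache_size, a single ring dequeue both backfills the cache and covers the request. The values are illustrative, not DPDK code:

#include <stdio.h>

#define CACHE_SIZE 256

int main(void)
{
	unsigned len = 10;	/* objects currently in the cache */
	unsigned n = 64;	/* objects requested; n < CACHE_SIZE */

	if (len < n) {
		/* one dequeue refills the cache and covers the request */
		unsigned req = n + (CACHE_SIZE - len);
		printf("dequeue %u objects from the ring\n", req);	/* 310 */
		len += req;						/* 320 */
	}
	len -= n;	/* serve the request from the top of the cache */
	printf("cache length is now %u\n", len);	/* 256 */
	return 0;
}

If the ring cannot supply the combined amount, the code above falls back to a plain ring dequeue of n, so a nearly-exhausted pool can still serve requests that the ring alone can satisfy.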
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
int ret;
* - -ENOENT: Not enough entries in the mempool; no object is
* retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
int ret;
* - 0: Success; objects taken
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
int ret;
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_mc_get_bulk(mp, obj_p, 1);
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_sc_get_bulk(mp, obj_p, 1);
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_get_bulk(mp, obj_p, 1);
unsigned rte_mempool_count(const struct rte_mempool *mp);
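A small illustration of how the two accessors partition the pool. It assumes the rte_mempool_free_count() accessor documented below is declared later in this header:

#include <stdio.h>
#include <rte_mempool.h>

/* Print how many elements are still pooled versus handed out. */
static void show_pool_usage(const struct rte_mempool *mp)
{
	unsigned avail = rte_mempool_count(mp);		/* in the ring and caches */
	unsigned in_use = rte_mempool_free_count(mp);	/* allocated to users */

	/* The two counts always sum to the pool size. */
	printf("%s: %u available, %u in use (of %u)\n",
	       mp->name, avail, in_use, mp->size);
}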
/**
- * Return the number of free entries in the mempool.
+ * Return the number of free entries in the mempool ring, i.e. how
+ * many entries can be freed back to the mempool.
+ *
+ * NOTE: This corresponds to the number of elements *allocated* from the
+ * memory pool, not the number of elements in the pool itself. To count
+ * the number of elements currently available in the pool, use
+ * "rte_mempool_count".
*
* When cache is enabled, this function has to browse the length of
* all lcores, so it should not be used in a data path, but only for