#include <rte_memcpy.h>
#include <rte_common.h>
+#include "rte_mempool_trace_fp.h"
+
#ifdef __cplusplus
extern "C" {
#endif
#define MEMPOOL_PG_NUM_DEFAULT 1
#ifndef RTE_MEMPOOL_ALIGN
+/**
+ * Alignment of elements inside mempool.
+ */
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif
#endif
} __rte_cache_aligned;
-#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread among memory channels. */
+#define MEMPOOL_F_NO_SPREAD 0x0001
+ /**< Spreading among memory channels not required. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
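/*
 * Usage sketch (editorial, not part of this patch): the flags above are
 * passed as the last argument of rte_mempool_create(). Pool name and
 * sizing constants below are illustrative only.
 */
#include <rte_mempool.h>
#include <rte_lcore.h>

static struct rte_mempool *
create_sp_sc_pool(void)
{
	/* Single-producer put, single-consumer get, no channel spreading. */
	return rte_mempool_create("example_pool", 8192, 2048, 256, 0,
			NULL, NULL, NULL, NULL, rte_socket_id(),
			MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET |
			MEMPOOL_F_NO_SPREAD);
}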
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * Dequeue a number of contiquous object blocks from the external pool.
+ * Dequeue a number of contiguous object blocks from the external pool.
*/
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
void **first_obj_table, unsigned int n);
* @param[out] align
* Location for required memory chunk alignment.
* @return
- * Required memory size aligned at page boundary.
+ * Required memory size.
*/
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
size_t *min_chunk_size, size_t *align);
/**
- * Default way to calculate memory size required to store given number of
- * objects.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to calculate memory size required to store given
+ * number of objects.
+ *
+ * This function is internal to mempool library and mempool drivers.
*
 * If page boundaries may be ignored, it is just a product of total
 * object size including header and trailer and number of objects.
 * Otherwise, it is a number of pages required to store given number of
 * objects without crossing page boundary.
 *
 * Note that if object size is bigger than page size, then it assumes
 * that pages are grouped in subsets of physically contiguous pages big
 * enough to store at least one object.
*
- * Minimum size of memory chunk is a maximum of the page size and total
- * element size.
+ * Minimum size of memory chunk is the total element size.
+ * Required memory chunk alignment is the cache line size.
*
- * Required memory chunk alignment is a maximum of page size and cache
- * line size.
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] obj_num
+ * Number of objects to be added in mempool.
+ * @param[in] pg_shift
+ * LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[in] chunk_reserve
+ * Amount of memory that must be reserved at the beginning of each page,
+ * or at the beginning of the memory area if pg_shift is 0.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size.
*/
-ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
- uint32_t obj_num, uint32_t pg_shift,
+__rte_experimental
+ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
size_t *min_chunk_size, size_t *align);
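/*
 * Usage sketch (editorial, not part of this patch): a driver-specific
 * calc_mem_size callback can delegate to the helper. The 64-byte
 * per-page reserve below is a hypothetical driver requirement.
 */
#include <rte_mempool.h>

static ssize_t
my_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
	/* Reserve 64 bytes at the start of each page for driver metadata. */
	return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
			64, min_chunk_size, align);
}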
/**
- * @internal Helper function to calculate memory size required to store
- * specified number of objects in assumption that the memory buffer will
- * be aligned at page boundary.
- *
- * Note that if object size is bigger than page size, then it assumes
- * that pages are grouped in subsets of physically continuous pages big
- * enough to store at least one object.
+ * Default way to calculate memory size required to store given number of
+ * objects.
*
- * @param elt_num
- * Number of elements.
- * @param total_elt_sz
- * The size of each element, including header and trailer, as returned
- * by rte_mempool_calc_obj_size().
- * @param pg_shift
- * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
- * @return
- * Required memory size aligned at page boundary.
+ * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ * 0, min_chunk_size, align).
*/
-size_t rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
- uint32_t pg_shift);
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
/**
* Function to be called for each populated object.
rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/**
- * Default way to populate memory pool object using provided memory
- * chunk: just slice objects one by one.
+ * Align objects on addresses multiple of total_elt_sz.
+ */
+#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to populate memory pool object using provided memory
+ * chunk: just slice objects one by one, taking care of not
+ * crossing page boundaries.
+ *
+ * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
+ * of object headers will be aligned on a multiple of total_elt_sz.
+ * This feature is used by octeontx hardware.
+ *
+ * This function is internal to mempool library and mempool drivers.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] flags
+ * Logical OR of the following flags:
+ * - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses
+ * multiple of total_elt_sz.
+ * @param[in] max_objs
+ * Maximum number of objects to be added in mempool.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ * The IO address corresponding to vaddr, or RTE_BAD_IOVA.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added in mempool.
+ */
+__rte_experimental
+int rte_mempool_op_populate_helper(struct rte_mempool *mp,
+ unsigned int flags, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
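/*
 * Usage sketch (editorial, not part of this patch): a driver populate
 * callback that needs object headers aligned on total_elt_sz, as on
 * octeontx, can simply forward to the helper with the flag set.
 */
#include <rte_mempool.h>

static int
my_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
		rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	return rte_mempool_op_populate_helper(mp,
			RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ, max_objs,
			vaddr, iova, len, obj_cb, obj_cb_arg);
}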
+/**
+ * Default way to populate memory pool object using provided memory chunk.
+ *
+ * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
+ * len, obj_cb, obj_cb_arg).
*/
int rte_mempool_op_populate_default(struct rte_mempool *mp,
unsigned int max_objs,
{
struct rte_mempool_ops *ops;
+ rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
ops = rte_mempool_get_ops(mp->ops_index);
return ops->dequeue(mp, obj_table, n);
}
ops = rte_mempool_get_ops(mp->ops_index);
RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+ rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
{
struct rte_mempool_ops *ops;
+ rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
ops = rte_mempool_get_ops(mp->ops_index);
return ops->enqueue(mp, obj_table, n);
}
 * Note that rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX ops are registered.
*/
-#define MEMPOOL_REGISTER_OPS(ops) \
- void mp_hdlr_init_##ops(void); \
- void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
- { \
+#define MEMPOOL_REGISTER_OPS(ops) \
+ RTE_INIT(mp_hdlr_init_##ops) \
+ { \
rte_mempool_register_ops(&ops); \
}
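/*
 * Usage sketch (editorial, not part of this patch): registering a
 * hypothetical driver ops table. The five callbacks are assumed to be
 * defined elsewhere in the driver; the calc_mem_size and populate
 * slots reuse the default implementations declared above.
 */
static int my_alloc(struct rte_mempool *mp);
static void my_free(struct rte_mempool *mp);
static int my_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n);
static int my_dequeue(struct rte_mempool *mp, void **obj_table,
		unsigned int n);
static unsigned int my_get_count(const struct rte_mempool *mp);

static const struct rte_mempool_ops my_driver_ops = {
	.name = "my_driver",
	.alloc = my_alloc,
	.free = my_free,
	.enqueue = my_enqueue,
	.dequeue = my_dequeue,
	.get_count = my_get_count,
	.calc_mem_size = rte_mempool_op_calc_mem_size_default,
	.populate = rte_mempool_op_populate_default,
};

MEMPOOL_REGISTER_OPS(my_driver_ops);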
* A pointer to the mempool structure.
* @param addr
* The virtual address of memory that should be used to store objects.
- * Must be page-aligned.
* @param len
- * The length of memory in bytes. Must be page-aligned.
+ * The length of memory in bytes.
* @param pg_sz
* The size of memory pages in this virtual area.
* @param free_cb
* A pointer to the mempool structure.
* @return
* The number of objects added on success.
- * On error, the chunk is not added in the memory list of the
- * mempool and a negative errno is returned.
+ * On error, 0 is returned, rte_errno is set, and the chunk is not added in
+ * the memory list of the mempool.
*/
int rte_mempool_populate_anon(struct rte_mempool *mp);
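/*
 * Usage sketch (editorial, not part of this patch): with the error
 * convention documented above, a return of 0 from
 * rte_mempool_populate_anon() means failure with rte_errno set.
 */
#include <rte_errno.h>

static int
populate_from_anon(struct rte_mempool *mp)
{
	int n = rte_mempool_populate_anon(mp);

	return n == 0 ? -rte_errno : n;
}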
if (lcore_id >= RTE_MAX_LCORE)
return NULL;
+ rte_mempool_trace_default_cache(mp, lcore_id,
+ &mp->local_cache[lcore_id]);
return &mp->local_cache[lcore_id];
}
cache = rte_mempool_default_cache(mp, rte_lcore_id());
if (cache == NULL || cache->len == 0)
return;
+ rte_mempool_trace_cache_flush(cache, mp);
rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
cache->len = 0;
}
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned int n, struct rte_mempool_cache *cache)
{
+ rte_mempool_trace_generic_put(mp, obj_table, n, cache);
__mempool_check_cookies(mp, obj_table, n, 0);
__mempool_generic_put(mp, obj_table, n, cache);
}
{
struct rte_mempool_cache *cache;
cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
rte_mempool_generic_put(mp, obj_table, n, cache);
}
&cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
- * In the offchance that we are buffer constrained,
+ * In the off chance that we are buffer constrained,
* where we are not able to allocate cache + n, go to
* the ring directly. If that fails, we are truly out of
* buffers.
ret = __mempool_generic_get(mp, obj_table, n, cache);
if (ret == 0)
__mempool_check_cookies(mp, obj_table, n, 1);
+ rte_mempool_trace_generic_get(mp, obj_table, n, cache);
return ret;
}
{
struct rte_mempool_cache *cache;
cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
return rte_mempool_generic_get(mp, obj_table, n, cache);
}
__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
}
+ rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
return ret;
}
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
- return !!(rte_mempool_avail_count(mp) == mp->size);
+ return rte_mempool_avail_count(mp) == mp->size;
}
/**
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
- return !!(rte_mempool_avail_count(mp) == 0);
+ return rte_mempool_avail_count(mp) == 0;
}
/**
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
void *arg);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Get page size used for mempool object allocation.
+ * This function is internal to mempool library and mempool drivers.
+ */
+__rte_experimental
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
+
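/*
 * Usage sketch (editorial, not part of this patch): querying the page
 * size a mempool driver should assume for object allocation.
 */
#include <stdio.h>

static void
show_page_size(struct rte_mempool *mp)
{
	size_t pg_sz;

	if (rte_mempool_get_page_size(mp, &pg_sz) == 0)
		printf("mempool %s: page size %zu\n", mp->name, pg_sz);
}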
#ifdef __cplusplus
}
#endif