#include <rte_memcpy.h>
#include <rte_common.h>
+#include "rte_mempool_trace_fp.h"
+
#ifdef __cplusplus
extern "C" {
#endif
#define MEMPOOL_PG_NUM_DEFAULT 1
#ifndef RTE_MEMPOOL_ALIGN
+/**
+ * Alignment of elements inside mempool.
+ */
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif
#endif
} __rte_cache_aligned;
-#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread among memory channels. */
+#define MEMPOOL_F_NO_SPREAD 0x0001
+ /**< Spreading among memory channels not required. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
{
struct rte_mempool_ops *ops;
+ rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
ops = rte_mempool_get_ops(mp->ops_index);
return ops->dequeue(mp, obj_table, n);
}
ops = rte_mempool_get_ops(mp->ops_index);
RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+ rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
{
struct rte_mempool_ops *ops;
+ rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
ops = rte_mempool_get_ops(mp->ops_index);
return ops->enqueue(mp, obj_table, n);
}
* A pointer to the mempool structure.
* @return
* The number of objects added on success.
- * On error, the chunk is not added in the memory list of the
- * mempool and a negative errno is returned.
+ * On error, 0 is returned, rte_errno is set, and the chunk is not added in
+ * the memory list of the mempool.
*/
int rte_mempool_populate_anon(struct rte_mempool *mp);
if (lcore_id >= RTE_MAX_LCORE)
return NULL;
+ rte_mempool_trace_default_cache(mp, lcore_id,
+ &mp->local_cache[lcore_id]);
return &mp->local_cache[lcore_id];
}
cache = rte_mempool_default_cache(mp, rte_lcore_id());
if (cache == NULL || cache->len == 0)
return;
+ rte_mempool_trace_cache_flush(cache, mp);
rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
cache->len = 0;
}
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned int n, struct rte_mempool_cache *cache)
{
+ rte_mempool_trace_generic_put(mp, obj_table, n, cache);
__mempool_check_cookies(mp, obj_table, n, 0);
__mempool_generic_put(mp, obj_table, n, cache);
}
{
struct rte_mempool_cache *cache;
cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
rte_mempool_generic_put(mp, obj_table, n, cache);
}
ret = __mempool_generic_get(mp, obj_table, n, cache);
if (ret == 0)
__mempool_check_cookies(mp, obj_table, n, 1);
+ rte_mempool_trace_generic_get(mp, obj_table, n, cache);
return ret;
}
{
struct rte_mempool_cache *cache;
cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
return rte_mempool_generic_get(mp, obj_table, n, cache);
}
__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
}
+ rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
return ret;
}
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
- return !!(rte_mempool_avail_count(mp) == mp->size);
+ return rte_mempool_avail_count(mp) == mp->size;
}
/**
/**
 * Test if the mempool is empty.
 *
 * Checks whether no objects are currently available in the pool. As with
 * any availability query on a shared pool, the result is a point-in-time
 * snapshot and may be stale by the time the caller acts on it.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   1 if no object is available in the mempool (pool is empty),
 *   0 otherwise.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	/* '==' already evaluates to exactly 0 or 1; no '!!' needed. */
	return rte_mempool_avail_count(mp) == 0;
}
/**