	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.put_bulk += mp->stats[lcore_id].put_bulk;
		sum.put_objs += mp->stats[lcore_id].put_objs;
+		sum.put_common_pool_bulk += mp->stats[lcore_id].put_common_pool_bulk;
+		sum.put_common_pool_objs += mp->stats[lcore_id].put_common_pool_objs;
+		sum.get_common_pool_bulk += mp->stats[lcore_id].get_common_pool_bulk;
+		sum.get_common_pool_objs += mp->stats[lcore_id].get_common_pool_objs;
		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
fprintf(f, " stats:\n");
fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk);
fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs);
+ fprintf(f, " put_common_pool_bulk=%"PRIu64"\n", sum.put_common_pool_bulk);
+ fprintf(f, " put_common_pool_objs=%"PRIu64"\n", sum.put_common_pool_objs);
+ fprintf(f, " get_common_pool_bulk=%"PRIu64"\n", sum.get_common_pool_bulk);
+ fprintf(f, " get_common_pool_objs=%"PRIu64"\n", sum.get_common_pool_objs);
fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs);
fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
+ * Note: Cache stats (put_cache_bulk/objs, get_cache_bulk/objs) are not
+ * captured since they can be calculated from other stats.
+ * For example: put_cache_objs = put_objs - put_common_pool_objs.
 */
struct rte_mempool_debug_stats {
-	uint64_t put_bulk;         /**< Number of puts. */
-	uint64_t put_objs;         /**< Number of objects successfully put. */
-	uint64_t get_success_bulk; /**< Successful allocation number. */
-	uint64_t get_success_objs; /**< Objects successfully allocated. */
-	uint64_t get_fail_bulk;    /**< Failed allocation number. */
-	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
-	/** Successful allocation number of contiguous blocks. */
-	uint64_t get_success_blks;
-	/** Failed allocation number of contiguous blocks. */
-	uint64_t get_fail_blks;
+	uint64_t put_bulk;             /**< Number of puts. */
+	uint64_t put_objs;             /**< Number of objects successfully put. */
+	uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
+	uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
+	uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
+	uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
+	uint64_t get_success_bulk;     /**< Successful allocation number. */
+	uint64_t get_success_objs;     /**< Objects successfully allocated. */
+	uint64_t get_fail_bulk;        /**< Failed allocation number. */
+	uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
+	uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
+	uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
} __rte_cache_aligned;
#endif
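
For reference, the derivation mentioned in the note above can be expressed directly against the aggregated counters. The sketch below is illustrative and not part of the patch: it assumes a summed rte_mempool_debug_stats (such as the sum built in rte_mempool_dump()), requires a build with RTE_LIBRTE_MEMPOOL_DEBUG, and the helper name is hypothetical.

#include <inttypes.h>
#include <stdio.h>
#include <rte_mempool.h>

/* Hypothetical helper, illustration only: derive the cache-level object
 * count from the recorded counters, per the note above (objects put by the
 * application minus objects enqueued to the common pool). */
static void
dump_derived_cache_stats(FILE *f, const struct rte_mempool_debug_stats *sum)
{
	uint64_t put_cache_objs = sum->put_objs - sum->put_common_pool_objs;

	fprintf(f, "    put_cache_objs=%"PRIu64"\n", put_cache_objs);
}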
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
+	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
-	return ops->dequeue(mp, obj_table, n);
+	ret = ops->dequeue(mp, obj_table, n);
+	if (ret == 0) {
+		__MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
+	}
+	return ret;
}
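
The stats calls above become per-lcore counter updates only in debug builds; note they are issued only when the driver dequeue succeeds, so failed backend dequeues do not inflate the common-pool counters. The real macro lives in rte_mempool.h; the following is only a rough sketch of the shape it is expected to take once counters are addressed by their full field names, not the authoritative definition.

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* Sketch only: bump the named per-lcore counter when stats are enabled. */
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                  \
		unsigned __lcore_id = rte_lcore_id();         \
		if (__lcore_id < RTE_MAX_LCORE)               \
			(mp)->stats[__lcore_id].name += (n);  \
	} while (0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif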
/**
{
	struct rte_mempool_ops *ops;

+	__MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
+	__MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
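
A minimal usage sketch (not part of the patch) showing where the new counters surface: exercise a pool through the public get/put API, then dump it with rte_mempool_dump(). It assumes rte_eal_init() has already run and the build enables RTE_LIBRTE_MEMPOOL_DEBUG; the pool name and sizes are arbitrary.

#include <stdio.h>
#include <rte_mempool.h>

static void
common_pool_stats_demo(void)
{
	void *objs[32];
	struct rte_mempool *mp;

	/* Small pool with a per-lcore cache so both the cache and the
	 * common pool paths are exercised. */
	mp = rte_mempool_create("stats_demo", 1024, 64, 32, 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return;

	if (rte_mempool_get_bulk(mp, objs, 32) == 0)
		rte_mempool_put_bulk(mp, objs, 32);

	/* The dump now includes the put/get_common_pool_* counters. */
	rte_mempool_dump(stdout, mp);

	rte_mempool_free(mp);
}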