X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_mempool%2Frte_mempool_ops.c;h=5e226677876dfd7d5e41ffe9b4eaa1132c013e51;hb=6857fdaff5ee93de0b12f407f5dceb2f433b5aa2;hp=ea9be1eb24668397400cf84f707ffa2978da0571;hpb=05912855bcd69742eeea42c1cf987fb501b7239a;p=dpdk.git

diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index ea9be1eb24..5e22667787 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -6,10 +6,13 @@
 #include <stdio.h>
 #include <string.h>
 
+#include <rte_string_fns.h>
 #include <rte_mempool.h>
 #include <rte_errno.h>
 #include <rte_dev.h>
 
+#include "rte_mempool_trace.h"
+
 /* indirect jump table to support external memory pools. */
 struct rte_mempool_ops_table rte_mempool_ops_table = {
 	.sl = RTE_SPINLOCK_INITIALIZER,
@@ -51,7 +54,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
 
 	ops_index = rte_mempool_ops_table.num_ops++;
 	ops = &rte_mempool_ops_table.ops[ops_index];
-	snprintf(ops->name, sizeof(ops->name), "%s", h->name);
+	strlcpy(ops->name, h->name, sizeof(ops->name));
 	ops->alloc = h->alloc;
 	ops->free = h->free;
 	ops->enqueue = h->enqueue;
@@ -59,6 +62,8 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
 	ops->get_count = h->get_count;
 	ops->calc_mem_size = h->calc_mem_size;
 	ops->populate = h->populate;
+	ops->get_info = h->get_info;
+	ops->dequeue_contig_blocks = h->dequeue_contig_blocks;
 
 	rte_spinlock_unlock(&rte_mempool_ops_table.sl);
 
@@ -71,6 +76,7 @@ rte_mempool_ops_alloc(struct rte_mempool *mp)
 {
 	struct rte_mempool_ops *ops;
 
+	rte_mempool_trace_ops_alloc(mp);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->alloc(mp);
 }
@@ -81,6 +87,7 @@ rte_mempool_ops_free(struct rte_mempool *mp)
 {
 	struct rte_mempool_ops *ops;
 
+	rte_mempool_trace_ops_free(mp);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	if (ops->free == NULL)
 		return;
@@ -97,7 +104,9 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp)
 	return ops->get_count(mp);
 }
 
-/* wrapper to notify new memory area to external mempool */
+/* wrapper to calculate the memory size required to store given number
+ * of objects
+ */
 ssize_t
 rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
 				uint32_t obj_num, uint32_t pg_shift,
@@ -125,6 +134,8 @@ rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
 
 	ops = rte_mempool_get_ops(mp->ops_index);
 
+	rte_mempool_trace_ops_populate(mp, max_objs, vaddr, iova, len, obj_cb,
+		obj_cb_arg);
 	if (ops->populate == NULL)
 		return rte_mempool_op_populate_default(mp, max_objs, vaddr,
 						iova, len, obj_cb,
@@ -134,6 +145,20 @@ rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
 						obj_cb_arg);
 }
 
+/* wrapper to get additional mempool info */
+int
+rte_mempool_ops_get_info(const struct rte_mempool *mp,
+			 struct rte_mempool_info *info)
+{
+	struct rte_mempool_ops *ops;
+
+	ops = rte_mempool_get_ops(mp->ops_index);
+
+	RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
+	return ops->get_info(mp, info);
+}
+
+
 /* sets mempool ops previously registered by rte_mempool_register_ops. */
 int
 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
@@ -159,5 +184,6 @@ rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
 
 	mp->ops_index = i;
 	mp->pool_config = pool_config;
+	rte_mempool_trace_set_ops_byname(mp, name, pool_config);
 	return 0;
 }
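
A minimal usage sketch, not part of the patch itself: the new rte_mempool_ops_get_info() wrapper returns -ENOTSUP when the selected ops backend does not provide a get_info callback, so callers are expected to handle that case. The pool name, object count/size and the "ring_mp_mc" ops name below are illustrative assumptions only.

#include <errno.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_mempool.h>

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;
	struct rte_mempool_info info;
	int ret;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Create an empty pool, then bind it to a registered ops backend by
	 * name; this is the call traced by rte_mempool_trace_set_ops_byname()
	 * in the patch above. */
	mp = rte_mempool_create_empty("example_pool", 4096, 2048, 0, 0,
				      SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
	if (ret == 0)
		ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		return ret;
	}

	/* get_info is optional for a driver: backends that leave
	 * ops->get_info unset make the wrapper return -ENOTSUP. */
	ret = rte_mempool_ops_get_info(mp, &info);
	if (ret == 0)
		printf("contiguous block size: %u objects\n",
		       info.contig_block_size);
	else if (ret == -ENOTSUP)
		printf("selected ops do not report additional info\n");

	rte_mempool_free(mp);
	return 0;
}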