X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fmempool%2Fdpaa2%2Fdpaa2_hw_mempool.c;h=7d0435f51887ead8312b2ab0483a0f817abfbc9b;hb=23150bd8d8a8;hp=ce7a4c577fec301799fdf8dd3f189fc4944afe9a;hpb=3646ccf0b03688333cffee2fc94e36f47feb5ef1;p=dpdk.git

diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index ce7a4c577f..7d0435f518 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include "rte_dpaa2_mempool.h"
 
 #include 
 #include 
@@ -32,6 +33,13 @@
 struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
 static struct dpaa2_bp_list *h_bp_list;
 
+/* List of all the memseg information locally maintained in the dpaa2 driver.
+ * This is to optimize PA_to_VA searches until a better mechanism (algorithm)
+ * is available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+	= TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
 /* Dynamic logging identified for mempool */
 int dpaa2_logtype_mempool;
 
@@ -237,6 +245,35 @@ aligned:
 	}
 }
 
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+	struct dpaa2_bp_info *bp_info;
+
+	bp_info = mempool_to_bpinfo(mp);
+	if (!(bp_info->bp_list)) {
+		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+		return -ENOMEM;
+	}
+
+	return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+	struct dpaa2_bp_info *bp_info;
+
+	bp_info = mempool_to_bpinfo(mp);
+	if (!(bp_info->bp_list)) {
+		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+		return NULL;
+	}
+
+	return (struct rte_mbuf *)((uint8_t *)buf_addr -
+			bp_info->meta_data_size);
+}
+
 int
 rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 			void **obj_table, unsigned int count)
@@ -358,6 +395,41 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
 	return num_of_bufs;
 }
 
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+		void *vaddr, rte_iova_t paddr, size_t len,
+		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+	struct dpaa2_memseg *ms;
+
+	/* For each memory chunk pinned to the mempool, a linked list of the
+	 * contained memsegs is created for searching when PA to VA
+	 * conversion is required.
+	 */
+	ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
+	if (!ms) {
+		DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
+		DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+		/* If the element is not added, it only leads to a failure in
+		 * searching for the element, and the logic falls back to the
+		 * traditional DPDK memseg traversal code. So this is not a
+		 * blocking error, but an error message is printed on screen.
+		 */
+		return 0;
+	}
+
+	ms->vaddr = vaddr;
+	ms->iova = paddr;
+	ms->len = len;
+	/* Head insertions are generally faster than tail insertions as the
+	 * buffers pinned are picked from the rear end.
+	 */
+	TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+
+	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+					obj_cb, obj_cb_arg);
+}
+
 struct rte_mempool_ops dpaa2_mpool_ops = {
 	.name = DPAA2_MEMPOOL_OPS_NAME,
 	.alloc = rte_hw_mbuf_create_pool,
@@ -365,13 +437,12 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
 	.enqueue = rte_hw_mbuf_free_bulk,
 	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
 	.get_count = rte_hw_mbuf_get_count,
+	.populate = dpaa2_populate,
 };
 
 MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
 
-RTE_INIT(dpaa2_mempool_init_log);
-static void
-dpaa2_mempool_init_log(void)
+RTE_INIT(dpaa2_mempool_init_log)
 {
 	dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
 	if (dpaa2_logtype_mempool >= 0)
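
Note on the PA-to-VA optimization: dpaa2_populate() above only records each pinned memory chunk in rte_dpaa2_memsegs; the lookup side lives elsewhere in the driver and is not part of this diff. The sketch below shows, under stated assumptions, how such a list can be consumed. The helper name dpaa2_memseg_iova_to_virt(), the local rte_iova_t stand-in and the struct dpaa2_memseg layout (inferred from the fields assigned in dpaa2_populate()) are illustrative only, not the driver's actual lookup code.

/* Sketch only: a head-first walk over the per-driver memseg list that
 * dpaa2_populate() fills in above. Names and the local type stand-ins are
 * assumptions for illustration; they are not taken from the patch.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <stddef.h>

typedef uint64_t rte_iova_t;	/* local stand-in for DPDK's rte_iova_t */

struct dpaa2_memseg {		/* layout inferred from dpaa2_populate() */
	TAILQ_ENTRY(dpaa2_memseg) next;
	void *vaddr;
	rte_iova_t iova;
	size_t len;
};

TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg);

/* Defined in dpaa2_hw_mempool.c by this patch. */
extern struct dpaa2_memseg_list rte_dpaa2_memsegs;

/* Hypothetical fast-path translation: newest chunks sit at the head
 * (TAILQ_INSERT_HEAD above), so recently pinned buffers are found first.
 * Returns NULL when no recorded chunk covers the address, in which case a
 * caller would fall back to the generic DPDK memseg walk.
 */
static inline void *
dpaa2_memseg_iova_to_virt(rte_iova_t paddr)
{
	struct dpaa2_memseg *ms;

	TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
		if (paddr >= ms->iova && paddr - ms->iova < ms->len)
			return (char *)ms->vaddr + (paddr - ms->iova);
	}
	return NULL;
}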
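
The two new exports, rte_dpaa2_mbuf_pool_bpid() and rte_dpaa2_mbuf_from_buf_addr(), let a caller that only holds a raw buffer address recover the owning mbuf and the hardware buffer-pool id. A minimal caller sketch follows; the function example_recover_mbuf() and the way buf_addr is obtained are assumptions for the example, not code from this patch.

/* Sketch of a possible caller. Only the two rte_dpaa2_* calls below are
 * introduced by this patch; the surrounding function is hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include "rte_dpaa2_mempool.h"

static struct rte_mbuf *
example_recover_mbuf(struct rte_mempool *mp, void *buf_addr)
{
	uint16_t bpid;

	/* Hardware buffer pool id backing this mempool. The unconfigured
	 * case is reported as -ENOMEM narrowed to uint16_t, matching the
	 * return statement in rte_dpaa2_mbuf_pool_bpid() above.
	 */
	bpid = rte_dpaa2_mbuf_pool_bpid(mp);
	if (bpid == (uint16_t)-ENOMEM)
		return NULL;

	/* Step back from the raw buffer address to the mbuf header that
	 * sits meta_data_size bytes in front of it.
	 */
	return rte_dpaa2_mbuf_from_buf_addr(mp, buf_addr);
}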