X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fmempool%2Fdpaa%2Fdpaa_mempool.c;h=00308177257381a3bb1abebbff8243b1bcf4f1a3;hb=e6b65f90ba4bc3b51b3585abc70c74847a4a6177;hp=fb3b6ba0cd34a983272e8f45e4745df3fe60a66f;hpb=5d944582d0282f0d133619cff7515d89bc4ed216;p=dpdk.git

diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index fb3b6ba0cd..0030817725 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -26,8 +26,16 @@
 #include <rte_ring.h>
 
 #include <dpaa_mempool.h>
+#include <dpaax_iova_table.h>
 
-struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+/* List of all the memseg information locally maintained in dpaa driver. This
+ * is to optimize the PA_to_VA searches until a better mechanism (algo) is
+ * available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
+struct dpaa_bp_info *rte_dpaa_bpid_info;
 
 static int
 dpaa_mbuf_create_pool(struct rte_mempool *mp)
@@ -66,6 +74,14 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
 		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
 				  num_bufs, bpid);
 
+	if (rte_dpaa_bpid_info == NULL) {
+		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
+				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
+				RTE_CACHE_LINE_SIZE);
+		if (rte_dpaa_bpid_info == NULL)
+			return -ENOMEM;
+	}
+
 	rte_dpaa_bpid_info[bpid].mp = mp;
 	rte_dpaa_bpid_info[bpid].bpid = bpid;
 	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
@@ -115,7 +131,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
 	struct bm_buffer buf;
 	int ret;
 
-	DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+	DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
+			     addr, bp_info->bpid);
 
 	bm_buffer_set64(&buf, addr);
 retry:
@@ -154,8 +171,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
 		if (unlikely(!bp_info->ptov_off)) {
 			/* buffers are from single mem segment */
 			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
-				bp_info->ptov_off
-						= (uint64_t)obj_table[i] - phy;
+				bp_info->ptov_off = (size_t)obj_table[i] - phy;
 				rte_dpaa_bpid_info[bp_info->bpid].ptov_off =
 						bp_info->ptov_off;
 			}
@@ -264,10 +280,9 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
 }
 
 static int
-dpaa_register_memory_area(const struct rte_mempool *mp,
-			  char *vaddr __rte_unused,
-			  rte_iova_t paddr __rte_unused,
-			  size_t len)
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+	      void *vaddr, rte_iova_t paddr, size_t len,
+	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
 	struct dpaa_bp_info *bp_info;
 	unsigned int total_elt_sz;
@@ -279,27 +294,56 @@ dpaa_register_memory_area(const struct rte_mempool *mp,
 		return 0;
 	}
 
+	/* Update the PA-VA Table */
+	dpaax_iova_table_update(paddr, vaddr, len);
+
 	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
-	DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
-			   len, total_elt_sz * mp->size);
+	DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+			   (uint64_t)len, total_elt_sz * mp->size);
 
 	/* Detect pool area has sufficient space for elements in this memzone */
 	if (len >= total_elt_sz * mp->size)
 		bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+	struct dpaa_memseg *ms;
+
+	/* For each memory chunk pinned to the Mempool, a linked list of the
+	 * contained memsegs is created for searching when PA to VA
+	 * conversion is required.
+	 */
+	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+	if (!ms) {
+		DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+		DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+		/* If the element is not added, it would only lead to failure
+		 * in searching for the element and the logic would Fallback
+		 * to traditional DPDK memseg traversal code. So, this is not
+		 * a blocking error - but, error would be printed on screen.
+		 */
+		return 0;
+	}
 
-	return 0;
+	ms->vaddr = vaddr;
+	ms->iova = paddr;
+	ms->len = len;
+	/* Head insertions are generally faster than tail insertions as the
+	 * buffers pinned are picked from rear end.
+	 */
+	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+					       obj_cb, obj_cb_arg);
 }
 
-struct rte_mempool_ops dpaa_mpool_ops = {
+static const struct rte_mempool_ops dpaa_mpool_ops = {
 	.name = DPAA_MEMPOOL_OPS_NAME,
 	.alloc = dpaa_mbuf_create_pool,
 	.free = dpaa_mbuf_free_pool,
 	.enqueue = dpaa_mbuf_free_bulk,
 	.dequeue = dpaa_mbuf_alloc_bulk,
 	.get_count = dpaa_mbuf_get_count,
-	.register_memory_area = dpaa_register_memory_area,
+	.populate = dpaa_populate,
 };
 
 MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
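
For reference, a PA-to-VA lookup over the rte_dpaa_memsegs list that dpaa_populate() fills above can be sketched as below. This is a standalone approximation, not the driver's code: the real struct dpaa_memseg and dpaa_memseg_list definitions live in the driver headers (dpaa_mempool.h), and the driver's actual fast path is the bus-level helper (e.g. rte_dpaa_mem_ptov()); the typedef and global here are local stand-ins so the sketch compiles on its own.

/* Sketch: walk the memseg list head-first, mirroring the fields the
 * diff stores (ms->vaddr, ms->iova, ms->len). All names here are
 * illustrative stand-ins for the driver's definitions.
 */
#include <sys/queue.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t rte_iova_t;            /* stand-in for the DPDK typedef */

struct dpaa_memseg {
        TAILQ_ENTRY(dpaa_memseg) next;
        char *vaddr;                    /* VA of the pinned chunk */
        rte_iova_t iova;                /* PA (IOVA) of the chunk */
        size_t len;                     /* chunk length in bytes */
};

TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
/* Stand-in for the driver global initialized at the top of the diff. */
struct dpaa_memseg_list rte_dpaa_memsegs =
        TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

static inline void *
dpaa_memseg_iova2virt(rte_iova_t paddr)
{
        struct dpaa_memseg *ms;

        /* dpaa_populate() inserts at the head, so the chunk pinned most
         * recently (the one buffers are picked from first, per the
         * head-insertion comment in the diff) is checked first.
         */
        TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
                if (paddr >= ms->iova && paddr < ms->iova + ms->len)
                        return ms->vaddr + (paddr - ms->iova);
        }
        return NULL;    /* caller falls back to the generic memseg walk */
}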
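
Likewise, since the ops table now exposes .populate instead of .register_memory_area, dpaa_populate() runs once per memory chunk during normal mempool population. A hedged application-side sketch of when that hook fires (assumed usage, not part of this patch; the pool geometry is illustrative and DPAA_MEMPOOL_OPS_NAME comes from the driver's dpaa_mempool.h):

#include <rte_mempool.h>
#include <rte_lcore.h>

#include <dpaa_mempool.h>       /* DPAA_MEMPOOL_OPS_NAME */

static struct rte_mempool *
make_dpaa_pool(void)
{
        struct rte_mempool *mp;

        /* 4096 elements of 2048 B with a 256-element cache: illustrative. */
        mp = rte_mempool_create_empty("pkt_pool", 4096, 2048, 256, 0,
                                      rte_socket_id(), 0);
        if (mp == NULL)
                return NULL;

        /* Bind the ops registered above via MEMPOOL_REGISTER_OPS(). */
        if (rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL) != 0 ||
            rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }
        /* populate_default() invoked dpaa_populate() once per chunk, so each
         * chunk is now in the dpaax IOVA table and on rte_dpaa_memsegs.
         */
        return mp;
}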