diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index da66577ccf..48887beb7e 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2019 NXP
  *
  */
 
@@ -23,6 +23,7 @@
 #include <rte_memzone.h>
 
 #include "rte_dpaa2_mempool.h"
+#include "fslmc_vfio.h"
 #include <fslmc_logs.h>
 #include <mc/fsl_dpbp.h>
 #include <portal/dpaa2_hw_pvt.h>
@@ -191,7 +192,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
 	struct qbman_release_desc releasedesc;
 	struct qbman_swp *swp;
 	int ret;
-	int i, n;
+	int i, n, retry_count;
 	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -224,9 +225,15 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
 	}
 
 	/* feed them to bman */
-	do {
-		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
-	} while (ret == -EBUSY);
+	retry_count = 0;
+	while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
+			-EBUSY) {
+		retry_count++;
+		if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+			DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+			return;
+		}
+	}
 
 aligned:
 	/* if there are more buffers to free */
@@ -242,10 +249,15 @@ aligned:
 #endif
 		}
 
-		do {
-			ret = qbman_swp_release(swp, &releasedesc, bufs,
-						DPAA2_MBUF_MAX_ACQ_REL);
-		} while (ret == -EBUSY);
+		retry_count = 0;
+		while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
+					DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
+			retry_count++;
+			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+				DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+				return;
+			}
+		}
 		n += DPAA2_MBUF_MAX_ACQ_REL;
 	}
 }
@@ -405,11 +417,23 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
 	      void *vaddr, rte_iova_t paddr, size_t len,
 	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
+	struct rte_memseg_list *msl;
+	/* The memseg list exists only when the memory is not external.
+	 * So a DMA map is required only when the memory is provided by
+	 * the user, i.e. is external.
+	 */
+	msl = rte_mem_virt2memseg_list(vaddr);
+
+	if (!msl) {
+		DPAA2_MEMPOOL_DEBUG("Memsegment is External.\n");
+		rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
+				(size_t)paddr, (size_t)len);
+	}
 	/* Insert entry into the PA->VA Table */
 	dpaax_iova_table_update(paddr, vaddr, len);
 
-	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
-			obj_cb, obj_cb_arg);
+	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+					len, obj_cb, obj_cb_arg);
 }
 
 static const struct rte_mempool_ops dpaa2_mpool_ops = {
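
The two release hunks replace an unbounded "do { ... } while (ret == -EBUSY)"
busy-loop with a bounded retry, so a pool that never drains (e.g. a depleted
FBPR) now produces an error instead of a lockup. Below is a minimal standalone
sketch of that pattern, assuming nothing beyond libc; try_release() and
MAX_RETRY are hypothetical stand-ins for qbman_swp_release() and
DPAA2_MAX_TX_RETRY_COUNT, not part of the patch.

#include <errno.h>
#include <stdio.h>

#define MAX_RETRY 4	/* hypothetical stand-in for DPAA2_MAX_TX_RETRY_COUNT */

/* Stand-in for qbman_swp_release(): reports -EBUSY twice, then succeeds. */
static int try_release(void)
{
	static int busy_left = 2;

	return (busy_left-- > 0) ? -EBUSY : 0;
}

int main(void)
{
	int ret, retry_count = 0;

	/* Same shape as the patched loops: retry on -EBUSY, but bail out
	 * after MAX_RETRY attempts instead of spinning forever.
	 */
	while ((ret = try_release()) == -EBUSY) {
		if (++retry_count > MAX_RETRY) {
			fprintf(stderr, "release retry count exceeded\n");
			return 1;
		}
	}
	printf("released (ret=%d) after %d busy retries\n", ret, retry_count);
	return 0;
}

The bound turns a hardware stall into a reported buffer leak, which matches
the "low fbpr?" hint in the patched error message.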
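
The dpaa2_populate() hunk keys off rte_mem_virt2memseg_list(): memory managed
by the EAL belongs to a memseg list, so a NULL lookup is taken to mean
user-provided (external) memory that still needs a VFIO DMA mapping before the
hardware can touch it. A sketch of that decision follows, assuming only the
public DPDK header <rte_memory.h>; map_if_external() and dma_map_extmem() are
hypothetical names, with dma_map_extmem() standing in for the driver's
rte_fslmc_vfio_mem_dmamap().

#include <stddef.h>
#include <stdint.h>

#include <rte_memory.h>

/* Hypothetical stand-in for the bus-specific mapping call
 * (rte_fslmc_vfio_mem_dmamap() in this driver).
 */
static int dma_map_extmem(uint64_t vaddr, rte_iova_t iova, size_t len)
{
	(void)vaddr;
	(void)iova;
	(void)len;
	return 0;	/* pretend the IOMMU mapping was created */
}

/* Mirrors the check added to dpaa2_populate(): an address with no
 * memseg list did not come from the EAL, so it is external memory
 * that has not been DMA-mapped yet.
 */
static int map_if_external(void *vaddr, rte_iova_t iova, size_t len)
{
	if (rte_mem_virt2memseg_list(vaddr) != NULL)
		return 0;	/* EAL-managed memory: already mapped */

	return dma_map_extmem((uint64_t)(uintptr_t)vaddr, iova, len);
}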
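
The same hunk also moves from rte_mempool_op_populate_default() to the
rte_mempool_op_populate_helper() API introduced in DPDK 19.11; passing 0 as
the flags argument keeps the default object placement. A minimal sketch of a
populate callback built the same way, assuming <rte_mempool.h> from a 19.11+
DPDK; my_populate() is a hypothetical example name, not the driver's function.

#include <rte_mempool.h>

/* Populate callback in the style of dpaa2_populate(): do the
 * driver-specific bookkeeping first (IOVA table update, DMA map, ...),
 * then delegate object placement to the generic helper.
 */
static int
my_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
	    rte_iova_t iova, size_t len,
	    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	/* driver-specific setup would go here */

	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
					      len, obj_cb, obj_cb_arg);
}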