/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <errno.h>
#include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
+#include "fslmc_vfio.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"
-struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
-static struct dpaa2_bp_list *h_bp_list;
-
-/* List of all the memseg information locally maintained in dpaa2 driver. This
- * is to optimize the PA_to_VA searches until a better mechanism (algo) is
- * available.
- */
-struct dpaa2_memseg_list rte_dpaa2_memsegs
- = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+#include <dpaax_iova_table.h>
-/* Dynamic logging identified for mempool */
-int dpaa2_logtype_mempool;
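+/* Per-bpid pool metadata, allocated on first pool creation and shared by
+ * all DPAA2 mempools in the process.
+ */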
+struct dpaa2_bp_info *rte_dpaa2_bpid_info;
+static struct dpaa2_bp_list *h_bp_list;
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
avail_dpbp = dpaa2_alloc_dpbp_dev();
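+	/* Lazily allocate the shared bpid table (MAX_BPID entries). */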
+	if (rte_dpaa2_bpid_info == NULL) {
+		rte_dpaa2_bpid_info = rte_zmalloc(NULL,
+				sizeof(struct dpaa2_bp_info) * MAX_BPID,
+				RTE_CACHE_LINE_SIZE);
+		if (rte_dpaa2_bpid_info == NULL)
+			return -ENOMEM;
+	}
+
if (!avail_dpbp) {
DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
return -ENOENT;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_MEMPOOL_ERR("Failure in affining portal");
+			DPAA2_MEMPOOL_ERR(
+				"Failed to allocate IO portal, tid: %d",
+				rte_gettid());
goto err1;
}
}
struct qbman_release_desc releasedesc;
struct qbman_swp *swp;
int ret;
- int i, n;
+ int i, n, retry_count;
uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
+			DPAA2_MEMPOOL_ERR(
+				"Failed to allocate IO portal, tid: %d",
+				rte_gettid());
return;
}
}
}
/* feed them to bman */
- do {
- ret = qbman_swp_release(swp, &releasedesc, bufs, n);
- } while (ret == -EBUSY);
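+	/* Bound the -EBUSY retries so a stalled QBMAN portal cannot spin
+	 * this thread forever; give up once the limit is reached.
+	 */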
+ retry_count = 0;
+ while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
+ -EBUSY) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+			DPAA2_MEMPOOL_ERR("QBMAN buffer release retry count exceeded");
+ return;
+ }
+ }
aligned:
/* if there are more buffers to free */
#endif
}
- do {
- ret = qbman_swp_release(swp, &releasedesc, bufs,
- DPAA2_MBUF_MAX_ACQ_REL);
- } while (ret == -EBUSY);
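+	/* Apply the same bounded-retry policy to the full-burst releases. */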
+ retry_count = 0;
+ while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
+ DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+			DPAA2_MEMPOOL_ERR("QBMAN buffer release retry count exceeded");
+ return;
+ }
+ }
n += DPAA2_MBUF_MAX_ACQ_REL;
}
}
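+/* Return the hardware buffer pool ID (bpid) backing the given mempool. */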
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+	if (!(bp_info->bp_list)) {
+		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+		return -ENOMEM;
+	}
+
+ return bp_info->bpid;
+}
+
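+/* Map a raw buffer address back to its rte_mbuf header, which is laid out
+ * meta_data_size bytes before the buffer in this pool.
+ */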
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+	if (!(bp_info->bp_list)) {
+		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+		return NULL;
+	}
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
void **obj_table, unsigned int count)
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
+			DPAA2_MEMPOOL_ERR(
+				"Failed to allocate IO portal, tid: %d",
+				rte_gettid());
return ret;
}
}
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
- " err code: %d", ret);
+		DPAA2_MEMPOOL_DP_DEBUG(
+			"Buffer acquire failed with err code: %d\n", ret);
/* The API expect the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
unsigned int num_of_bufs = 0;
struct dpaa2_bp_info *bp_info;
struct dpaa2_dpbp_dev *dpbp_node;
+ struct fsl_mc_io mc_io;
if (!mp || !mp->pool_data) {
DPAA2_MEMPOOL_ERR("Invalid mempool provided");
bp_info = (struct dpaa2_bp_info *)mp->pool_data;
dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
- ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+	/* A secondary process may query these stats while the MCP portal in
+	 * the device private data still holds the primary process address.
+	 * Use this process's own MCP portal address for the query.
+	 */
+ mc_io.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+ ret = dpbp_get_num_free_bufs(&mc_io, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
void *vaddr, rte_iova_t paddr, size_t len,
rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
- struct dpaa2_memseg *ms;
-
- /* For each memory chunk pinned to the Mempool, a linked list of the
- * contained memsegs is created for searching when PA to VA
- * conversion is required.
+ struct rte_memseg_list *msl;
+	/* A memseg list exists only for memory managed internally by DPDK.
+	 * If no list is found, the memory is user-provided (external) and
+	 * must be DMA-mapped explicitly for the hardware.
+	 */
- ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
- if (!ms) {
- DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
- DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
- /* If the element is not added, it would only lead to failure
- * in searching for the element and the logic would Fallback
- * to traditional DPDK memseg traversal code. So, this is not
- * a blocking error - but, error would be printed on screen.
- */
- return 0;
- }
+ msl = rte_mem_virt2memseg_list(vaddr);
- ms->vaddr = vaddr;
- ms->iova = paddr;
- ms->len = len;
- /* Head insertions are generally faster than tail insertions as the
- * buffers pinned are picked from rear end.
- */
- TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+ if (!msl) {
+		DPAA2_MEMPOOL_DEBUG("No memseg list; memory is external.");
+ rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
+ (size_t)paddr, (size_t)len);
+ }
+ /* Insert entry into the PA->VA Table */
+ dpaax_iova_table_update(paddr, vaddr, len);
- return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
- obj_cb, obj_cb_arg);
+ return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+ len, obj_cb, obj_cb_arg);
}
-struct rte_mempool_ops dpaa2_mpool_ops = {
+static const struct rte_mempool_ops dpaa2_mpool_ops = {
.name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
.free = rte_hw_mbuf_free_pool,
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
-RTE_INIT(dpaa2_mempool_init_log);
-static void
-dpaa2_mempool_init_log(void)
-{
- dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
- if (dpaa2_logtype_mempool >= 0)
- rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_mempool, mempool.dpaa2, NOTICE);