diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 84ff128..bc146e4 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2019 NXP
  *
  */
 
@@ -14,7 +14,7 @@
 #include <errno.h>
 
 #include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
@@ -23,15 +23,15 @@
 #include <rte_dev.h>
 #include "rte_dpaa2_mempool.h"
 
+#include "fslmc_vfio.h"
 #include <fslmc_logs.h>
 #include <mc/fsl_dpbp.h>
 #include <portal/dpaa2_hw_pvt.h>
 #include "dpaa2_hw_mempool.h"
 #include "dpaa2_hw_mempool_logs.h"
 
-struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
-static struct dpaa2_bp_list *h_bp_list;
+#include <dpaax_iova_table.h>
 
-/* Dynamic logging identified for mempool */
-int dpaa2_logtype_mempool;
+struct dpaa2_bp_info *rte_dpaa2_bpid_info;
+static struct dpaa2_bp_list *h_bp_list;
 
 static int
 rte_hw_mbuf_create_pool(struct rte_mempool *mp)
@@ -48,6 +48,16 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
 
        avail_dpbp = dpaa2_alloc_dpbp_dev();
 
+       if (rte_dpaa2_bpid_info == NULL) {
+               rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+                                     sizeof(struct dpaa2_bp_info) * MAX_BPID,
+                                     RTE_CACHE_LINE_SIZE);
+               if (rte_dpaa2_bpid_info == NULL)
+                       return -ENOMEM;
+               memset(rte_dpaa2_bpid_info, 0,
+                      sizeof(struct dpaa2_bp_info) * MAX_BPID);
+       }
+
        if (!avail_dpbp) {
                DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
                return -ENOENT;
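
Review note: the lazy allocation added above pairs rte_malloc() with a
following memset(). rte_zmalloc() returns already-zeroed memory, so the
pair could collapse into one call. A minimal equivalent sketch (not the
committed code):

/* Same semantics as the rte_malloc()+memset() pair above, using
 * rte_zmalloc() to get zeroed, cache-line-aligned memory in one call.
 */
if (rte_dpaa2_bpid_info == NULL) {
	rte_dpaa2_bpid_info = rte_zmalloc(NULL,
			sizeof(struct dpaa2_bp_info) * MAX_BPID,
			RTE_CACHE_LINE_SIZE);
	if (rte_dpaa2_bpid_info == NULL)
		return -ENOMEM;
}
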
@@ -56,7 +66,9 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
-                       DPAA2_MEMPOOL_ERR("Failure in affining portal");
+                       DPAA2_MEMPOOL_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
                        goto err1;
                }
        }
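
Note: this affine-on-first-use sequence (check DPAA2_PER_LCORE_DPIO,
call dpaa2_affine_qbman_swp() on a miss) now appears at three call
sites in this file with the same tid-bearing error message. A
hypothetical helper, sketched purely for illustration
(dpaa2_ensure_portal() is not part of this patch or the driver), would
keep the message in one place:

/* Hypothetical helper -- not in the driver. Each thread must be bound
 * to a QBMAN software portal before issuing acquire/release commands;
 * this wraps the first-use binding repeated at every call site above.
 */
static inline int
dpaa2_ensure_portal(void)
{
	int ret;

	if (likely(DPAA2_PER_LCORE_DPIO != NULL))
		return 0;

	ret = dpaa2_affine_qbman_swp();
	if (ret)
		DPAA2_MEMPOOL_ERR("Failed to allocate IO portal, tid: %d\n",
				  rte_gettid());
	return ret;
}
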
@@ -179,13 +191,15 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
        struct qbman_release_desc releasedesc;
        struct qbman_swp *swp;
        int ret;
-       int i, n;
+       int i, n, retry_count;
        uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
 
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret != 0) {
-                       DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
+                       DPAA2_MEMPOOL_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
                        return;
                }
        }
@@ -212,9 +226,15 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
        }
 
        /* feed them to bman */
-       do {
-               ret = qbman_swp_release(swp, &releasedesc, bufs, n);
-       } while (ret == -EBUSY);
+       retry_count = 0;
+       while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
+                       -EBUSY) {
+               retry_count++;
+               if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+                       DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+                       return;
+               }
+       }
 
 aligned:
        /* if there are more buffers to free */
@@ -230,10 +250,15 @@ aligned:
 #endif
                }
 
-               do {
-                       ret = qbman_swp_release(swp, &releasedesc, bufs,
-                                               DPAA2_MBUF_MAX_ACQ_REL);
-               } while (ret == -EBUSY);
+               retry_count = 0;
+               while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
+                                       DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
+                       retry_count++;
+                       if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+                               DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+                               return;
+                       }
+               }
                n += DPAA2_MBUF_MAX_ACQ_REL;
        }
 }
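
The two loops above replace an unbounded busy-wait on -EBUSY with a
bounded retry: after DPAA2_MAX_TX_RETRY_COUNT failed attempts the
release is abandoned (those buffers are lost to the pool) instead of
wedging the datapath. The pattern, condensed into one sketch (names as
in the patch):

/* Bounded retry around a portal command that returns -EBUSY while the
 * software portal is busy; give up after a fixed retry budget.
 */
retry_count = 0;
do {
	ret = qbman_swp_release(swp, &releasedesc, bufs, n);
} while (ret == -EBUSY && ++retry_count <= DPAA2_MAX_TX_RETRY_COUNT);
if (ret == -EBUSY)
	DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
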
@@ -293,7 +318,9 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret != 0) {
-                       DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
+                       DPAA2_MEMPOOL_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
                        return ret;
                }
        }
@@ -314,8 +341,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
                 * in pool, qbman_swp_acquire returns 0
                 */
                if (ret <= 0) {
-                       DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
-                                         " err code: %d", ret);
+                       DPAA2_MEMPOOL_DP_DEBUG(
+                               "Buffer acquire failed with err code: %d", ret);
                        /* The API expects the exact number of requested bufs */
                        /* Releasing all buffers allocated */
                        rte_dpaa2_mbuf_release(pool, obj_table, bpid,
@@ -366,6 +393,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
        unsigned int num_of_bufs = 0;
        struct dpaa2_bp_info *bp_info;
        struct dpaa2_dpbp_dev *dpbp_node;
+       struct fsl_mc_io mc_io;
 
        if (!mp || !mp->pool_data) {
                DPAA2_MEMPOOL_ERR("Invalid mempool provided");
@@ -375,7 +403,12 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
        bp_info = (struct dpaa2_bp_info *)mp->pool_data;
        dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
 
-       ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+       /* If a secondary process accesses the stats, the MCP portal in
+        * priv-hw may hold the primary process's address. Resolve the
+        * secondary process's own MCP portal address for this object.
+        */
+       mc_io.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+       ret = dpbp_get_num_free_bufs(&mc_io, CMD_PRI_LOW,
                                     dpbp_node->token, &num_of_bufs);
        if (ret) {
                DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
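
The stack-local struct fsl_mc_io is the substance of this fix:
dpbp_get_num_free_bufs() only needs a valid MC portal register mapping
plus the object token, and dpaa2_get_mcp_ptr(MC_PORTAL_INDEX) resolves
that mapping in the calling process. Going through dpbp_node->dpbp,
whose regs pointer was filled in by the primary process, would hand a
secondary process a foreign virtual address.
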
@@ -393,37 +426,26 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
              void *vaddr, rte_iova_t paddr, size_t len,
              rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
-       struct dpaa2_memseg *ms;
-
-       /* For each memory chunk pinned to the Mempool, a linked list of the
-        * contained memsegs is created for searching when PA to VA
-        * conversion is required.
+       struct rte_memseg_list *msl;
+       /* A memseg list exists only when the memory is not external.
+        * So a DMA map is required only when the memory is provided by
+        * the user, i.e. when it is external.
         */
-       ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
-       if (!ms) {
-               DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
-               DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
-               /* If the element is not added, it would only lead to failure
-                * in searching for the element and the logic would Fallback
-                * to traditional DPDK memseg traversal code. So, this is not
-                * a blocking error - but, error would be printed on screen.
-                */
-               return 0;
-       }
+       msl = rte_mem_virt2memseg_list(vaddr);
 
-       ms->vaddr = vaddr;
-       ms->iova = paddr;
-       ms->len = len;
-       /* Head insertions are generally faster than tail insertions as the
-        * buffers pinned are picked from rear end.
-        */
-       TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+       if (!msl) {
+               DPAA2_MEMPOOL_DEBUG("Memsegment is External.\n");
+               rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
+                               (size_t)paddr, (size_t)len);
+       }
+       /* Insert entry into the PA->VA Table */
+       dpaax_iova_table_update(paddr, vaddr, len);
 
-       return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
-                                              obj_cb, obj_cb_arg);
+       return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+                                              len, obj_cb, obj_cb_arg);
 }
 
-struct rte_mempool_ops dpaa2_mpool_ops = {
+static const struct rte_mempool_ops dpaa2_mpool_ops = {
        .name = DPAA2_MEMPOOL_OPS_NAME,
        .alloc = rte_hw_mbuf_create_pool,
        .free = rte_hw_mbuf_free_pool,
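
dpaa2_populate() now branches on rte_mem_virt2memseg_list(): chunks
backed by a DPDK memseg list need no extra work, while external
(user-provided) memory returns NULL and is DMA-mapped through the fslmc
VFIO container; in both cases the chunk is recorded in the dpaax PA->VA
table used for fast address translation. A hedged application-side
sketch of the external path (pool parameters are illustrative, and it
assumes the dpaa2 ops name resolves to "dpaa2"; obtaining
ext_va/ext_iova, e.g. via rte_extmem_register(), is out of scope):

#include <rte_mempool.h>
#include <rte_lcore.h>

/* Illustrative only: populate a dpaa2-backed mempool from caller-owned
 * (external) memory, which exercises the VFIO DMA-map branch above.
 */
static struct rte_mempool *
ext_pool_create(void *ext_va, rte_iova_t ext_iova, size_t ext_len)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("ext_pool", 1024, 2048, 0, 0,
				      rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, "dpaa2", NULL) != 0)
		goto fail;

	/* rte_mem_virt2memseg_list(ext_va) is NULL for this chunk, so
	 * dpaa2_populate() DMA-maps it via rte_fslmc_vfio_mem_dmamap().
	 */
	if (rte_mempool_populate_iova(mp, ext_va, ext_iova, ext_len,
				      NULL, NULL) < 0)
		goto fail;

	return mp;

fail:
	rte_mempool_free(mp);
	return NULL;
}
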
@@ -435,9 +457,4 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
 
 MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
 
-RTE_INIT(dpaa2_mempool_init_log)
-{
-       dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
-       if (dpaa2_logtype_mempool >= 0)
-               rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_mempool, mempool.dpaa2, NOTICE);
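
RTE_LOG_REGISTER() both defines dpaa2_logtype_mempool and registers the
"mempool.dpaa2" logtype at NOTICE level during init, replacing the
hand-rolled RTE_INIT constructor removed above. Consumers are
unchanged; a hedged sketch of how such a logtype is typically consumed
(the real macro lives in dpaa2_hw_mempool_logs.h and may differ in
prefix and arguments):

/* Sketch of a typical logtype consumer -- illustrative only. */
#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \
		"mempool/dpaa2: " fmt "\n", ##args)
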