mempool: prevent objects from spanning pages
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 14ca226..b5084c1 100644
@@ -1,34 +1,8 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP.
+ *   Copyright 2016 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <unistd.h>
 #include <errno.h>
 
 #include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 #include <rte_cycles.h>
 #include <rte_kvargs.h>
 #include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
 
+#include "fslmc_vfio.h"
 #include <fslmc_logs.h>
 #include <mc/fsl_dpbp.h>
 #include <portal/dpaa2_hw_pvt.h>
 #include <portal/dpaa2_hw_dpio.h>
 #include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
 
-struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+#include <dpaax_iova_table.h>
+
+struct dpaa2_bp_info *rte_dpaa2_bpid_info;
 static struct dpaa2_bp_list *h_bp_list;
 
+/* Dynamic logging identifier for the mempool driver */
+int dpaa2_logtype_mempool;
+
 static int
 rte_hw_mbuf_create_pool(struct rte_mempool *mp)
 {
@@ -65,47 +47,64 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
        struct dpaa2_bp_info *bp_info;
        struct dpbp_attr dpbp_attr;
        uint32_t bpid;
-       int ret, p_ret;
+       int ret;
 
        avail_dpbp = dpaa2_alloc_dpbp_dev();
 
        if (!avail_dpbp) {
-               PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+               DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
                return -ENOENT;
        }
+
+       /* Lazily allocate the global bpid table on first pool creation */
+       if (rte_dpaa2_bpid_info == NULL) {
+               rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+                                     sizeof(struct dpaa2_bp_info) * MAX_BPID,
+                                     RTE_CACHE_LINE_SIZE);
+               if (rte_dpaa2_bpid_info == NULL) {
+                       ret = -ENOMEM;
+                       goto err1;
+               }
+               memset(rte_dpaa2_bpid_info, 0,
+                      sizeof(struct dpaa2_bp_info) * MAX_BPID);
+       }
 
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
-                       RTE_LOG(ERR, PMD, "Failure in affining portal\n");
-                       return ret;
+                       DPAA2_MEMPOOL_ERR("Failure in affining portal");
+                       goto err1;
                }
        }
 
        ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
        if (ret != 0) {
-               PMD_INIT_LOG(ERR, "Resource enable failure with"
-                       " err code: %d\n", ret);
-               return ret;
+               DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+                                 ret);
+               goto err1;
        }
 
        ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
                                  avail_dpbp->token, &dpbp_attr);
        if (ret != 0) {
-               PMD_INIT_LOG(ERR, "Resource read failure with"
-                            " err code: %d\n", ret);
-               p_ret = ret;
-               ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
-                                  avail_dpbp->token);
-               return p_ret;
+               DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+                                 ret);
+               goto err2;
+       }
+
+       bp_info = rte_malloc(NULL,
+                            sizeof(struct dpaa2_bp_info),
+                            RTE_CACHE_LINE_SIZE);
+       if (!bp_info) {
+               DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
+               ret = -ENOMEM;
+               goto err2;
        }
 
        /* Allocate the bp_list which will be added into global_bp_list */
        bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_list) {
-               PMD_INIT_LOG(ERR, "No heap memory available");
-               return -ENOMEM;
+               DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
+               ret = -ENOMEM;
+               goto err3;
        }
 
        /* Set parameters of buffer pool list */
@@ -127,17 +126,22 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
        rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
        rte_dpaa2_bpid_info[bpid].bpid = bpid;
 
-       bp_info = rte_malloc(NULL,
-                            sizeof(struct dpaa2_bp_info),
-                            RTE_CACHE_LINE_SIZE);
        rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
                   sizeof(struct dpaa2_bp_info));
        mp->pool_data = (void *)bp_info;
 
-       PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+       DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
 
        h_bp_list = bp_list;
        return 0;
+err3:
+       rte_free(bp_info);
+err2:
+       dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+err1:
+       dpaa2_free_dpbp_dev(avail_dpbp);
+
+       return ret;
 }
 
 static void
@@ -148,7 +152,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
        struct dpaa2_dpbp_dev *dpbp_node;
 
        if (!mp->pool_data) {
-               PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+               DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
                return;
        }
 
@@ -166,7 +170,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
                while (temp) {
                        if (temp == bp) {
                                prev->next = temp->next;
-                               free(bp);
+                               rte_free(bp);
                                break;
                        }
                        prev = temp;
@@ -188,13 +192,13 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
        struct qbman_release_desc releasedesc;
        struct qbman_swp *swp;
        int ret;
-       int i, n;
+       int i, n, retry_count;
        uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
 
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret != 0) {
-                       RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+                       DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
                        return;
                }
        }
@@ -213,7 +217,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
        /* convert mbuf to buffers for the remainder */
        for (i = 0; i < n ; i++) {
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-               bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+               bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
                                + meta_data_size;
 #else
                bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
@@ -221,9 +225,15 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
        }
 
        /* feed them to bman */
-       do {
-               ret = qbman_swp_release(swp, &releasedesc, bufs, n);
-       } while (ret == -EBUSY);
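+       /* The portal may return -EBUSY under contention; retry a bounded
+        * number of times rather than spinning forever.
+        */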
+       retry_count = 0;
+       while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
+                       -EBUSY) {
+               retry_count++;
+               if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+                       DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+                       return;
+               }
+       }
 
 aligned:
        /* if there are more buffers to free */
@@ -232,21 +242,55 @@ aligned:
                for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
                        bufs[i] = (uint64_t)
-                                 rte_mempool_virt2phy(pool, obj_table[n + i])
+                                 rte_mempool_virt2iova(obj_table[n + i])
                                  + meta_data_size;
 #else
                        bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
 #endif
                }
 
-               do {
-                       ret = qbman_swp_release(swp, &releasedesc, bufs,
-                                               DPAA2_MBUF_MAX_ACQ_REL);
-               } while (ret == -EBUSY);
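+               /* Same bounded retry policy for each full burst */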
+               retry_count = 0;
+               while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
+                                       DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
+                       retry_count++;
+                       if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+                               DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+                               return;
+                       }
+               }
                n += DPAA2_MBUF_MAX_ACQ_REL;
        }
 }
 
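+/* Exported helper: report the hardware buffer pool id (bpid) backing
+ * the given mempool.
+ */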
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+       struct dpaa2_bp_info *bp_info;
+
+       bp_info = mempool_to_bpinfo(mp);
+       if (!(bp_info->bp_list)) {
+               DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+               return -ENOMEM;
+       }
+
+       return bp_info->bpid;
+}
+
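+/* Exported helper: map a raw buffer address back to its mbuf by
+ * stepping over the pool's per-buffer metadata area.
+ */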
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+       struct dpaa2_bp_info *bp_info;
+
+       bp_info = mempool_to_bpinfo(mp);
+       if (!(bp_info->bp_list)) {
+               DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+               return NULL;
+       }
+
+       return (struct rte_mbuf *)((uint8_t *)buf_addr -
+                       bp_info->meta_data_size);
+}
+
 int
 rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
                          void **obj_table, unsigned int count)
@@ -256,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 #endif
        struct qbman_swp *swp;
        uint16_t bpid;
-       uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+       size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
        int i, ret;
        unsigned int n = 0;
        struct dpaa2_bp_info *bp_info;
@@ -264,7 +308,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
        bp_info = mempool_to_bpinfo(pool);
 
        if (!(bp_info->bp_list)) {
-               RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+               DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
                return -ENOENT;
        }
 
@@ -273,7 +317,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret != 0) {
-                       RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+                       DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
                        return ret;
                }
        }
@@ -284,18 +328,18 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
                 * then the remainder.
                 */
                if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
-                       ret = qbman_swp_acquire(swp, bpid, bufs,
+                       ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
                                                DPAA2_MBUF_MAX_ACQ_REL);
                } else {
-                       ret = qbman_swp_acquire(swp, bpid, bufs,
+                       ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
                                                count - n);
                }
                /* If fewer buffers than requested are available in the
                 * pool, qbman_swp_acquire returns 0.
                 */
                if (ret <= 0) {
-                       PMD_TX_LOG(ERR, "Buffer acquire failed with"
-                                  " err code: %d", ret);
+                       DPAA2_MEMPOOL_DP_DEBUG(
+                               "Buffer acquire failed with err code: %d", ret);
                        /* The API expects the exact number of requested
                         * buffers, so release everything acquired so far.
                         */
                        rte_dpaa2_mbuf_release(pool, obj_table, bpid,
@@ -304,10 +348,11 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
                }
                /* assigning mbuf from the acquired objects */
                for (i = 0; (i < ret) && bufs[i]; i++) {
-                       DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+                       DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
                        obj_table[n] = (struct rte_mbuf *)
                                       (bufs[i] - bp_info->meta_data_size);
-                       PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+                       DPAA2_MEMPOOL_DP_DEBUG(
+                                  "Acquired %p address %p from BMAN\n",
                                   (void *)bufs[i], (void *)obj_table[n]);
                        n++;
                }
@@ -315,8 +360,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 
 #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
        alloc += n;
-       PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
-                  alloc, count, n);
+       DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+                              alloc, count, n);
 #endif
        return 0;
 }
@@ -329,7 +374,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
 
        bp_info = mempool_to_bpinfo(pool);
        if (!(bp_info->bp_list)) {
-               RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+               DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
                return -ENOENT;
        }
        rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -347,7 +392,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
        struct dpaa2_dpbp_dev *dpbp_node;
 
        if (!mp || !mp->pool_data) {
-               RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
+               DPAA2_MEMPOOL_ERR("Invalid mempool provided");
                return 0;
        }
 
@@ -357,23 +402,55 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
        ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
                                     dpbp_node->token, &num_of_bufs);
        if (ret) {
-               RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
-                       ret);
+               DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+                                 ret);
                return 0;
        }
 
-       RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
+       DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
 
        return num_of_bufs;
 }
 
-struct rte_mempool_ops dpaa2_mpool_ops = {
-       .name = "dpaa2",
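+/* Populate hook: DMA-map user-provided (external) memory and record the
+ * PA->VA translation before the common helper carves out the objects.
+ */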
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+             void *vaddr, rte_iova_t paddr, size_t len,
+             rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+       struct rte_memseg_list *msl;
+       /* A memseg list exists only for DPDK-managed (internal) memory.
+        * So a DMA mapping is required only when the memory is provided
+        * by the user, i.e. external memory.
+        */
+       msl = rte_mem_virt2memseg_list(vaddr);
+
+       if (!msl) {
+               DPAA2_MEMPOOL_DEBUG("Memsegment is External.\n");
+               rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
+                               (size_t)paddr, (size_t)len);
+       }
+       /* Insert entry into the PA->VA Table */
+       dpaax_iova_table_update(paddr, vaddr, len);
+
+       return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+                                              len, obj_cb, obj_cb_arg);
+}
+
+static const struct rte_mempool_ops dpaa2_mpool_ops = {
+       .name = DPAA2_MEMPOOL_OPS_NAME,
        .alloc = rte_hw_mbuf_create_pool,
        .free = rte_hw_mbuf_free_pool,
        .enqueue = rte_hw_mbuf_free_bulk,
        .dequeue = rte_dpaa2_mbuf_alloc_bulk,
        .get_count = rte_hw_mbuf_get_count,
+       .populate = dpaa2_populate,
 };
 
 MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
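+/* Constructor: register the driver's dynamic log type (default NOTICE) */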
+RTE_INIT(dpaa2_mempool_init_log)
+{
+       dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+       if (dpaa2_logtype_mempool >= 0)
+               rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
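
A minimal usage sketch of the two helpers exported by this patch, assuming
a mempool "mp" already created with the dpaa2 ops and a buffer address
"buf_addr" handed back by hardware (both names are illustrative):

	/* hardware pool id backing this mempool */
	uint16_t bpid = rte_dpaa2_mbuf_pool_bpid(mp);

	/* recover the owning mbuf and return it to the pool */
	struct rte_mbuf *m = rte_dpaa2_mbuf_from_buf_addr(mp, buf_addr);
	if (m != NULL)
		rte_pktmbuf_free(m);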