[dpdk.git] drivers/mempool/octeontx/octeontx_fpavf.c
index 3290aa0..4cf387e 100644
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium Inc. 2017. All Right reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium networks nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #include <stdlib.h>
 
 #include <rte_atomic.h>
 #include <rte_eal.h>
-#include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_errno.h>
 #include <rte_memory.h>
 #include <rte_malloc.h>
 #include <rte_spinlock.h>
 #include <rte_mbuf.h>
 
-#include <rte_pmd_octeontx_ssovf.h>
+#include "octeontx_mbox.h"
 #include "octeontx_fpavf.h"
 
 /* FPA Mbox Message */
@@ -133,6 +105,16 @@ struct octeontx_fpadev {
 
 static struct octeontx_fpadev fpadev;
 
+int octeontx_logtype_fpavf;
+int octeontx_logtype_fpavf_mbox;
+
+RTE_INIT(otx_pool_init_log)
+{
+       octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
+       if (octeontx_logtype_fpavf >= 0)
+               rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
+}
+
 /* lock is taken by caller */
 static int
 octeontx_fpa_gpool_alloc(unsigned int object_size)
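
Note: the RTE_INIT constructor added above registers the driver's log type under "pmd.mempool.octeontx" and defaults it to NOTICE. As a rough sketch only (the real definitions live in octeontx_fpavf.h), the fpavf_log_* helpers used throughout this file would wrap that log type along these lines:

    /* Illustrative only -- not the header's actual macro. */
    #define fpavf_log_err(fmt, args...) \
            rte_log(RTE_LOG_ERR, octeontx_logtype_fpavf, \
                    "%s(): " fmt, __func__, ## args)
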
@@ -176,9 +158,6 @@ octeontx_fpa_gpool2handle(uint16_t gpool)
        RTE_ASSERT(gpool < FPA_VF_MAX);
 
        res = &fpadev.pool[gpool];
-       if (unlikely(res == NULL))
-               return 0;
-
        return (uintptr_t)res->bar0 | gpool;
 }
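
Note: the dropped NULL check was dead code, since res is the address of an element of the static fpadev.pool[] array and can never be NULL. The returned handle packs the pool index into the low bits of the VF BAR0 address; a sketch of that convention, with the decode side as used later in this file (the pool-index extraction is an assumption about the octeontx_fpa_bufpool_gpool() helper):

    uintptr_t handle   = (uintptr_t)res->bar0 | gpool;          /* encode */
    uintptr_t pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;    /* recover BAR0 */
    uint8_t   gpool_id = handle & FPA_GPOOL_MASK;               /* recover pool index */
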
 
@@ -224,7 +203,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
                          signed short buf_offset, unsigned int max_buf_count)
 {
        void *memptr = NULL;
-       phys_addr_t phys_addr;
+       rte_iova_t phys_addr;
        unsigned int memsz;
        struct fpavf_res *fpa = NULL;
        uint64_t reg;
@@ -247,7 +226,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
 
        /* Configure stack */
        fpa->pool_stack_base = memptr;
-       phys_addr = rte_malloc_virt2phy(memptr);
+       phys_addr = rte_malloc_virt2iova(memptr);
 
        buf_size /= FPA_LN_SIZE;
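
Note: rte_malloc_virt2phy() is replaced by rte_malloc_virt2iova() from DPDK's IOVA-aware API rework; the rte_iova_t it returns is what gets programmed as the pool stack base and end. A condensed sketch of the allocate-and-translate pattern (the actual allocation call sits above this hunk and is assumed here):

    void *memptr = rte_malloc(NULL, memsz, FPA_LN_SIZE);    /* pool stack memory */
    rte_iova_t phys_addr = rte_malloc_virt2iova(memptr);    /* bus address for the PF */
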
 
@@ -262,13 +241,13 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
                POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
                POOL_ENA;
 
-       cfg.aid = 0;
+       cfg.aid = FPA_AURA_IDX(gpool);
        cfg.pool_cfg = reg;
        cfg.pool_stack_base = phys_addr;
        cfg.pool_stack_end = phys_addr + memsz;
        cfg.aura_cfg = (1 << 9);
 
-       ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+       ret = octeontx_mbox_send(&hdr, &cfg,
                                        sizeof(struct octeontx_mbox_fpa_cfg),
                                        &resp, sizeof(resp));
        if (ret < 0) {
@@ -313,7 +292,7 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index)
        cfg.pool_stack_end = 0;
        cfg.aura_cfg = 0;
 
-       ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+       ret = octeontx_mbox_send(&hdr, &cfg,
                                        sizeof(struct octeontx_mbox_fpa_cfg),
                                        &resp, sizeof(resp));
        if (ret < 0) {
@@ -346,15 +325,16 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
        hdr.vfid = gpool_index;
        hdr.res_code = 0;
        memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
-       cfg.aid = gpool_index; /* gpool is guara */
+       cfg.aid = FPA_AURA_IDX(gpool_index);
 
-       ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+       ret = octeontx_mbox_send(&hdr, &cfg,
                                        sizeof(struct octeontx_mbox_fpa_cfg),
                                        &resp, sizeof(resp));
        if (ret < 0) {
                fpavf_log_err("Could not attach fpa ");
                fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
-                             gpool_index, gpool_index, ret, hdr.res_code);
+                             FPA_AURA_IDX(gpool_index), gpool_index, ret,
+                             hdr.res_code);
                ret = -EACCES;
                goto err;
        }
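
Note: the functional change in this and the neighbouring hunks is that the aura id (cfg.aid) sent to the PF mailbox is no longer the raw gpool index but FPA_AURA_IDX(gpool), and the error logs now report that translated index. Each pool owns a set of auras, so the pool index has to be turned into a global aura index; purely for illustration (the real macro is defined in octeontx_fpavf.h), such a mapping could look like:

    /* Hypothetical mapping: with N auras per pool, pool g's first aura is g * N. */
    #define EXAMPLE_AURAS_PER_POOL   16
    #define EXAMPLE_AURA_IDX(gpool)  ((gpool) * EXAMPLE_AURAS_PER_POOL)
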
@@ -374,14 +354,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
                goto err;
        }
 
-       cfg.aid = gpool_index; /* gpool is gaura */
+       cfg.aid = FPA_AURA_IDX(gpool_index);
        hdr.coproc = FPA_COPROC;
        hdr.msg = FPA_DETACHAURA;
        hdr.vfid = gpool_index;
-       ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+       ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
        if (ret < 0) {
                fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
-                             gpool_index, ret, hdr.res_code);
+                             FPA_AURA_IDX(gpool_index), ret,
+                             hdr.res_code);
                ret = -EINVAL;
        }
 
@@ -389,8 +370,8 @@ err:
        return ret;
 }
 
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
                          void *memva, uint16_t gpool)
 {
        uint64_t va_end;
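
Note: the static octeontx_fpavf_pool_setup() becomes the exported octeontx_fpavf_pool_set_range(), so the buffer memory range is no longer passed into octeontx_fpa_bufpool_create() (see the signature change further down) but registered afterwards, once the mempool layer knows the populated area. A hedged sketch of a caller, following the new signature (variable names assumed):

    /* memva/len describe the populated mempool memory; the pool index is
     * decoded from the handle (illustrative). */
    ret = octeontx_fpavf_pool_set_range(handle, len, memva,
                                        octeontx_fpa_bufpool_gpool(handle));
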
@@ -425,7 +406,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index)
        hdr.coproc = FPA_COPROC;
        hdr.msg = FPA_START_COUNT;
        hdr.vfid = gpool_index;
-       ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+       ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
        if (ret < 0) {
                fpavf_log_err("Could not start buffer counting for ");
                fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
@@ -483,24 +464,48 @@ octeontx_fpa_bufpool_block_size(uintptr_t handle)
        return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
 }
 
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle)
+{
+       uint64_t cnt, limit, avail;
+       uint8_t gpool;
+       uint16_t gaura;
+       uintptr_t pool_bar;
+
+       if (unlikely(!octeontx_fpa_handle_valid(handle)))
+               return -EINVAL;
+
+       /* get the gpool */
+       gpool = octeontx_fpa_bufpool_gpool(handle);
+       /* get the aura */
+       gaura = octeontx_fpa_bufpool_gaura(handle);
+
+       /* Get pool bar address from handle */
+       pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+       cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+                               FPA_VF_VHAURA_CNT(gaura)));
+       limit = fpavf_read64((void *)((uintptr_t)pool_bar +
+                               FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+
+       avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+                               FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+       return RTE_MIN(avail, (limit - cnt));
+}
+
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-                               unsigned int buf_offset, char **va_start,
-                               int node_id)
+                               unsigned int buf_offset, int node_id)
 {
        unsigned int gpool;
-       void *memva;
-       unsigned long memsz;
+       unsigned int gaura;
        uintptr_t gpool_handle;
        uintptr_t pool_bar;
        int res;
 
        RTE_SET_USED(node_id);
-       FPAVF_STATIC_ASSERTION(sizeof(struct rte_mbuf) <=
-                               OCTEONTX_FPAVF_BUF_OFFSET);
-
-       if (unlikely(*va_start == NULL))
-               goto error_end;
+       RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
        object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
        if (object_size > FPA_MAX_OBJ_SIZE) {
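
Note: the new octeontx_fpa_bufpool_free_count() reports how many more buffers can be taken from the pool: the hardware "available" count, capped by what the aura's limit still permits (limit - cnt). The same hunk also drops va_start from octeontx_fpa_bufpool_create(), since the memory range is now supplied through octeontx_fpavf_pool_set_range() (see above). Minimal usage sketch of the new helper:

    int avail = octeontx_fpa_bufpool_free_count(handle);
    if (avail < 0)
            return avail;   /* -EINVAL: handle failed octeontx_fpa_handle_valid() */
    /* avail == min(pool free buffers, aura limit - current aura count) */
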
@@ -544,32 +549,23 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
                goto error_pool_destroy;
        }
 
-       /* vf pool setup */
-       memsz = object_size * object_count;
-       memva = *va_start;
-       res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
-       if (res < 0) {
-               errno = res;
-               goto error_gaura_detach;
-       }
+       gaura = FPA_AURA_IDX(gpool);
 
        /* Release lock */
        rte_spinlock_unlock(&fpadev.lock);
 
        /* populate AURA registers */
        fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT(gpool)));
+                        FPA_VF_VHAURA_CNT(gaura)));
        fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                        FPA_VF_VHAURA_CNT_LIMIT(gaura)));
        fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
 
        octeontx_fpapf_start_count(gpool);
 
        return gpool_handle;
 
-error_gaura_detach:
-       (void) octeontx_fpapf_aura_detach(gpool);
 error_pool_destroy:
        octeontx_fpavf_free(gpool);
        octeontx_fpapf_pool_destroy(gpool);
@@ -591,6 +587,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
        uint64_t sz;
        uint64_t cnt, avail;
        uint8_t gpool;
+       uint16_t gaura;
        uintptr_t pool_bar;
        int ret;
 
@@ -604,15 +601,17 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
        /* get the pool */
        gpool = octeontx_fpa_bufpool_gpool(handle);
+       /* get the aura */
+       gaura = octeontx_fpa_bufpool_gaura(handle);
 
        /* Get pool bar address from handle */
        pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
 
         /* Check for no outstanding buffers */
        cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
-                                       FPA_VF_VHAURA_CNT(gpool)));
+                                       FPA_VF_VHAURA_CNT(gaura)));
        if (cnt) {
-               fpavf_log_dbg("buffer exist in pool cnt %ld\n", cnt);
+               fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
                return -EBUSY;
        }
 
@@ -623,9 +622,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
        /* Prepare to empty the entire POOL */
        fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                        FPA_VF_VHAURA_CNT_LIMIT(gaura)));
        fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
 
        /* Empty the pool */
        /* Invalidate the POOL */
@@ -637,11 +636,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
                /* Yank a buffer from the pool */
                node = (void *)(uintptr_t)
                        fpavf_read64((void *)
-                                   (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+                                   (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
 
                if (node == NULL) {
                        fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
-                                     gpool, avail);
+                                     gaura, avail);
                        break;
                }
 
@@ -675,9 +674,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
        /* Deactivate the AURA */
        fpavf_write64(0, (void *)((uintptr_t)pool_bar +
-                       FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                       FPA_VF_VHAURA_CNT_LIMIT(gaura)));
        fpavf_write64(0, (void *)((uintptr_t)pool_bar +
-                       FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+                       FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
 
        ret = octeontx_fpapf_aura_detach(gpool);
        if (ret) {
@@ -756,7 +755,7 @@ fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
        uint8_t *idreg;
        int res;
-       struct fpavf_res *fpa;
+       struct fpavf_res *fpa = NULL;
 
        RTE_SET_USED(pci_drv);
        RTE_SET_USED(fpa);