X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fmempool%2Focteontx%2Focteontx_fpavf.c;h=baabc0152e10de55234829864c7b80390ab86ce8;hb=e162b8e910d2acd33d47844a776ea58e50231ce6;hp=a1ace14ccd3d75e46e576c8925f59c5a9b179e20;hpb=1513fc302300f309a3a14d3d88ebafc223d54057;p=dpdk.git

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index a1ace14ccd..baabc0152e 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All Right reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Cavium networks nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #include 
@@ -41,14 +13,14 @@
 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 
-#include 
+#include "octeontx_mbox.h"
 #include "octeontx_fpavf.h"
 
 /* FPA Mbox Message */
@@ -133,6 +105,16 @@ struct octeontx_fpadev {
 
 static struct octeontx_fpadev fpadev;
 
+int octeontx_logtype_fpavf;
+int octeontx_logtype_fpavf_mbox;
+
+RTE_INIT(otx_pool_init_log)
+{
+        octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
+        if (octeontx_logtype_fpavf >= 0)
+                rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
+}
+
 /* lock is taken by caller */
 static int
 octeontx_fpa_gpool_alloc(unsigned int object_size)
@@ -221,7 +203,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
                 signed short buf_offset, unsigned int max_buf_count)
 {
         void *memptr = NULL;
-        phys_addr_t phys_addr;
+        rte_iova_t phys_addr;
         unsigned int memsz;
         struct fpavf_res *fpa = NULL;
         uint64_t reg;
@@ -244,7 +226,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
 
         /* Configure stack */
         fpa->pool_stack_base = memptr;
-        phys_addr = rte_malloc_virt2phy(memptr);
+        phys_addr = rte_malloc_virt2iova(memptr);
 
         buf_size /= FPA_LN_SIZE;
 
@@ -259,13 +241,13 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
                 POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
                 POOL_ENA;
 
-        cfg.aid = 0;
+        cfg.aid = FPA_AURA_IDX(gpool);
         cfg.pool_cfg = reg;
         cfg.pool_stack_base = phys_addr;
         cfg.pool_stack_end = phys_addr + memsz;
         cfg.aura_cfg = (1 << 9);
 
-        ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+        ret = octeontx_mbox_send(&hdr, &cfg,
                         sizeof(struct octeontx_mbox_fpa_cfg),
                         &resp, sizeof(resp));
         if (ret < 0) {
@@ -310,7 +292,7 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index)
         cfg.pool_stack_end = 0;
         cfg.aura_cfg = 0;
 
-        ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+        ret = octeontx_mbox_send(&hdr, &cfg,
                         sizeof(struct octeontx_mbox_fpa_cfg),
                         &resp, sizeof(resp));
         if (ret < 0) {
@@ -343,15 +325,16 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
         hdr.vfid = gpool_index;
         hdr.res_code = 0;
         memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
-        cfg.aid = gpool_index; /* gpool is guara */
+        cfg.aid = FPA_AURA_IDX(gpool_index);
 
-        ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+        ret = octeontx_mbox_send(&hdr, &cfg,
                         sizeof(struct octeontx_mbox_fpa_cfg),
                         &resp, sizeof(resp));
         if (ret < 0) {
                 fpavf_log_err("Could not attach fpa ");
                 fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
-                        gpool_index, gpool_index, ret, hdr.res_code);
+                        FPA_AURA_IDX(gpool_index), gpool_index, ret,
+                        hdr.res_code);
                 ret = -EACCES;
                 goto err;
         }
@@ -371,14 +354,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
                 goto err;
         }
 
-        cfg.aid = gpool_index; /* gpool is gaura */
+        cfg.aid = FPA_AURA_IDX(gpool_index);
         hdr.coproc = FPA_COPROC;
         hdr.msg = FPA_DETACHAURA;
         hdr.vfid = gpool_index;
-        ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+        ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
         if (ret < 0) {
                 fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
-                        gpool_index, ret, hdr.res_code);
+                        FPA_AURA_IDX(gpool_index), ret,
+                        hdr.res_code);
                 ret = -EINVAL;
         }
 
@@ -386,8 +370,8 @@ err:
         return ret;
 }
 
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
                         void *memva, uint16_t gpool)
 {
         uint64_t va_end;
@@ -422,7 +406,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index)
         hdr.coproc = FPA_COPROC;
         hdr.msg = FPA_START_COUNT;
         hdr.vfid = gpool_index;
-        ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+        ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
         if (ret < 0) {
                 fpavf_log_err("Could not start buffer counting for ");
                 fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
@@ -485,6 +469,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 {
         uint64_t cnt, limit, avail;
         uint8_t gpool;
+        uint16_t gaura;
         uintptr_t pool_bar;
 
         if (unlikely(!octeontx_fpa_handle_valid(handle)))
@@ -492,14 +477,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 
         /* get the gpool */
         gpool = octeontx_fpa_bufpool_gpool(handle);
+        /* get the aura */
+        gaura = octeontx_fpa_bufpool_gaura(handle);
 
         /* Get pool bar address from handle */
         pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
 
         cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
-                                FPA_VF_VHAURA_CNT(gpool)));
+                                FPA_VF_VHAURA_CNT(gaura)));
         limit = fpavf_read64((void *)((uintptr_t)pool_bar +
-                                FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                                FPA_VF_VHAURA_CNT_LIMIT(gaura)));
 
         avail = fpavf_read64((void *)((uintptr_t)pool_bar +
                                 FPA_VF_VHPOOL_AVAILABLE(gpool)));
@@ -509,12 +496,10 @@ uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-                                unsigned int buf_offset, char **va_start,
-                                int node_id)
+                                unsigned int buf_offset, int node_id)
 {
         unsigned int gpool;
-        void *memva;
-        unsigned long memsz;
+        unsigned int gaura;
         uintptr_t gpool_handle;
         uintptr_t pool_bar;
         int res;
@@ -522,9 +507,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
         RTE_SET_USED(node_id);
         RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
-        if (unlikely(*va_start == NULL))
-                goto error_end;
-
         object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
         if (object_size > FPA_MAX_OBJ_SIZE) {
                 errno = EINVAL;
@@ -567,32 +549,23 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
                 goto error_pool_destroy;
         }
 
-        /* vf pool setup */
-        memsz = object_size * object_count;
-        memva = *va_start;
-        res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
-        if (res < 0) {
-                errno = res;
-                goto error_gaura_detach;
-        }
+        gaura = FPA_AURA_IDX(gpool);
 
         /* Release lock */
         rte_spinlock_unlock(&fpadev.lock);
 
         /* populate AURA registers */
         fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT(gpool)));
+                        FPA_VF_VHAURA_CNT(gaura)));
         fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                        FPA_VF_VHAURA_CNT_LIMIT(gaura)));
         fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
 
         octeontx_fpapf_start_count(gpool);
 
         return gpool_handle;
 
-error_gaura_detach:
-        (void) octeontx_fpapf_aura_detach(gpool);
 error_pool_destroy:
         octeontx_fpavf_free(gpool);
         octeontx_fpapf_pool_destroy(gpool);
@@ -614,6 +587,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
         uint64_t sz;
         uint64_t cnt, avail;
         uint8_t gpool;
+        uint16_t gaura;
         uintptr_t pool_bar;
         int ret;
 
@@ -627,15 +601,17 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
         /* get the pool */
         gpool = octeontx_fpa_bufpool_gpool(handle);
+        /* get the aura */
+        gaura = octeontx_fpa_bufpool_gaura(handle);
 
         /* Get pool bar address from handle */
         pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
 
         /* Check for no outstanding buffers */
         cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
-                                FPA_VF_VHAURA_CNT(gpool)));
+                                FPA_VF_VHAURA_CNT(gaura)));
         if (cnt) {
-                fpavf_log_dbg("buffer exist in pool cnt %ld\n", cnt);
+                fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
                 return -EBUSY;
         }
 
@@ -646,9 +622,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
         /* Prepare to empty the entire POOL */
         fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                        FPA_VF_VHAURA_CNT_LIMIT(gaura)));
         fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
 
         /* Empty the pool */
         /* Invalidate the POOL */
@@ -660,11 +636,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
                 /* Yank a buffer from the pool */
                 node = (void *)(uintptr_t)
                         fpavf_read64((void *)
-                                (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+                                (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
                 if (node == NULL) {
                         fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
-                                      gpool, avail);
+                                      gaura, avail);
                         break;
                 }
 
@@ -698,9 +674,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 
         /* Deactivate the AURA */
         fpavf_write64(0, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+                        FPA_VF_VHAURA_CNT_LIMIT(gaura)));
         fpavf_write64(0, (void *)((uintptr_t)pool_bar +
-                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
 
         ret = octeontx_fpapf_aura_detach(gpool);
         if (ret) {
@@ -823,7 +799,7 @@ static const struct rte_pci_id pci_fpavf_map[] = {
 
 static struct rte_pci_driver pci_fpavf = {
         .id_table = pci_fpavf_map,
-        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
         .probe = fpavf_probe,
 };
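
For illustration, a minimal sketch of the calling sequence implied by this change: octeontx_fpa_bufpool_create() no longer takes a va_start argument, and the memory backing the pool is registered afterwards through the now-exported octeontx_fpavf_pool_set_range(). The helper below is hypothetical; it only assumes the signatures visible in this diff, the OCTEONTX_FPAVF_BUF_OFFSET and gpool accessor already referenced by the patched file, and the driver's convention of reporting create() failures via errno with a null handle.

/* Illustrative only -- not code from the patch. obj_size, obj_count,
 * memva and memsz are caller-supplied; node_id is unused by the driver.
 */
#include <errno.h>
#include <stdint.h>

#include "octeontx_fpavf.h"

static int
example_fpavf_pool_bringup(unsigned int obj_size, unsigned int obj_count,
                           void *memva, unsigned long memsz)
{
        uintptr_t handle;
        uint16_t gpool;

        /* Step 1: create the pool; the VA range is no longer passed here. */
        handle = octeontx_fpa_bufpool_create(obj_size, obj_count,
                                             OCTEONTX_FPAVF_BUF_OFFSET, 0);
        if (handle == (uintptr_t)NULL)
                return -errno;  /* assumed: create() sets errno on failure */

        /* Step 2: register the memory backing the pool (new public API). */
        gpool = octeontx_fpa_bufpool_gpool(handle);
        return octeontx_fpavf_pool_set_range(handle, memsz, memva, gpool);
}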