From 2baa3f0b7de5cae05b23a32760b17d48595166ac Mon Sep 17 00:00:00 2001
From: Santosh Shukla
Date: Sun, 8 Oct 2017 18:10:10 +0530
Subject: [PATCH] mempool/octeontx: support memory area ops

Add support for the register_memory_area ops in the mempool driver.

Allow more than one HW pool when using the OcteonTx mempool driver:
store each pool's information in a list and find the matching list
element by comparing rte_mempool pointers.

Signed-off-by: Santosh Shukla
Signed-off-by: Jerin Jacob
---
 .../mempool/octeontx/rte_mempool_octeontx.c | 74 ++++++++++++++++++-
 1 file changed, 72 insertions(+), 2 deletions(-)

diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index 09df114c05..9f1c07f9dd 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -36,17 +36,49 @@
 
 #include "octeontx_fpavf.h"
 
+/*
+ * Per-pool descriptor.
+ * Links a mempool with the corresponding memzone
+ * that provides the memory backing the pool's elements.
+ */
+struct octeontx_pool_info {
+	const struct rte_mempool *mp;
+	uintptr_t mz_addr;
+
+	SLIST_ENTRY(octeontx_pool_info) link;
+};
+
+SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
+
+/* List of the allocated pools */
+static struct octeontx_pool_list octeontx_pool_head =
+	SLIST_HEAD_INITIALIZER(octeontx_pool_head);
+/* Spinlock to protect the pool list */
+static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
+
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
+	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
 	uintptr_t va_start;
 	int rc = 0;
 
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+		if (pool_info->mp == mp)
+			break;
+	}
+	if (pool_info == NULL) {
+		rte_spinlock_unlock(&pool_list_lock);
+		return -ENXIO;
+	}
+
 	/* virtual hugepage mapped addr */
-	va_start = ~(uint64_t)0;
+	va_start = pool_info->mz_addr;
+	rte_spinlock_unlock(&pool_list_lock);
 
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
@@ -77,10 +109,27 @@ _end:
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
+	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
 
 	pool = (uintptr_t)mp->pool_id;
 
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+		if (pool_info->mp == mp)
+			break;
+	}
+
+	if (pool_info == NULL) {
+		rte_spinlock_unlock(&pool_list_lock);
+		rte_panic("%s: trying to free pool with no valid metadata",
+			__func__);
+	}
+
+	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
+	rte_spinlock_unlock(&pool_list_lock);
+
+	rte_free(pool_info);
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -169,6 +218,27 @@ octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
 	return 0;
 }
 
+static int
+octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
+				    char *vaddr, phys_addr_t paddr, size_t len)
+{
+	struct octeontx_pool_info *pool_info;
+
+	RTE_SET_USED(paddr);
+	RTE_SET_USED(len);
+
+	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
+	if (pool_info == NULL)
+		return -ENOMEM;
+
+	pool_info->mp = mp;
+	pool_info->mz_addr = (uintptr_t)vaddr;
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
+	rte_spinlock_unlock(&pool_list_lock);
+	return 0;
+}
+
 static struct rte_mempool_ops octeontx_fpavf_ops = {
 	.name = "octeontx_fpavf",
 	.alloc = octeontx_fpavf_alloc,
@@ -177,7 +247,7 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
 	.dequeue = octeontx_fpavf_dequeue,
 	.get_count = octeontx_fpavf_get_count,
 	.get_capabilities = octeontx_fpavf_get_capabilities,
-	.register_memory_area = NULL,
+	.register_memory_area = octeontx_fpavf_register_memory_area,
 };
 
 MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
-- 
2.20.1
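
Editor's note: for readers who want to see how the new op gets exercised, below
is a minimal usage sketch. It is not part of the patch; the helper and pool
names are hypothetical, and it assumes the 17.11-era mempool API, where the
populate path notifies the driver of each backing memory area via the
register_memory_area callback before invoking ops->alloc.

#include <rte_lcore.h>
#include <rte_mempool.h>

/* Hypothetical helper: create a pool backed by the octeontx_fpavf ops. */
static struct rte_mempool *
create_octeontx_pool(void)
{
	struct rte_mempool *mp;

	/* Create an empty pool, then bind it to the octeontx_fpavf ops. */
	mp = rte_mempool_create_empty("example_pool", 8192, 2048,
				      0, 0, rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) < 0)
		goto fail;

	/*
	 * Populating the pool triggers register_memory_area() for the
	 * backing memory area, which records the (mempool, vaddr) pair in
	 * the driver's list; ops->alloc() then looks that pair up to obtain
	 * va_start instead of the previous ~(uint64_t)0 placeholder.
	 */
	if (rte_mempool_populate_default(mp) < 0)
		goto fail;

	return mp;
fail:
	rte_mempool_free(mp);
	return NULL;
}

The -ENXIO path in octeontx_fpavf_alloc() covers the case where alloc runs
for a mempool that never had a memory area registered.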