X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fmempool%2Focteontx%2Frte_mempool_octeontx.c;h=ab94dfe9131bdb7bc3dc8f5a8d7bb08ca4e3517b;hb=f73477e276720dc0fdb4ce8da130a2ab1c963846;hp=e96aebc67790a8bacb1058bf43224660dd0862c3;hpb=aaf4363e1e9e518c034c7ff9938a2faefde9854d;p=dpdk.git

diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index e96aebc677..ab94dfe913 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -9,55 +9,18 @@
 
 #include "octeontx_fpavf.h"
 
-/*
- * Per-pool descriptor.
- * Links mempool with the corresponding memzone,
- * that provides memory under the pool's elements.
- */
-struct octeontx_pool_info {
-	const struct rte_mempool *mp;
-	uintptr_t mz_addr;
-
-	SLIST_ENTRY(octeontx_pool_info) link;
-};
-
-SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
-
-/* List of the allocated pools */
-static struct octeontx_pool_list octeontx_pool_head =
-	SLIST_HEAD_INITIALIZER(octeontx_pool_head);
-/* Spinlock to protect pool list */
-static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
-
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
-	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
-	uintptr_t va_start;
 	int rc = 0;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		return -ENXIO;
-	}
-
-	/* virtual hugepage mapped addr */
-	va_start = pool_info->mz_addr;
-	rte_spinlock_unlock(&pool_list_lock);
-
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
 	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
 						OCTEONTX_FPAVF_BUF_OFFSET,
-						(char **)&va_start,
 						mp->socket_id);
 	rc = octeontx_fpa_bufpool_block_size(pool);
 	if (rc < 0)
@@ -82,27 +45,9 @@ _end:
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
-	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
+	pool = (uintptr_t)mp->pool_id;
 
-	pool = (uintptr_t)mp->pool_id;
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		rte_panic("%s: trying to free pool with no valid metadata",
-			__func__);
-	}
-
-	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-
-	rte_free(pool_info);
-
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -181,35 +126,66 @@ octeontx_fpavf_get_count(const struct rte_mempool *mp)
 	return octeontx_fpa_bufpool_free_count(pool);
 }
 
-static int
-octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
-				unsigned int *flags)
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+			     uint32_t obj_num, uint32_t pg_shift,
+			     size_t *min_chunk_size, size_t *align)
 {
-	RTE_SET_USED(mp);
-	*flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
-			MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
-	return 0;
+	ssize_t mem_size;
+
+	/*
+	 * Simply need space for one more object to be able to
+	 * fulfil alignment requirements.
+	 */
+	mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+							pg_shift,
+							min_chunk_size, align);
+	if (mem_size >= 0) {
+		/*
+		 * Memory area which contains objects must be physically
+		 * contiguous.
+		 */
+		*min_chunk_size = mem_size;
+	}
+
+	return mem_size;
 }
 
 static int
-octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
-				    char *vaddr, rte_iova_t paddr, size_t len)
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+			void *vaddr, rte_iova_t iova, size_t len,
+			rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
-	struct octeontx_pool_info *pool_info;
+	size_t total_elt_sz;
+	size_t off;
+	uint8_t gpool;
+	uintptr_t pool_bar;
+	int ret;
 
-	RTE_SET_USED(paddr);
-	RTE_SET_USED(len);
+	if (iova == RTE_BAD_IOVA)
+		return -EINVAL;
 
-	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
-	if (pool_info == NULL)
-		return -ENOMEM;
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
-	pool_info->mp = mp;
-	pool_info->mz_addr = (uintptr_t)vaddr;
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-	return 0;
+	/* align object start address to a multiple of total_elt_sz */
+	off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+
+	if (len < off)
+		return -EINVAL;
+
+	vaddr = (char *)vaddr + off;
+	iova += off;
+	len -= off;
+
+	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
+
+	ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+	if (ret < 0)
+		return ret;
+
+	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+					       obj_cb, obj_cb_arg);
 }
 
 static struct rte_mempool_ops octeontx_fpavf_ops = {
@@ -219,8 +195,8 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
 	.enqueue = octeontx_fpavf_enqueue,
 	.dequeue = octeontx_fpavf_dequeue,
 	.get_count = octeontx_fpavf_get_count,
-	.get_capabilities = octeontx_fpavf_get_capabilities,
-	.register_memory_area = octeontx_fpavf_register_memory_area,
+	.calc_mem_size = octeontx_fpavf_calc_mem_size,
+	.populate = octeontx_fpavf_populate,
 };
 
 MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
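Note: the two new callbacks work together. octeontx_fpavf_calc_mem_size() asks the default helper for room for one extra object, and octeontx_fpavf_populate() then skips forward to the next multiple of the element size before handing the range to the FPA hardware. The standalone sketch below is not part of the patch; it only illustrates that alignment arithmetic, and the element size, chunk address and object count are made-up values.

/* Sketch of the offset computation used in octeontx_fpavf_populate(),
 * with hypothetical numbers instead of a real mempool chunk.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t total_elt_sz = 2176;		/* header + elt + trailer (example) */
	uintptr_t chunk_va = 0x100200;		/* hypothetical chunk start address */
	size_t len = (10 + 1) * total_elt_sz;	/* obj_num + 1 objects, as calc_mem_size requests */

	/* skip to the next multiple of total_elt_sz, as the populate callback does */
	size_t off = total_elt_sz - (chunk_va % total_elt_sz);

	printf("skip %zu bytes; %zu objects still fit in the chunk\n",
	       off, (len - off) / total_elt_sz);
	return 0;
}

Because calc_mem_size reserved space for obj_num + 1 objects, the bytes skipped for alignment never reduce the usable capacity below obj_num objects.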