4 * Copyright (C) 2017 Cavium Inc. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
37 #include "octeontx_fpavf.h"
40 * Per-pool descriptor.
41 * Links mempool with the corresponding memzone,
42 * that provides memory under the pool's elements.
44 struct octeontx_pool_info {
	/* Mempool this descriptor belongs to; used as the lookup key when
	 * walking the global pool list. */
45 const struct rte_mempool *mp;
	/* NOTE(review): one or more member declarations appear to be missing
	 * from this extract between these two fields; octeontx_fpavf_alloc()
	 * below reads pool_info->mz_addr, so an mz_addr member (memzone
	 * virtual address) is expected here — confirm against the full file. */
	/* Linkage into the global octeontx_pool_head singly-linked list. */
48 SLIST_ENTRY(octeontx_pool_info) link;
/* Declares the head type for the per-pool descriptor list. */
51 SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
53 /* List of the allocated pools */
54 static struct octeontx_pool_list octeontx_pool_head =
55 SLIST_HEAD_INITIALIZER(octeontx_pool_head);
56 /* Spinlock to protect pool list */
57 static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
/*
 * Mempool ops 'alloc' callback (see the octeontx_fpavf_ops table below):
 * create a hardware FPA buffer pool backing @mp and store its handle in
 * mp->pool_id.
 *
 * NOTE(review): several lines of this function are missing from this
 * extract (declarations of va_start/pool/rc/object_size, the loop break,
 * the error-path returns, and the trailing arguments of
 * octeontx_fpa_bufpool_create) — the comments below describe only what
 * the visible lines establish.
 */
60 octeontx_fpavf_alloc(struct rte_mempool *mp)
63 struct octeontx_pool_info *pool_info;
	/* Number of elements requested for the pool. */
64 uint32_t memseg_count = mp->size;
	/* Look up the descriptor registered earlier for this mempool
	 * (by octeontx_fpavf_register_memory_area) under the list lock. */
69 rte_spinlock_lock(&pool_list_lock);
70 SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
71 if (pool_info->mp == mp)
	/* No registered memory area for this mempool: cannot proceed. */
74 if (pool_info == NULL) {
75 rte_spinlock_unlock(&pool_list_lock);
79 /* virtual hugepage mapped addr */
80 va_start = pool_info->mz_addr;
81 rte_spinlock_unlock(&pool_list_lock);
	/* Total per-object footprint as laid out by the mempool library. */
83 object_size = mp->elt_size + mp->header_size + mp->trailer_size;
85 pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
86 OCTEONTX_FPAVF_BUF_OFFSET,
	/* Cross-check that the HW pool's block size matches what the
	 * mempool expects; log (but visibly does not abort here) on
	 * mismatch. */
89 rc = octeontx_fpa_bufpool_block_size(pool);
93 if ((uint32_t)rc != object_size)
94 fpavf_log_err("buffer size mismatch: %d instead of %u\n",
97 fpavf_log_info("Pool created %p with .. ", (void *)pool);
98 fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);
100 /* assign pool handle to mempool */
101 mp->pool_id = (uint64_t)pool;
/*
 * Mempool ops 'free' callback: unlink this mempool's descriptor from the
 * global list and destroy the underlying hardware buffer pool.
 *
 * NOTE(review): the declaration of 'pool', the loop break, and the
 * rte_panic argument list are missing from this extract.
 */
110 octeontx_fpavf_free(struct rte_mempool *mp)
112 struct octeontx_pool_info *pool_info;
	/* Recover the HW pool handle stored by octeontx_fpavf_alloc(). */
115 pool = (uintptr_t)mp->pool_id;
	/* Find the descriptor for this mempool under the list lock. */
117 rte_spinlock_lock(&pool_list_lock);
118 SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
119 if (pool_info->mp == mp)
	/* Freeing a pool that was never registered is a driver-state
	 * corruption; treated as fatal. */
123 if (pool_info == NULL) {
124 rte_spinlock_unlock(&pool_list_lock);
125 rte_panic("%s: trying to free pool with no valid metadata",
	/* Detach the descriptor before releasing the lock. */
129 SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
130 rte_spinlock_unlock(&pool_list_lock);
	/* Tear down the hardware pool itself. */
133 octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
/*
 * Pop one buffer from the HW pool: a 64-bit read of the aura-0 ALLOC
 * register at @handle returns the buffer's address (presumably 0/NULL
 * when the pool is empty — confirm against the FPA HRM).
 */
136 static __rte_always_inline void *
137 octeontx_fpa_bufpool_alloc(uintptr_t handle)
139 return (void *)(uintptr_t)fpavf_read64((void *)(handle +
140 FPA_VF_VHAURA_OP_ALLOC(0)));
/*
 * Push @buf back to the HW pool: a 64-bit write of the buffer address to
 * the aura-0 FREE register, with DWB=0 (no don't-write-back hint) and
 * FABS=1 encoded into the register offset.
 */
143 static __rte_always_inline void
144 octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
146 uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
147 0 /* DWB */, 1 /* FABS */);
149 fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
/*
 * Mempool ops 'enqueue' callback: return @n objects from @obj_table to
 * the hardware pool, one register write per object.
 *
 * NOTE(review): the rest of the signature ('unsigned int n'), local
 * declarations, and the return statement are missing from this extract.
 */
153 octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
159 pool = (uintptr_t)mp->pool_id;
160 /* Get pool bar address from handle */
161 pool &= ~(uint64_t)FPA_GPOOL_MASK;
	/* Free each object individually via the aura FREE register. */
162 for (index = 0; index < n; index++, obj_table++)
163 octeontx_fpa_bufpool_free(pool, *obj_table);
/*
 * Mempool ops 'dequeue' callback: allocate @n objects from the hardware
 * pool into @obj_table. All-or-nothing: on a mid-loop allocation failure
 * the already-fetched buffers are pushed back before returning.
 *
 * NOTE(review): the failure check on 'obj', the obj_table rewind inside
 * the unwind loop, the error return, and the success path are missing
 * from this extract.
 */
169 octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
176 pool = (uintptr_t)mp->pool_id;
177 /* Get pool bar address from handle */
178 pool &= ~(uint64_t)FPA_GPOOL_MASK;
179 for (index = 0; index < n; index++, obj_table++) {
	/* One register read per object. */
180 obj = octeontx_fpa_bufpool_alloc(pool);
183 * Failed to allocate the requested number of objects
184 * from the pool. Current pool implementation requires
185 * completing the entire request or returning error
187 * Free already allocated buffers to the pool.
	/* Unwind: give back everything fetched so far. */
189 for (; index > 0; index--) {
191 octeontx_fpa_bufpool_free(pool, *obj_table);
/*
 * Mempool ops 'get_count' callback: report how many objects are
 * currently available in the hardware pool.
 */
202 octeontx_fpavf_get_count(const struct rte_mempool *mp)
206 pool = (uintptr_t)mp->pool_id;
208 return octeontx_fpa_bufpool_free_count(pool);
/*
 * Mempool ops 'get_capabilities' callback: advertise that this driver
 * needs physically contiguous memory and block-aligned objects.
 */
212 octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
216 *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
217 MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
/*
 * Mempool ops 'register_memory_area' callback: record the memzone backing
 * @mp so octeontx_fpavf_alloc() can later find its virtual base address.
 *
 * NOTE(review): the assignment of pool_info->mp (and the error return for
 * the failed rte_malloc) appear to be missing from this extract — both
 * are required for the lookup in octeontx_fpavf_alloc() to work; confirm
 * against the full file. 'paddr' and 'len' are unused by the visible
 * lines.
 */
222 octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
223 char *vaddr, phys_addr_t paddr, size_t len)
225 struct octeontx_pool_info *pool_info;
	/* One descriptor per mempool, heap-allocated; freed elsewhere
	 * (presumably on the free path — not visible here). */
230 pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
231 if (pool_info == NULL)
	/* Remember the memzone's virtual base for bufpool creation. */
235 pool_info->mz_addr = (uintptr_t)vaddr;
	/* Publish the descriptor under the list lock. */
236 rte_spinlock_lock(&pool_list_lock);
237 SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
238 rte_spinlock_unlock(&pool_list_lock);
/*
 * Mempool ops vtable binding the callbacks above to the "octeontx_fpavf"
 * driver name; registered with the mempool library at load time by the
 * MEMPOOL_REGISTER_OPS constructor macro.
 */
242 static struct rte_mempool_ops octeontx_fpavf_ops = {
243 .name = "octeontx_fpavf",
244 .alloc = octeontx_fpavf_alloc,
245 .free = octeontx_fpavf_free,
246 .enqueue = octeontx_fpavf_enqueue,
247 .dequeue = octeontx_fpavf_dequeue,
248 .get_count = octeontx_fpavf_get_count,
249 .get_capabilities = octeontx_fpavf_get_capabilities,
250 .register_memory_area = octeontx_fpavf_register_memory_area,
253 MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);