drivers: use SPDX tag for Cavium copyright files
[dpdk.git] / drivers / mempool / octeontx / rte_mempool_octeontx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <stdio.h>
6 #include <rte_mempool.h>
7 #include <rte_malloc.h>
8 #include <rte_mbuf.h>
9
10 #include "octeontx_fpavf.h"
11
/*
 * Per-pool descriptor.
 * Links mempool with the corresponding memzone,
 * that provides memory under the pool's elements.
 *
 * Instances are allocated in register_memory_area(), looked up in
 * alloc(), and removed/freed in free(); all list traversal is done
 * under pool_list_lock.
 */
struct octeontx_pool_info {
	const struct rte_mempool *mp;	/* mempool this descriptor belongs to */
	uintptr_t mz_addr;		/* start VA of the backing memory area */

	SLIST_ENTRY(octeontx_pool_info) link;	/* octeontx_pool_head linkage */
};
23
SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);

/* List of the allocated pools (one octeontx_pool_info per mempool). */
static struct octeontx_pool_list octeontx_pool_head =
				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
/* Spinlock to protect pool list; taken around every traversal/mutation. */
static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
31
32 static int
33 octeontx_fpavf_alloc(struct rte_mempool *mp)
34 {
35         uintptr_t pool;
36         struct octeontx_pool_info *pool_info;
37         uint32_t memseg_count = mp->size;
38         uint32_t object_size;
39         uintptr_t va_start;
40         int rc = 0;
41
42         rte_spinlock_lock(&pool_list_lock);
43         SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
44                 if (pool_info->mp == mp)
45                         break;
46         }
47         if (pool_info == NULL) {
48                 rte_spinlock_unlock(&pool_list_lock);
49                 return -ENXIO;
50         }
51
52         /* virtual hugepage mapped addr */
53         va_start = pool_info->mz_addr;
54         rte_spinlock_unlock(&pool_list_lock);
55
56         object_size = mp->elt_size + mp->header_size + mp->trailer_size;
57
58         pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
59                                                 OCTEONTX_FPAVF_BUF_OFFSET,
60                                                 (char **)&va_start,
61                                                 mp->socket_id);
62         rc = octeontx_fpa_bufpool_block_size(pool);
63         if (rc < 0)
64                 goto _end;
65
66         if ((uint32_t)rc != object_size)
67                 fpavf_log_err("buffer size mismatch: %d instead of %u\n",
68                                 rc, object_size);
69
70         fpavf_log_info("Pool created %p with .. ", (void *)pool);
71         fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);
72
73         /* assign pool handle to mempool */
74         mp->pool_id = (uint64_t)pool;
75
76         return 0;
77
78 _end:
79         return rc;
80 }
81
82 static void
83 octeontx_fpavf_free(struct rte_mempool *mp)
84 {
85         struct octeontx_pool_info *pool_info;
86         uintptr_t pool;
87
88         pool = (uintptr_t)mp->pool_id;
89
90         rte_spinlock_lock(&pool_list_lock);
91         SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
92                 if (pool_info->mp == mp)
93                         break;
94         }
95
96         if (pool_info == NULL) {
97                 rte_spinlock_unlock(&pool_list_lock);
98                 rte_panic("%s: trying to free pool with no valid metadata",
99                     __func__);
100         }
101
102         SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
103         rte_spinlock_unlock(&pool_list_lock);
104
105         rte_free(pool_info);
106         octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
107 }
108
109 static __rte_always_inline void *
110 octeontx_fpa_bufpool_alloc(uintptr_t handle)
111 {
112         return (void *)(uintptr_t)fpavf_read64((void *)(handle +
113                                                 FPA_VF_VHAURA_OP_ALLOC(0)));
114 }
115
116 static __rte_always_inline void
117 octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
118 {
119         uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
120                                                  0 /* DWB */, 1 /* FABS */);
121
122         fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
123 }
124
125 static int
126 octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
127                         unsigned int n)
128 {
129         uintptr_t pool;
130         unsigned int index;
131
132         pool = (uintptr_t)mp->pool_id;
133         /* Get pool bar address from handle */
134         pool &= ~(uint64_t)FPA_GPOOL_MASK;
135         for (index = 0; index < n; index++, obj_table++)
136                 octeontx_fpa_bufpool_free(pool, *obj_table);
137
138         return 0;
139 }
140
141 static int
142 octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
143                         unsigned int n)
144 {
145         unsigned int index;
146         uintptr_t pool;
147         void *obj;
148
149         pool = (uintptr_t)mp->pool_id;
150         /* Get pool bar address from handle */
151         pool &= ~(uint64_t)FPA_GPOOL_MASK;
152         for (index = 0; index < n; index++, obj_table++) {
153                 obj = octeontx_fpa_bufpool_alloc(pool);
154                 if (obj == NULL) {
155                         /*
156                          * Failed to allocate the requested number of objects
157                          * from the pool. Current pool implementation requires
158                          * completing the entire request or returning error
159                          * otherwise.
160                          * Free already allocated buffers to the pool.
161                          */
162                         for (; index > 0; index--) {
163                                 obj_table--;
164                                 octeontx_fpa_bufpool_free(pool, *obj_table);
165                         }
166                         return -ENOMEM;
167                 }
168                 *obj_table = obj;
169         }
170
171         return 0;
172 }
173
174 static unsigned int
175 octeontx_fpavf_get_count(const struct rte_mempool *mp)
176 {
177         uintptr_t pool;
178
179         pool = (uintptr_t)mp->pool_id;
180
181         return octeontx_fpa_bufpool_free_count(pool);
182 }
183
184 static int
185 octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
186                                 unsigned int *flags)
187 {
188         RTE_SET_USED(mp);
189         *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
190                         MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
191         return 0;
192 }
193
194 static int
195 octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
196                                     char *vaddr, rte_iova_t paddr, size_t len)
197 {
198         struct octeontx_pool_info *pool_info;
199
200         RTE_SET_USED(paddr);
201         RTE_SET_USED(len);
202
203         pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
204         if (pool_info == NULL)
205                 return -ENOMEM;
206
207         pool_info->mp = mp;
208         pool_info->mz_addr = (uintptr_t)vaddr;
209         rte_spinlock_lock(&pool_list_lock);
210         SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
211         rte_spinlock_unlock(&pool_list_lock);
212         return 0;
213 }
214
/*
 * Mempool ops vector for the OCTEON TX FPA; selected by applications
 * via rte_mempool_set_ops_byname("octeontx_fpavf", ...).
 */
static struct rte_mempool_ops octeontx_fpavf_ops = {
	.name = "octeontx_fpavf",
	.alloc = octeontx_fpavf_alloc,
	.free = octeontx_fpavf_free,
	.enqueue = octeontx_fpavf_enqueue,
	.dequeue = octeontx_fpavf_dequeue,
	.get_count = octeontx_fpavf_get_count,
	.get_capabilities = octeontx_fpavf_get_capabilities,
	.register_memory_area = octeontx_fpavf_register_memory_area,
};

MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);