drivers/mempool/dpaa/dpaa_mempool.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>

/* List of all the memseg information locally maintained in dpaa driver. This
 * is to optimize the PA_to_VA searches until a better mechanism (algo) is
 * available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
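
/* Illustrative sketch only, not part of the upstream driver: a PA-to-VA
 * lookup over this list walks the cached memsegs and offsets into the
 * matching segment. The helper name is hypothetical; the fields used
 * (vaddr, iova, len, next) are the ones filled in by dpaa_populate() below.
 */
static inline void *
dpaa_paddr_to_vaddr_sketch(rte_iova_t paddr)
{
	struct dpaa_memseg *ms;

	TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
			return (char *)ms->vaddr + (paddr - ms->iova);
	}
	return NULL; /* caller falls back to generic memseg traversal */
}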

struct dpaa_bp_info *rte_dpaa_bpid_info;

RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_mempool, NOTICE);

static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
	struct bman_pool *bp;
	struct bm_buffer bufs[8];
	struct dpaa_bp_info *bp_info;
	uint8_t bpid;
	int num_bufs = 0, ret = 0;
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
	};

	MEMPOOL_INIT_FUNC_TRACE();

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR(
				"rte_dpaa_portal_init failed with ret: %d",
				ret);
			return -1;
		}
	}
	bp = bman_new_pool(&params);
	if (!bp) {
		DPAA_MEMPOOL_ERR("bman_new_pool() failed");
		return -ENODEV;
	}
	bpid = bman_get_params(bp)->bpid;

	/* Drain the pool of anything already in it. */
	do {
		/* Acquire is all-or-nothing, so we drain in 8s,
		 * then in 1s for the remainder.
		 */
		if (ret != 1)
			ret = bman_acquire(bp, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(bp, bufs, 1, 0);
		if (ret > 0)
			num_bufs += ret;
	} while (ret > 0);
	if (num_bufs)
		DPAA_MEMPOOL_WARN("drained %d bufs from BPID %d",
				  num_bufs, bpid);

	if (rte_dpaa_bpid_info == NULL) {
		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa_bpid_info == NULL) {
			bman_free_pool(bp);
			return -ENOMEM;
		}
	}

	rte_dpaa_bpid_info[bpid].mp = mp;
	rte_dpaa_bpid_info[bpid].bpid = bpid;
	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
	rte_dpaa_bpid_info[bpid].bp = bp;
	rte_dpaa_bpid_info[bpid].meta_data_size =
		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
	rte_dpaa_bpid_info[bpid].ptov_off = 0;
	rte_dpaa_bpid_info[bpid].flags = 0;

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
		bman_free_pool(bp);
		return -ENOMEM;
	}

	rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
		   sizeof(struct dpaa_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA_MEMPOOL_INFO("BMAN pool created for bpid = %d", bpid);
	return 0;
}

static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (bp_info) {
		bman_free_pool(bp_info->bp);
		DPAA_MEMPOOL_INFO("BMAN pool freed for bpid = %d",
				  bp_info->bpid);
		/* bp_info aliases mp->pool_data, so clear the pool handle
		 * before freeing the backing memory to avoid a
		 * use-after-free write.
		 */
		bp_info->bp = NULL;
		rte_free(mp->pool_data);
		mp->pool_data = NULL;
	}
}

static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
	struct bm_buffer buf;
	int ret;

	DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
			     addr, bp_info->bpid);

	bm_buffer_set64(&buf, addr);
retry:
	ret = bman_release(bp_info->bp, &buf, 1, 0);
	if (ret) {
		DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}
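
/* The retry loop above is presumably there because bman_release can fail
 * transiently while the release command ring is full; the driver busy-waits
 * with a fixed backoff rather than dropping the buffer.
 */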

static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
		    void *const *obj_table,
		    unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	int ret;
	unsigned int i = 0;

	DPAA_MEMPOOL_DPDEBUG("Request to free %u buffers in bpid = %d",
			     n, bp_info->bpid);

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return 0;
		}
	}

	while (i < n) {
		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

		if (unlikely(!bp_info->ptov_off)) {
			/* buffers are from a single mem segment */
			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
				bp_info->ptov_off = (size_t)obj_table[i] - phy;
				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
						= bp_info->ptov_off;
			}
		}

		dpaa_buf_free(bp_info,
			      phy + bp_info->meta_data_size);
		i = i + 1;
	}

	DPAA_MEMPOOL_DPDEBUG("freed %u buffers in bpid = %d",
			     n, bp_info->bpid);

	return 0;
}
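
/* Note on the address math above: BMAN holds the physical address of the
 * buffer data area, i.e. (mbuf PA + meta_data_size), and the alloc path
 * below recovers the mbuf by subtracting meta_data_size again. Numbers
 * here are illustrative only: with sizeof(struct rte_mbuf) == 128 and no
 * mbuf private area, meta_data_size is 128, so an mbuf at PA 0x1000 is
 * held by BMAN as 0x1080.
 */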

static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
		     void **obj_table,
		     unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

	DPAA_MEMPOOL_DPDEBUG("Request to alloc %u buffers in bpid = %d",
			     count, bp_info->bpid);

	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so we acquire in batches of
		 * DPAA_MBUF_MAX_ACQ_REL, then the remainder.
		 */
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* In case fewer buffers are available in the pool than
		 * requested, bman_acquire returns 0.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
					     ret);
			/* The API expects the exact number of requested
			 * buffers, so release all buffers allocated so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* assign mbufs from the acquired objects */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO-errata - it has been observed that bufs may
			 * be null, i.e. the first buffer is valid while the
			 * remaining buffers may be null.
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr
						- bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Vaddr (%p), mbuf (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %u buffers from bpid = %d",
			     n, bp_info->bpid);
	return 0;
}

static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	return bman_query_free_buffers(bp_info->bp);
}

static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
	      void *vaddr, rte_iova_t paddr, size_t len,
	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa_bp_info *bp_info;
	struct dpaa_memseg *ms;
	unsigned int total_elt_sz;

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	/* Update the PA-VA table */
	dpaax_iova_table_update(paddr, vaddr, len);

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u",
			     (uint64_t)len, total_elt_sz * mp->size);

	/* Check whether the pool area has sufficient space for all elements
	 * of this memzone.
	 */
	if (len >= total_elt_sz * mp->size)
		bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

	/* For each memory chunk pinned to the Mempool, a linked list of the
	 * contained memsegs is created for searching when PA to VA
	 * conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
	if (!ms) {
		DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation will not be available.");
		/* If the element is not added, lookups will simply miss and
		 * the logic falls back to the traditional DPDK memseg
		 * traversal code. This is therefore not a blocking error,
		 * but an error message is logged.
		 */
		return 0;
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
					      len, obj_cb, obj_cb_arg);
}
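
/* Illustrative arithmetic for the single-segment check above (example
 * values, not from the driver): with mp->size = 2048 elements and
 * total_elt_sz = 2304 bytes (header + element + trailer), a chunk needs
 * len >= 2048 * 2304 = 4718592 bytes in one contiguous area for
 * DPAA_MPOOL_SINGLE_SEGMENT to apply; smaller chunks mean PA-to-VA
 * conversion cannot rely on a single constant ptov_off.
 */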

static const struct rte_mempool_ops dpaa_mpool_ops = {
	.name = DPAA_MEMPOOL_OPS_NAME,
	.alloc = dpaa_mbuf_create_pool,
	.free = dpaa_mbuf_free_pool,
	.enqueue = dpaa_mbuf_free_bulk,
	.dequeue = dpaa_mbuf_alloc_bulk,
	.get_count = dpaa_mbuf_get_count,
	.populate = dpaa_populate,
};

RTE_MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
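
/* Usage sketch, not part of the driver: an application would typically pick
 * these ops up by name when creating an mbuf pool. All pool parameters
 * below are illustrative assumptions; only DPAA_MEMPOOL_OPS_NAME comes from
 * this driver (see dpaa_mempool.h).
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_by_ops("pkt_pool", 2048, 256, 0,
 *					    RTE_MBUF_DEFAULT_BUF_SIZE,
 *					    rte_socket_id(),
 *					    DPAA_MEMPOOL_OPS_NAME);
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create dpaa pkt pool\n");
 */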