[dpdk.git] drivers/mempool/dpaa/dpaa_mempool.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>
/* List of all the memseg information locally maintained in the dpaa driver.
 * This is used to optimize PA-to-VA searches until a better mechanism is
 * available; an illustrative lookup over this list is sketched below.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
        = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
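
/*
 * Illustrative sketch only (not part of the driver, kept compiled out):
 * a PA-to-VA lookup over rte_dpaa_memsegs walks each registered chunk and
 * applies the VA-PA delta of the chunk containing the address. The name
 * dpaa_ptova_sketch() is hypothetical; the driver's real fast path is
 * DPAA_MEMPOOL_PTOV() backed by the dpaax IOVA table.
 */
#if 0   /* example only */
static inline void *
dpaa_ptova_sketch(rte_iova_t paddr)
{
        struct dpaa_memseg *ms;

        TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
                if (paddr >= ms->iova && paddr < ms->iova + ms->len)
                        return (char *)ms->vaddr + (paddr - ms->iova);
        }
        return NULL;    /* fall back to the generic DPDK memseg walk */
}
#endif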

struct dpaa_bp_info *rte_dpaa_bpid_info;

static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
        struct bman_pool *bp;
        struct bm_buffer bufs[8];
        struct dpaa_bp_info *bp_info;
        uint8_t bpid;
        int num_bufs = 0, ret = 0;
        struct bman_pool_params params = {
                .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
        };

        MEMPOOL_INIT_FUNC_TRACE();

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR(
                                "rte_dpaa_portal_init failed with ret: %d",
                                ret);
                        return -1;
                }
        }
        bp = bman_new_pool(&params);
        if (!bp) {
                DPAA_MEMPOOL_ERR("bman_new_pool() failed");
                return -ENODEV;
        }
        bpid = bman_get_params(bp)->bpid;

        /* Drain the pool of anything already in it. */
        do {
                /* Acquire is all-or-nothing, so we drain in 8s,
                 * then in 1s for the remainder.
                 */
                if (ret != 1)
                        ret = bman_acquire(bp, bufs, 8, 0);
                if (ret < 8)
                        ret = bman_acquire(bp, bufs, 1, 0);
                if (ret > 0)
                        num_bufs += ret;
        } while (ret > 0);
        if (num_bufs)
                DPAA_MEMPOOL_WARN("drained %d bufs from BPID %d",
                                  num_bufs, bpid);

        if (rte_dpaa_bpid_info == NULL) {
                rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
                                sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
                                RTE_CACHE_LINE_SIZE);
                if (rte_dpaa_bpid_info == NULL) {
                        bman_free_pool(bp);
                        return -ENOMEM;
                }
        }

        rte_dpaa_bpid_info[bpid].mp = mp;
        rte_dpaa_bpid_info[bpid].bpid = bpid;
        rte_dpaa_bpid_info[bpid].size = mp->elt_size;
        rte_dpaa_bpid_info[bpid].bp = bp;
        rte_dpaa_bpid_info[bpid].meta_data_size =
                sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
        rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
        rte_dpaa_bpid_info[bpid].ptov_off = 0;
        rte_dpaa_bpid_info[bpid].flags = 0;

        bp_info = rte_malloc(NULL,
                             sizeof(struct dpaa_bp_info),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_info) {
                DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
                bman_free_pool(bp);
                return -ENOMEM;
        }

        rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
                   sizeof(struct dpaa_bp_info));
        mp->pool_data = (void *)bp_info;

        DPAA_MEMPOOL_INFO("BMAN pool created for bpid = %d", bpid);
        return 0;
}
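
/*
 * Buffer layout note (descriptive only): meta_data_size, set above, equals
 * sizeof(struct rte_mbuf) plus the mbuf private area, so the address that
 * BMAN stores points at the data area just past the mbuf metadata:
 *
 *   mbuf PA   -->  +----------------------------------+
 *                  | struct rte_mbuf + private area   |  <- meta_data_size
 *   BMAN addr -->  +----------------------------------+
 *                  | packet data buffer               |
 *                  +----------------------------------+
 *
 * dpaa_mbuf_free_bulk() below therefore adds meta_data_size when releasing
 * a buffer, and dpaa_mbuf_alloc_bulk() subtracts it when acquiring one.
 */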

static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        MEMPOOL_INIT_FUNC_TRACE();

        if (bp_info) {
                bman_free_pool(bp_info->bp);
                DPAA_MEMPOOL_INFO("BMAN pool freed for bpid = %d",
                                  bp_info->bpid);
                rte_free(mp->pool_data);
                mp->pool_data = NULL;
        }
}

static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
        struct bm_buffer buf;
        int ret;

        DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
                             addr, bp_info->bpid);

        bm_buffer_set64(&buf, addr);
retry:
        /* bman_release() can fail transiently while the hardware release
         * ring is busy; back off briefly and retry until it is accepted.
         */
        ret = bman_release(bp_info->bp, &buf, 1, 0);
        if (ret) {
                DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
                cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
                goto retry;
        }
}

static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
                    void *const *obj_table,
                    unsigned int n)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
        int ret;
        unsigned int i = 0;

        DPAA_MEMPOOL_DPDEBUG("Request to free %u buffers in bpid = %d",
                             n, bp_info->bpid);

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                         ret);
                        return 0;
                }
        }

        while (i < n) {
                uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

                if (unlikely(!bp_info->ptov_off)) {
                        /* buffers are from a single mem segment */
                        if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
                                bp_info->ptov_off = (size_t)obj_table[i] - phy;
                                rte_dpaa_bpid_info[bp_info->bpid].ptov_off
                                                = bp_info->ptov_off;
                        }
                }

                dpaa_buf_free(bp_info,
                              (uint64_t)phy + bp_info->meta_data_size);
                i = i + 1;
        }

        DPAA_MEMPOOL_DPDEBUG("freed %u buffers in bpid = %d",
                             n, bp_info->bpid);

        return 0;
}

static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
                     void **obj_table,
                     unsigned int count)
{
        struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
        struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
        struct dpaa_bp_info *bp_info;
        void *bufaddr;
        int i, ret;
        unsigned int n = 0;

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

        DPAA_MEMPOOL_DPDEBUG("Request to alloc %u buffers in bpid = %d",
                             count, bp_info->bpid);

        if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
                DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
                                 count);
                return -1;
        }

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                         ret);
                        return -1;
                }
        }

        while (n < count) {
                /* Acquire is all-or-nothing, so request batches of
                 * DPAA_MBUF_MAX_ACQ_REL buffers, then the remainder.
                 */
                if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
                        ret = bman_acquire(bp_info->bp, bufs,
                                           DPAA_MBUF_MAX_ACQ_REL, 0);
                } else {
                        ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
                }
                /* If fewer buffers than requested are available in the
                 * pool, bman_acquire() fails rather than returning a
                 * partial batch.
                 */
                if (ret <= 0) {
                        DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
                                             ret);
                        /* The API expects the exact number of requested
                         * buffers, so release everything acquired so far.
                         */
                        dpaa_mbuf_free_bulk(pool, obj_table, n);
                        return -ENOBUFS;
                }
                /* assign mbufs from the acquired objects */
                for (i = 0; (i < ret) && bufs[i].addr; i++) {
                        /* TODO - errata: it has been observed that bufs[]
                         * entries may be NULL, i.e. the first buffer is
                         * valid while the remaining ones may be NULL.
                         */
                        bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
                        m[n] = (struct rte_mbuf *)((char *)bufaddr
                                                - bp_info->meta_data_size);
                        DPAA_MEMPOOL_DPDEBUG("Vaddr (%p), mbuf (%p) from BMAN",
                                             (void *)bufaddr, (void *)m[n]);
                        n++;
                }
        }

        DPAA_MEMPOOL_DPDEBUG("Allocated %u buffers from bpid = %d",
                             n, bp_info->bpid);
        return 0;
}
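
/*
 * Illustrative sketch of the PA-to-VA step assumed above (the real
 * DPAA_MEMPOOL_PTOV() is defined in dpaa_mempool.h and may differ in
 * detail): once dpaa_mbuf_free_bulk() has cached ptov_off for a
 * single-segment pool, the conversion is a constant offset; otherwise a
 * table or list lookup is needed.
 */
#if 0   /* example only */
static inline void *
dpaa_mempool_ptov_sketch(struct dpaa_bp_info *bp_info, uint64_t addr)
{
        if (bp_info->ptov_off)
                return (void *)((size_t)addr + bp_info->ptov_off);
        return dpaa_ptova_sketch(addr); /* hypothetical helper from above */
}
#endif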

static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info;

        MEMPOOL_INIT_FUNC_TRACE();

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided");
                return 0;
        }

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        return bman_query_free_buffers(bp_info->bp);
}

static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
              void *vaddr, rte_iova_t paddr, size_t len,
              rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        struct dpaa_bp_info *bp_info;
        unsigned int total_elt_sz;
        struct dpaa_memseg *ms;

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided");
                return 0;
        }

        /* Update the PA-VA table */
        dpaax_iova_table_update(paddr, vaddr, len);

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

        DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u",
                             (uint64_t)len, total_elt_sz * mp->size);

        /* If this memzone alone has room for all the pool's elements, the
         * pool spans a single segment; mark it so dpaa_mbuf_free_bulk()
         * can use a constant VA-PA offset instead of a per-buffer lookup.
         */
        if (len >= total_elt_sz * mp->size)
                bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

        /* For each memory chunk pinned to the mempool, a linked list of the
         * contained memsegs is created for searching when a PA-to-VA
         * conversion is required.
         */
        ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
        if (!ms) {
                DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
                DPAA_MEMPOOL_WARN("Fast PA-to-VA translation will not be available.");
                /* If the element is not added, searching for it will simply
                 * fail and the logic will fall back to the traditional DPDK
                 * memseg traversal code. This is therefore not a fatal
                 * error, but an error message is logged.
                 */
                return 0;
        }

        ms->vaddr = vaddr;
        ms->iova = paddr;
        ms->len = len;
        /* Head insertion is preferred here: buffers pinned to the pool are
         * typically picked from the most recently added chunks, so placing
         * new chunks at the front makes lookups faster.
         */
        TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

        return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
                                               obj_cb, obj_cb_arg);
}

static const struct rte_mempool_ops dpaa_mpool_ops = {
        .name = DPAA_MEMPOOL_OPS_NAME,
        .alloc = dpaa_mbuf_create_pool,
        .free = dpaa_mbuf_free_pool,
        .enqueue = dpaa_mbuf_free_bulk,
        .dequeue = dpaa_mbuf_alloc_bulk,
        .get_count = dpaa_mbuf_get_count,
        .populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
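
/*
 * Usage sketch (illustrative only, not part of this driver): on DPAA
 * platforms an application normally gets these ops implicitly through
 * rte_pktmbuf_pool_create(), but they can also be selected explicitly by
 * name. The pool name and sizing values below are arbitrary examples and
 * example_create_dpaa_pktmbuf_pool() is a hypothetical function.
 */
#if 0   /* example only */
#include <rte_mbuf.h>
#include <rte_lcore.h>

static struct rte_mempool *
example_create_dpaa_pktmbuf_pool(void)
{
        struct rte_mempool *mp;

        /* Raw pool; element size covers mbuf header + default data room */
        mp = rte_mempool_create_empty("ex_pktmbuf_pool", 8192,
                        sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
                        256, sizeof(struct rte_pktmbuf_pool_private),
                        rte_socket_id(), 0);
        if (mp == NULL)
                return NULL;

        /* Bind the pool to the BMAN-backed ops registered above */
        if (rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL) != 0) {
                rte_mempool_free(mp);
                return NULL;
        }

        rte_pktmbuf_pool_init(mp, NULL);
        if (rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }
        rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
        return mp;
}
#endif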