drivers/mempool/dpaa/dpaa_mempool.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>

/* List of all the memseg information locally maintained in the dpaa driver.
 * This is used to optimize PA to VA searches until a better mechanism
 * (algorithm) is available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
        = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

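/* Per-BPID pool information, indexed by BPID. It is allocated lazily on the
 * first pool creation and is exported (non-static) so that other DPAA
 * components can map a BPID back to its pool attributes.
 */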
struct dpaa_bp_info *rte_dpaa_bpid_info;
int dpaa_logtype_mempool;

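/* Mempool ops "alloc" callback: create the hardware-backed pool.
 * A BMAN pool with a dynamically assigned BPID is created, any stale
 * buffers left in it are drained, and the per-BPID attributes are stored
 * both in rte_dpaa_bpid_info[] and in a private copy hung off
 * mp->pool_data.
 */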
static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
        struct bman_pool *bp;
        struct bm_buffer bufs[8];
        struct dpaa_bp_info *bp_info;
        uint8_t bpid;
        int num_bufs = 0, ret = 0;
        struct bman_pool_params params = {
                .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
        };

        MEMPOOL_INIT_FUNC_TRACE();

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR(
                                "rte_dpaa_portal_init failed with ret: %d",
                                 ret);
                        return -1;
                }
        }
        bp = bman_new_pool(&params);
        if (!bp) {
                DPAA_MEMPOOL_ERR("bman_new_pool() failed");
                return -ENODEV;
        }
        bpid = bman_get_params(bp)->bpid;

        /* Drain the pool of anything already in it. */
        do {
                /* Acquire is all-or-nothing, so we drain in 8s,
                 * then in 1s for the remainder.
                 */
                if (ret != 1)
                        ret = bman_acquire(bp, bufs, 8, 0);
                if (ret < 8)
                        ret = bman_acquire(bp, bufs, 1, 0);
                if (ret > 0)
                        num_bufs += ret;
        } while (ret > 0);
        if (num_bufs)
                DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
                                  num_bufs, bpid);

        if (rte_dpaa_bpid_info == NULL) {
                rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
                                sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
                                RTE_CACHE_LINE_SIZE);
                if (rte_dpaa_bpid_info == NULL) {
                        bman_free_pool(bp);
                        return -ENOMEM;
                }
        }

        rte_dpaa_bpid_info[bpid].mp = mp;
        rte_dpaa_bpid_info[bpid].bpid = bpid;
        rte_dpaa_bpid_info[bpid].size = mp->elt_size;
        rte_dpaa_bpid_info[bpid].bp = bp;
        rte_dpaa_bpid_info[bpid].meta_data_size =
                sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
        rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
        rte_dpaa_bpid_info[bpid].ptov_off = 0;
        rte_dpaa_bpid_info[bpid].flags = 0;

        bp_info = rte_malloc(NULL,
                             sizeof(struct dpaa_bp_info),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_info) {
                DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
                bman_free_pool(bp);
                return -ENOMEM;
        }

        rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
                   sizeof(struct dpaa_bp_info));
        mp->pool_data = (void *)bp_info;

        DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
        return 0;
}

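/* Mempool ops "free" callback: release the BMAN pool backing this mempool
 * and the private bp_info copy referenced by mp->pool_data.
 */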
static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        MEMPOOL_INIT_FUNC_TRACE();

        if (bp_info) {
                bman_free_pool(bp_info->bp);
                DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
                                  bp_info->bpid);
                rte_free(mp->pool_data);
                mp->pool_data = NULL;
        }
}

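/* Release a single buffer (identified by its IOVA) back to the BMAN pool,
 * spinning with a backoff while the release interface reports busy.
 */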
static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
        struct bm_buffer buf;
        int ret;

        DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
                           addr, bp_info->bpid);

        bm_buffer_set64(&buf, addr);
retry:
        ret = bman_release(bp_info->bp, &buf, 1, 0);
        if (ret) {
                DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
                cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
                goto retry;
        }
}

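/* Mempool ops "enqueue" callback: return objects to the hardware pool.
 * Each object's virtual address is converted to an IOVA; for pools backed
 * by a single memory segment the VA-PA offset is cached in ptov_off to
 * speed up later PA-to-VA conversions. The address handed to BMAN is
 * offset past the mbuf metadata (meta_data_size) so that it points at the
 * data buffer itself.
 */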
static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
                    void *const *obj_table,
                    unsigned int n)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
        int ret;
        unsigned int i = 0;

        DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
                             n, bp_info->bpid);

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                         ret);
                        return 0;
                }
        }

        while (i < n) {
                uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

                if (unlikely(!bp_info->ptov_off)) {
                        /* buffers are from a single mem segment */
                        if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
                                bp_info->ptov_off = (size_t)obj_table[i] - phy;
                                rte_dpaa_bpid_info[bp_info->bpid].ptov_off
                                                = bp_info->ptov_off;
                        }
                }

                dpaa_buf_free(bp_info,
                              (uint64_t)phy + bp_info->meta_data_size);
                i = i + 1;
        }

        DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
                             n, bp_info->bpid);

        return 0;
}

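/* Mempool ops "dequeue" callback: allocate objects from the hardware pool.
 * Buffers are acquired from BMAN in chunks of up to DPAA_MBUF_MAX_ACQ_REL;
 * each returned physical address is translated back to a virtual address
 * and rewound by meta_data_size to recover the rte_mbuf pointer. If the
 * pool runs short, everything acquired so far is released again and
 * -ENOBUFS is returned, as the mempool API expects all-or-nothing.
 */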
static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
                     void **obj_table,
                     unsigned int count)
{
        struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
        struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
        struct dpaa_bp_info *bp_info;
        void *bufaddr;
        int i, ret;
        unsigned int n = 0;

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

        DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
                             count, bp_info->bpid);

        if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
                DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
                                 count);
                return -1;
        }

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                         ret);
                        return -1;
                }
        }

        while (n < count) {
                /* Acquire is all-or-nothing, so we acquire in chunks of
                 * DPAA_MBUF_MAX_ACQ_REL buffers, then the remainder.
                 */
                if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
                        ret = bman_acquire(bp_info->bp, bufs,
                                           DPAA_MBUF_MAX_ACQ_REL, 0);
                } else {
                        ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
                }
                /* If fewer buffers than requested are available in the
                 * pool, bman_acquire() does not return a partial count.
                 */
                if (ret <= 0) {
                        DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
                                             ret);
                        /* The API expects exactly the requested number of
                         * buffers, so release everything acquired so far.
                         */
                        dpaa_mbuf_free_bulk(pool, obj_table, n);
                        return -ENOBUFS;
                }
                /* assign mbufs from the acquired objects */
                for (i = 0; (i < ret) && bufs[i].addr; i++) {
                        /* TODO - errata: it has been observed that bufs[i].addr
                         * may be NULL, i.e. the first buffer is valid while the
                         * remaining 6 buffers may be NULL.
                         */
                        bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
                        m[n] = (struct rte_mbuf *)((char *)bufaddr
                                                - bp_info->meta_data_size);
                        DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
                                             (void *)bufaddr, (void *)m[n]);
                        n++;
                }
        }

        DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
                             n, bp_info->bpid);
        return 0;
}

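/* Mempool ops "get_count" callback: report the number of free buffers
 * currently held by the BMAN pool.
 */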
static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info;

        MEMPOOL_INIT_FUNC_TRACE();

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
                return 0;
        }

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        return bman_query_free_buffers(bp_info->bp);
}

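/* Mempool ops "populate" callback: register the memory chunk backing the
 * mempool objects. The chunk is added to the dpaax IOVA table and to the
 * local rte_dpaa_memsegs list used for fast PA-to-VA translation, the pool
 * is flagged as single-segment when the chunk can hold all elements, and
 * the generic populate helper then lays out the objects.
 */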
static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
              void *vaddr, rte_iova_t paddr, size_t len,
              rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        struct dpaa_bp_info *bp_info;
        unsigned int total_elt_sz;
        struct dpaa_memseg *ms;

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
                return 0;
        }

        /* Update the PA-VA table */
        dpaax_iova_table_update(paddr, vaddr, len);

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

        DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u\n",
                           (uint64_t)len, total_elt_sz * mp->size);

        /* Check whether this memory area is large enough to hold all the
         * elements of the pool.
         */
        if (len >= total_elt_sz * mp->size)
                bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

        /* For each memory chunk pinned to the mempool, a linked list of the
         * contained memsegs is created for searching when a PA to VA
         * conversion is required.
         */
        ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
        if (!ms) {
                DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
                DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
                /* If the entry is not added, lookups for it simply fail and
                 * the logic falls back to the traditional DPDK memseg
                 * traversal code, so this is not a fatal error - only a
                 * warning is printed.
                 */
                return 0;
        }

        ms->vaddr = vaddr;
        ms->iova = paddr;
        ms->len = len;
        /* Head insertion is used as it is generally faster than tail
         * insertion here: the buffers being pinned are picked from the
         * rear end.
         */
        TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

        return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
                                               len, obj_cb, obj_cb_arg);
}

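/* DPAA hardware mempool ops: "alloc"/"free" create and destroy the backing
 * BMAN pool, while "enqueue"/"dequeue" map to releasing buffers to and
 * acquiring buffers from that pool.
 */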
static const struct rte_mempool_ops dpaa_mpool_ops = {
        .name = DPAA_MEMPOOL_OPS_NAME,
        .alloc = dpaa_mbuf_create_pool,
        .free = dpaa_mbuf_free_pool,
        .enqueue = dpaa_mbuf_free_bulk,
        .dequeue = dpaa_mbuf_alloc_bulk,
        .get_count = dpaa_mbuf_get_count,
        .populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
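
/* Illustrative usage sketch (not part of the driver): an application can
 * bind a packet mbuf pool to these ops explicitly. The pool name, sizes and
 * socket below are arbitrary example parameters.
 *
 *      #include <rte_mbuf.h>
 *
 *      struct rte_mempool *mp;
 *
 *      mp = rte_pktmbuf_pool_create_by_ops("pkt_pool", 8192, 256, 0,
 *                                          RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                          rte_socket_id(),
 *                                          DPAA_MEMPOOL_OPS_NAME);
 *
 * More commonly the ops are selected implicitly when
 * RTE_MBUF_DEFAULT_MEMPOOL_OPS (or the EAL --mbuf-pool-ops-name option)
 * names this driver and rte_pktmbuf_pool_create() is used.
 */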

RTE_INIT(dpaa_mp_init_log)
{
        dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
        if (dpaa_logtype_mempool >= 0)
                rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
}