mempool/dpaa2: support dynamic logging
[dpdk.git] drivers/mempool/dpaa2/dpaa2_hw_mempool.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"

struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;

/* Dynamic logging identifier for mempool */
int dpaa2_logtype_mempool;

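/*
 * Mempool 'alloc' handler: reserve a hardware DPBP device, enable it and
 * describe the pool in a dpaa2_bp_list node that is linked into the
 * global buffer pool list and stored in mp->pool_data.
 */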
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
        struct dpaa2_bp_list *bp_list;
        struct dpaa2_dpbp_dev *avail_dpbp;
        struct dpaa2_bp_info *bp_info;
        struct dpbp_attr dpbp_attr;
        uint32_t bpid;
        int ret;

        avail_dpbp = dpaa2_alloc_dpbp_dev();

        if (!avail_dpbp) {
                DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
                return -ENOENT;
        }

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_MEMPOOL_ERR("Failure in affining portal");
                        goto err1;
                }
        }

        ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
        if (ret != 0) {
                DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
                                  ret);
                goto err1;
        }

        ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
                                  avail_dpbp->token, &dpbp_attr);
        if (ret != 0) {
                DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
                                  ret);
                goto err2;
        }

        bp_info = rte_malloc(NULL,
                             sizeof(struct dpaa2_bp_info),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_info) {
                DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
                ret = -ENOMEM;
                goto err2;
        }

        /* Allocate the bp_list which will be added into global_bp_list */
        bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_list) {
                DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
                ret = -ENOMEM;
                goto err3;
        }

        /* Set parameters of buffer pool list */
        bp_list->buf_pool.num_bufs = mp->size;
        bp_list->buf_pool.size = mp->elt_size
                        - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
        bp_list->buf_pool.bpid = dpbp_attr.bpid;
        bp_list->buf_pool.h_bpool_mem = NULL;
        bp_list->buf_pool.dpbp_node = avail_dpbp;
        /* Identification for our offloaded pool_data structure */
        bp_list->dpaa2_ops_index = mp->ops_index;
        bp_list->next = h_bp_list;
        bp_list->mp = mp;

        bpid = dpbp_attr.bpid;

        rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
                                + rte_pktmbuf_priv_size(mp);
        rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
        rte_dpaa2_bpid_info[bpid].bpid = bpid;

        rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
                   sizeof(struct dpaa2_bp_info));
        mp->pool_data = (void *)bp_info;

        DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);

        h_bp_list = bp_list;
        return 0;
err3:
        rte_free(bp_info);
err2:
        dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
        dpaa2_free_dpbp_dev(avail_dpbp);

        return ret;
}

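/*
 * Mempool 'free' handler: disable the underlying DPBP device, unlink the
 * pool from the global buffer pool list and free the associated resources.
 */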
static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
        struct dpaa2_bp_info *bpinfo;
        struct dpaa2_bp_list *bp;
        struct dpaa2_dpbp_dev *dpbp_node;

        if (!mp->pool_data) {
                DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
                return;
        }

        bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
        bp = bpinfo->bp_list;
        dpbp_node = bp->buf_pool.dpbp_node;

        dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);

        if (h_bp_list == bp) {
                h_bp_list = h_bp_list->next;
        } else { /* if it is not the first node */
                struct dpaa2_bp_list *prev = h_bp_list, *temp;
                temp = h_bp_list->next;
                while (temp) {
                        if (temp == bp) {
                                prev->next = temp->next;
                                rte_free(bp);
                                break;
                        }
                        prev = temp;
                        temp = temp->next;
                }
        }

        rte_free(mp->pool_data);
        dpaa2_free_dpbp_dev(dpbp_node);
}

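/*
 * Release 'count' buffers to the QBMAN pool identified by 'bpid'. The mbuf
 * addresses are converted to hardware buffer addresses (object address plus
 * metadata size) and released in bursts of up to DPAA2_MBUF_MAX_ACQ_REL.
 */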
static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
                        void * const *obj_table,
                        uint32_t bpid,
                        uint32_t meta_data_size,
                        int count)
{
        struct qbman_release_desc releasedesc;
        struct qbman_swp *swp;
        int ret;
        int i, n;
        uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret != 0) {
                        DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
                        return;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        /* Create a release descriptor required for releasing
         * buffers into QBMAN
         */
        qbman_release_desc_clear(&releasedesc);
        qbman_release_desc_set_bpid(&releasedesc, bpid);

        n = count % DPAA2_MBUF_MAX_ACQ_REL;
        if (unlikely(!n))
                goto aligned;

        /* convert mbuf to buffers for the remainder */
        for (i = 0; i < n ; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
                bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
                                + meta_data_size;
#else
                bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
        }

        /* feed them to bman */
        do {
                ret = qbman_swp_release(swp, &releasedesc, bufs, n);
        } while (ret == -EBUSY);

aligned:
        /* if there are more buffers to free */
        while (n < count) {
                /* convert mbuf to buffers */
                for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
                        bufs[i] = (uint64_t)
                                  rte_mempool_virt2iova(obj_table[n + i])
                                  + meta_data_size;
#else
                        bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
                }

                do {
                        ret = qbman_swp_release(swp, &releasedesc, bufs,
                                                DPAA2_MBUF_MAX_ACQ_REL);
                } while (ret == -EBUSY);
                n += DPAA2_MBUF_MAX_ACQ_REL;
        }
}

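/*
 * Mempool 'dequeue' handler: acquire 'count' buffers from the hardware pool
 * in bursts of up to DPAA2_MBUF_MAX_ACQ_REL and convert them back to mbuf
 * pointers. If the pool runs short, everything acquired so far is released
 * back and -ENOBUFS is returned.
 */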
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
                          void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
        static int alloc;
#endif
        struct qbman_swp *swp;
        uint16_t bpid;
        size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
        int i, ret;
        unsigned int n = 0;
        struct dpaa2_bp_info *bp_info;

        bp_info = mempool_to_bpinfo(pool);

        if (!(bp_info->bp_list)) {
                DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
                return -ENOENT;
        }

        bpid = bp_info->bpid;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret != 0) {
                        DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
                        return ret;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        while (n < count) {
                /* Acquire is all-or-nothing, so we drain in 7s,
                 * then the remainder.
                 */
                if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
                        ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
                                                DPAA2_MBUF_MAX_ACQ_REL);
                } else {
                        ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
                                                count - n);
                }
                /* If fewer buffers than requested are available in the pool,
                 * qbman_swp_acquire() returns 0.
                 */
                if (ret <= 0) {
                        DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
                                          " err code: %d", ret);
                        /* The API expects the exact number of requested bufs */
                        /* Release all buffers acquired so far */
                        rte_dpaa2_mbuf_release(pool, obj_table, bpid,
                                           bp_info->meta_data_size, n);
                        return -ENOBUFS;
                }
                /* assign mbufs from the acquired objects */
                for (i = 0; (i < ret) && bufs[i]; i++) {
                        DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
                        obj_table[n] = (struct rte_mbuf *)
                                       (bufs[i] - bp_info->meta_data_size);
                        DPAA2_MEMPOOL_DP_DEBUG(
                                   "Acquired %p address %p from BMAN\n",
                                   (void *)bufs[i], (void *)obj_table[n]);
                        n++;
                }
        }

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
        alloc += n;
        DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
                               alloc, count, n);
#endif
        return 0;
}

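/* Mempool 'enqueue' handler: return a bulk of buffers to the hardware pool. */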
static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
                  void * const *obj_table, unsigned int n)
{
        struct dpaa2_bp_info *bp_info;

        bp_info = mempool_to_bpinfo(pool);
        if (!(bp_info->bp_list)) {
                DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
                return -ENOENT;
        }
        rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
                           bp_info->meta_data_size, n);

        return 0;
}

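/* Mempool 'get_count' handler: query the number of free buffers in the DPBP. */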
static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
        int ret;
        unsigned int num_of_bufs = 0;
        struct dpaa2_bp_info *bp_info;
        struct dpaa2_dpbp_dev *dpbp_node;

        if (!mp || !mp->pool_data) {
                DPAA2_MEMPOOL_ERR("Invalid mempool provided");
                return 0;
        }

        bp_info = (struct dpaa2_bp_info *)mp->pool_data;
        dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

        ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
                                     dpbp_node->token, &num_of_bufs);
        if (ret) {
                DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
                                  ret);
                return 0;
        }

        DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);

        return num_of_bufs;
}

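/* DPAA2 hardware mempool ops, registered with the mempool library below. */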
struct rte_mempool_ops dpaa2_mpool_ops = {
        .name = DPAA2_MEMPOOL_OPS_NAME,
        .alloc = rte_hw_mbuf_create_pool,
        .free = rte_hw_mbuf_free_pool,
        .enqueue = rte_hw_mbuf_free_bulk,
        .dequeue = rte_dpaa2_mbuf_alloc_bulk,
        .get_count = rte_hw_mbuf_get_count,
};

MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);

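/* Register the dynamic log type for this driver at startup (default: NOTICE). */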
RTE_INIT(dpaa2_mempool_init_log);
static void
dpaa2_mempool_init_log(void)
{
        dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
        if (dpaa2_logtype_mempool >= 0)
                rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
}
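
/*
 * Note: the level of the "mempool.dpaa2" log type can be raised at run time,
 * for example with rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_DEBUG)
 * or (assuming the usual EAL option format) by starting the application with
 * --log-level=mempool.dpaa2,8.
 */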