mempool/dpaa: support NXP DPAA mempool
[dpdk.git] / drivers / mempool / dpaa / dpaa_mempool.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_config.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>

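/* Per-BPID bookkeeping, indexed by the hardware buffer pool id that BMAN
 * assigns when a pool is created.
 */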
struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];

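/* mempool "alloc" op: create a BMAN pool with a dynamically assigned BPID,
 * drain any stale buffers already present in it, and record the pool
 * geometry both in rte_dpaa_bpid_info[] and in mp->pool_data.
 */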
static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
        struct bman_pool *bp;
        struct bm_buffer bufs[8];
        struct dpaa_bp_info *bp_info;
        uint8_t bpid;
        int num_bufs = 0, ret = 0;
        struct bman_pool_params params = {
                .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
        };

        MEMPOOL_INIT_FUNC_TRACE();

        bp = bman_new_pool(&params);
        if (!bp) {
                DPAA_MEMPOOL_ERR("bman_new_pool() failed");
                return -ENODEV;
        }
        bpid = bman_get_params(bp)->bpid;

        /* Drain the pool of anything already in it. */
        do {
                /* Acquire is all-or-nothing, so we drain in 8s,
                 * then in 1s for the remainder.
                 */
                if (ret != 1)
                        ret = bman_acquire(bp, bufs, 8, 0);
                if (ret < 8)
                        ret = bman_acquire(bp, bufs, 1, 0);
                if (ret > 0)
                        num_bufs += ret;
        } while (ret > 0);
        if (num_bufs)
                DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
                                  num_bufs, bpid);

        rte_dpaa_bpid_info[bpid].mp = mp;
        rte_dpaa_bpid_info[bpid].bpid = bpid;
        rte_dpaa_bpid_info[bpid].size = mp->elt_size;
        rte_dpaa_bpid_info[bpid].bp = bp;
        rte_dpaa_bpid_info[bpid].meta_data_size =
                sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
        rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;

        bp_info = rte_malloc(NULL,
                             sizeof(struct dpaa_bp_info),
                             RTE_CACHE_LINE_SIZE);
        if (!bp_info) {
                DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
                bman_free_pool(bp);
                return -ENOMEM;
        }

        rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
                   sizeof(struct dpaa_bp_info));
        mp->pool_data = (void *)bp_info;

        DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
        return 0;
}

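/* mempool "free" op: release the BMAN pool and the bp_info copy attached to
 * mp->pool_data at creation time.
 */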
static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        MEMPOOL_INIT_FUNC_TRACE();

        if (bp_info) {
                bman_free_pool(bp_info->bp);
                DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
                                  bp_info->bpid);
                rte_free(mp->pool_data);
                mp->pool_data = NULL;
        }
}

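/* Release a single buffer (given by its physical address) back to the BMAN
 * pool, spinning until the release command is accepted.
 */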
static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
        struct bm_buffer buf;
        int ret;

        DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);

        bm_buffer_set64(&buf, addr);
retry:
        ret = bman_release(bp_info->bp, &buf, 1, 0);
        if (ret) {
                DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
                cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
                goto retry;
        }
}

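/* mempool "enqueue" op: translate each object to the physical address of its
 * data buffer (past the mbuf header and private area) and hand it back to
 * BMAN one buffer at a time.
 */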
static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
                    void *const *obj_table,
                    unsigned int n)
{
        struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
        int ret;
        unsigned int i = 0;

        DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
                             n, bp_info->bpid);

        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
                DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                 ret);
                return 0;
        }

        while (i < n) {
                dpaa_buf_free(bp_info,
                              (uint64_t)rte_mempool_virt2phy(pool,
                              obj_table[i]) + bp_info->meta_data_size);
                i = i + 1;
        }

        DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
                             n, bp_info->bpid);

        return 0;
}

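/* mempool "dequeue" op: acquire buffers from BMAN in chunks of at most
 * DPAA_MBUF_MAX_ACQ_REL and convert each buffer address back into an mbuf
 * pointer. If the pool cannot satisfy the full request, any buffers already
 * acquired are released and -ENOBUFS is returned.
 */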
static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
                     void **obj_table,
                     unsigned int count)
{
        struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
        struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
        struct dpaa_bp_info *bp_info;
        void *bufaddr;
        int i, ret;
        unsigned int n = 0;

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

        DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
                             count, bp_info->bpid);

        if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
                DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
                                 count);
                return -1;
        }

        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
                DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
                                 ret);
                return -1;
        }

        while (n < count) {
                /* Acquire is all-or-nothing, so we acquire in chunks of
                 * DPAA_MBUF_MAX_ACQ_REL buffers, then the remainder.
                 */
                if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
                        ret = bman_acquire(bp_info->bp, bufs,
                                           DPAA_MBUF_MAX_ACQ_REL, 0);
                } else {
                        ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
                }
                /* If fewer buffers than requested are available in the
                 * pool, bman_acquire() returns 0.
                 */
                if (ret <= 0) {
                        DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
                                             ret);
                        /* The API expects the exact number of requested
                         * buffers, so release any buffers already acquired.
                         */
                        dpaa_mbuf_free_bulk(pool, obj_table, n);
                        return -ENOBUFS;
                }
                /* Assign an mbuf for each acquired buffer. */
                for (i = 0; (i < ret) && bufs[i].addr; i++) {
                        /* TODO-errata - observed that bufs may be NULL,
                         * i.e. the first buffer is valid while the
                         * remaining 6 buffers may be NULL.
                         */
                        bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr);
                        m[n] = (struct rte_mbuf *)((char *)bufaddr
                                                - bp_info->meta_data_size);
                        DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
                                             (void *)bufaddr, (void *)m[n]);
                        n++;
                }
        }

        DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
                             n, bp_info->bpid);
        return 0;
}

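/* mempool "get_count" op: report the number of free buffers currently held
 * by the BMAN pool.
 */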
static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
        struct dpaa_bp_info *bp_info;

        MEMPOOL_INIT_FUNC_TRACE();

        if (!mp || !mp->pool_data) {
                DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
                return 0;
        }

        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

        return bman_query_free_buffers(bp_info->bp);
}

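/* Register these callbacks as the "dpaa" mempool handler. An application (or
 * PMD) selects it with rte_mempool_set_ops_byname(mp, "dpaa", NULL) on an
 * empty mempool before populating it.
 */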
struct rte_mempool_ops dpaa_mpool_ops = {
        .name = "dpaa",
        .alloc = dpaa_mbuf_create_pool,
        .free = dpaa_mbuf_free_pool,
        .enqueue = dpaa_mbuf_free_bulk,
        .dequeue = dpaa_mbuf_alloc_bulk,
        .get_count = dpaa_mbuf_get_count,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
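
/*
 * Usage sketch (illustrative, not part of this driver): an application can
 * attach the "dpaa" handler to an empty pktmbuf pool before populating it.
 * The pool name and sizes below are example values.
 *
 *      struct rte_mempool *mp;
 *
 *      mp = rte_mempool_create_empty("pktmbuf_dpaa", 8192,
 *              sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
 *              256, sizeof(struct rte_pktmbuf_pool_private),
 *              rte_socket_id(), 0);
 *      if (mp && rte_mempool_set_ops_byname(mp, "dpaa", NULL) == 0) {
 *              rte_pktmbuf_pool_init(mp, NULL);
 *              rte_mempool_populate_default(mp);
 *              rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
 *      }
 */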