/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017,2019 NXP
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_malloc.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>

/* List of all the memseg information locally maintained in dpaa driver. This
 * is to optimize the PA_to_VA searches until a better mechanism (algo) is
 * available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

struct dpaa_bp_info *rte_dpaa_bpid_info;
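
/* Mempool 'alloc' op: create a BMAN pool with a dynamically allocated BPID,
 * drain any stale buffers left in it and attach the per-pool context to
 * mp->pool_data.
 */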
static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
	struct bman_pool *bp;
	struct bm_buffer bufs[8];
	struct dpaa_bp_info *bp_info;
	uint8_t bpid;
	int num_bufs = 0, ret = 0;
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
	};

	MEMPOOL_INIT_FUNC_TRACE();

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR(
				"rte_dpaa_portal_init failed with ret: %d",
				ret);
			return -1;
		}
	}
	bp = bman_new_pool(&params);
	if (!bp) {
		DPAA_MEMPOOL_ERR("bman_new_pool() failed");
		return -ENODEV;
	}
	bpid = bman_get_params(bp)->bpid;

	/* Drain the pool of anything already in it. */
	do {
		/* Acquire is all-or-nothing, so we drain in 8s,
		 * then in 1s for the remainder.
		 */
		if (ret != 1)
			ret = bman_acquire(bp, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(bp, bufs, 1, 0);
		if (ret > 0)
			num_bufs += ret;
	} while (ret > 0);

	if (num_bufs)
		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
				  num_bufs, bpid);

	if (rte_dpaa_bpid_info == NULL) {
		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa_bpid_info == NULL) {
			bman_free_pool(bp);
			return -ENOMEM;
		}
	}

	rte_dpaa_bpid_info[bpid].mp = mp;
	rte_dpaa_bpid_info[bpid].bpid = bpid;
	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
	rte_dpaa_bpid_info[bpid].bp = bp;
	rte_dpaa_bpid_info[bpid].meta_data_size =
		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
	rte_dpaa_bpid_info[bpid].ptov_off = 0;
	rte_dpaa_bpid_info[bpid].flags = 0;

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
		bman_free_pool(bp);
		return -ENOMEM;
	}

	rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
		   sizeof(struct dpaa_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
	return 0;
}
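
/* Mempool 'free' op: release the BMAN pool and the per-pool context
 * allocated by dpaa_mbuf_create_pool.
 */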
static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (bp_info) {
		bman_free_pool(bp_info->bp);
		DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
				  bp_info->bpid);
		rte_free(mp->pool_data);
		mp->pool_data = NULL;
	}
}
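
/* Release one buffer, given by its bus/physical address, back to the BMAN
 * pool, retrying for as long as the hardware portal reports busy.
 */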
static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
	struct bm_buffer buf;
	int ret;

	DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
			     addr, bp_info->bpid);

	bm_buffer_set64(&buf, addr);
retry:
	ret = bman_release(bp_info->bp, &buf, 1, 0);
	if (ret) {
		DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}
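
/* Mempool 'enqueue' op: translate each object's virtual address to its
 * IOVA/physical address and release the underlying buffers back to BMAN.
 */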
static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
		    void *const *obj_table,
		    unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	unsigned int i = 0;
	int ret;

	DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
			     n, bp_info->bpid);

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return 0;
		}
	}

	while (i < n) {
		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

		if (unlikely(!bp_info->ptov_off)) {
			/* buffers are from single mem segment */
			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
				bp_info->ptov_off = (size_t)obj_table[i] - phy;
				rte_dpaa_bpid_info[bp_info->bpid].ptov_off =
						bp_info->ptov_off;
			}
		}

		dpaa_buf_free(bp_info,
			      (uint64_t)phy + bp_info->meta_data_size);
		i++;
	}

	DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
			     n, bp_info->bpid);
	return 0;
}
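
/* Mempool 'dequeue' op: acquire buffers from BMAN in bursts of up to
 * DPAA_MBUF_MAX_ACQ_REL and convert the returned physical addresses back
 * into mbuf pointers for the caller.
 */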
static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
		     void **obj_table,
		     unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

	DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
			     count, bp_info->bpid);

	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so we acquire in bursts of
		 * DPAA_MBUF_MAX_ACQ_REL buffers, then the remainder.
		 */
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* If fewer buffers than requested are available in the
		 * pool, bman_acquire returns 0.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
					     ret);
			/* The API expects the exact number of requested
			 * buffers, so release whatever was acquired so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO-errata - observed that bufs may be null,
			 * i.e. first buffer is valid, remaining 6 buffers
			 * may be null.
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr
						- bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
			     n, bp_info->bpid);
	return 0;
}
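
/* Mempool 'get_count' op: query BMAN for the number of free buffers
 * currently held in the pool.
 */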
static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
		return 0;
	}

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
	return bman_query_free_buffers(bp_info->bp);
}
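
/* Mempool 'populate' op: record the memory chunk in the PA-VA table and in
 * the driver's local memseg list (used for fast PA to VA lookups), then let
 * the default populate helper lay out the objects.
 */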
static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
	      void *vaddr, rte_iova_t paddr, size_t len,
	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa_bp_info *bp_info;
	unsigned int total_elt_sz;
	struct dpaa_memseg *ms;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
		return 0;
	}

	/* Update the PA-VA table */
	dpaax_iova_table_update(paddr, vaddr, len);

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
			   (uint64_t)len, total_elt_sz * mp->size);

	/* Detect whether this memzone has enough space for all elements of
	 * the pool.
	 */
	if (len >= total_elt_sz * mp->size)
		bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

	/* For each memory chunk pinned to the mempool, a node describing the
	 * contained memseg is added to a linked list which is searched when
	 * a PA to VA conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
	if (!ms) {
		DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
		/* If the element is not added, lookups for it simply fail
		 * and the logic falls back to the traditional DPDK memseg
		 * traversal code. So this is not a blocking error; only an
		 * error message is printed.
		 */
		goto err;
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

err:
	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
					       obj_cb, obj_cb_arg);
}
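
/* Usage sketch (illustrative only, not part of the driver): applications
 * normally pick these ops up through the platform's default mempool ops,
 * but they can also be selected explicitly on an empty pool before it is
 * populated, e.g.:
 *
 *	mp = rte_mempool_create_empty(...);
 *	rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL);
 *	rte_mempool_populate_default(mp);
 */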
static const struct rte_mempool_ops dpaa_mpool_ops = {
	.name = DPAA_MEMPOOL_OPS_NAME,
	.alloc = dpaa_mbuf_create_pool,
	.free = dpaa_mbuf_free_pool,
	.enqueue = dpaa_mbuf_free_bulk,
	.dequeue = dpaa_mbuf_alloc_bulk,
	.get_count = dpaa_mbuf_get_count,
	.populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);