/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 */
#include <errno.h>
#include <sys/types.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include "rte_dpaa2_mempool.h"

#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
/* Dynamic logging identifier for mempool */
int dpaa2_logtype_mempool;
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_list *bp_list;
	struct dpaa2_dpbp_dev *avail_dpbp;
	struct dpaa2_bp_info *bp_info;
	struct dpbp_attr dpbp_attr;
	uint32_t bpid;
	int ret;
	avail_dpbp = dpaa2_alloc_dpbp_dev();
	if (!avail_dpbp) {
		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
		return -ENOENT;
	}

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_MEMPOOL_ERR("Failure in affining portal");
			goto err1;
		}
	}

	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
				  ret);
		goto err1;
	}

	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
				  avail_dpbp->token, &dpbp_attr);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
				  ret);
		goto err2;
	}
	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa2_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err2;
	}
	/* Allocate the bp_list which will be added into global_bp_list */
	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_list) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err3;
	}
	/* Set parameters of buffer pool list */
	bp_list->buf_pool.num_bufs = mp->size;
	bp_list->buf_pool.size = mp->elt_size
			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
	bp_list->buf_pool.bpid = dpbp_attr.bpid;
	bp_list->buf_pool.h_bpool_mem = NULL;
	bp_list->buf_pool.dpbp_node = avail_dpbp;
	/* Identification for our offloaded pool_data structure */
	bp_list->dpaa2_ops_index = mp->ops_index;
	bp_list->next = h_bp_list;
	bpid = dpbp_attr.bpid;

	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;
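	/*
	 * Illustrative layout note: each element in the pool is laid out as
	 * [struct rte_mbuf][private area][data buffer]. Hardware is only
	 * handed the data buffer address, so meta_data_size records how many
	 * bytes to step back from a hardware buffer address to recover the
	 * mbuf (see rte_dpaa2_mbuf_release()/rte_dpaa2_mbuf_alloc_bulk()).
	 */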
	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
		   sizeof(struct dpaa2_bp_info));
	mp->pool_data = (void *)bp_info;
	DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);

	h_bp_list = bp_list;
	return 0;
err3:
	rte_free(bp_info);
err2:
	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
	dpaa2_free_dpbp_dev(avail_dpbp);

	return ret;
}
static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bpinfo;
	struct dpaa2_bp_list *bp;
	struct dpaa2_dpbp_dev *dpbp_node;
	if (!mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
		return;
	}
	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
	bp = bpinfo->bp_list;
	dpbp_node = bp->buf_pool.dpbp_node;

	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
	if (h_bp_list == bp) {
		h_bp_list = h_bp_list->next;
	} else { /* if it is not the first node */
		struct dpaa2_bp_list *prev = h_bp_list, *temp;
		temp = h_bp_list->next;
		while (temp) {
			if (temp == bp) {
				prev->next = temp->next;
				rte_free(bp);
				break;
			}
			prev = temp;
			temp = temp->next;
		}
	}
	rte_free(mp->pool_data);
	dpaa2_free_dpbp_dev(dpbp_node);
}
static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;
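	/*
	 * Illustrative: with DPAA2_MBUF_MAX_ACQ_REL == 7 and count == 18,
	 * n = 18 % 7 = 4, so the 4-buffer remainder is released first and
	 * the loop below then releases two full batches of 7. Handling the
	 * remainder up front keeps every later qbman_swp_release() call
	 * working on full-sized batches.
	 */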
	/* convert mbuf to buffers for the remainder */
	for (i = 0; i < n; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}
	/* feed them to bman */
	do {
		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
	} while (ret == -EBUSY);

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbuf to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				  rte_mempool_virt2iova(obj_table[n + i])
				  + meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}
		do {
			ret = qbman_swp_release(swp, &releasedesc, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} while (ret == -EBUSY);
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}
uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return -ENOMEM;
	}

	return bp_info->bpid;
}
struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return NULL;
	}

	return (struct rte_mbuf *)((uint8_t *)buf_addr -
			bp_info->meta_data_size);
}
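/*
 * Usage sketch (illustrative, names hypothetical): given a buffer address
 * "buf" that hardware reported for a packet from mempool "mp":
 *
 *	struct rte_mbuf *m = rte_dpaa2_mbuf_from_buf_addr(mp, buf);
 *
 * The mbuf header and private area sit immediately before the data buffer,
 * so stepping back meta_data_size bytes recovers the mbuf.
 */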
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;
	struct dpaa2_bp_info *bp_info;
	bp_info = mempool_to_bpinfo(pool);

	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}

	bpid = bp_info->bpid;
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in 7s,
		 * then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						count - n);
		}
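		/*
		 * Illustrative: for count == 10, the first pass requests
		 * DPAA2_MBUF_MAX_ACQ_REL (7) buffers and the second pass
		 * requests the remaining 3.
		 */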
		/* In case of less than requested number of buffers available
		 * in pool, qbman_swp_acquire returns 0
		 */
		if (ret <= 0) {
			DPAA2_MEMPOOL_ERR("Buffer acquire failed with err code: %d",
					  ret);
			/* The API expects the exact number of requested bufs */
			/* Releasing all buffers allocated so far */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					       bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
			obj_table[n] = (struct rte_mbuf *)
				       (bufs[i] - bp_info->meta_data_size);
			DPAA2_MEMPOOL_DP_DEBUG(
				"Acquired %p address %p from BMAN\n",
				(void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
			       alloc, count, n);
#endif
	return 0;
}
static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
		      void * const *obj_table, unsigned int n)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}
	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
			       bp_info->meta_data_size, n);

	return 0;
}
static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
	int ret;
	unsigned int num_of_bufs = 0;
	struct dpaa2_bp_info *bp_info;
	struct dpaa2_dpbp_dev *dpbp_node;
	if (!mp || !mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

	ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
				     dpbp_node->token, &num_of_bufs);
	if (ret) {
		DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
				  ret);
		return 0;
	}

	DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);

	return num_of_bufs;
}
static int
dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
	       void *vaddr, rte_iova_t paddr, size_t len,
	       rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa2_memseg *ms;
	/* For each memory chunk pinned to the Mempool, a linked list of the
	 * contained memsegs is created for searching when PA to VA
	 * conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
	if (!ms) {
		DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
		/* If the element is not added, searching for it would simply
		 * fail and the logic would fall back to the traditional DPDK
		 * memseg traversal code. So this is not a blocking error -
		 * only an error message is printed.
		 */
		return 0;
	}
	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
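	/*
	 * Illustrative note (assumption about the helper's internals): the
	 * rte_dpaa2_memsegs list built here is what lets helpers such as
	 * DPAA2_MODIFY_IOVA_TO_VADDR (used in rte_dpaa2_mbuf_alloc_bulk()
	 * above) resolve a physical address to a virtual one without
	 * walking the full DPDK memseg lists.
	 */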
	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
					       obj_cb, obj_cb_arg);
}
struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = DPAA2_MEMPOOL_OPS_NAME,
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
	.populate = dpaa2_populate,
};
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
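/*
 * Usage sketch (illustrative, not part of this driver): an application can
 * attach these ops to an empty mempool by name; "mp" and the sizing
 * parameters below are hypothetical.
 *
 *	struct rte_mempool *mp = rte_mempool_create_empty("pkt_pool",
 *			8192, RTE_MBUF_DEFAULT_BUF_SIZE, 256,
 *			sizeof(struct rte_pktmbuf_pool_private),
 *			rte_socket_id(), 0);
 *	if (mp != NULL &&
 *	    rte_mempool_set_ops_byname(mp, DPAA2_MEMPOOL_OPS_NAME, NULL) == 0)
 *		rte_mempool_populate_default(mp);
 */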
RTE_INIT(dpaa2_mempool_init_log)
{
	dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
	if (dpaa2_logtype_mempool >= 0)
		rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
}