/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include "rte_dpaa2_mempool.h"

#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"

struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;

/* List of all the memseg information locally maintained in dpaa2 driver. This
 * is to optimize the PA_to_VA searches until a better mechanism (algo) is
 * available.
 */
struct dpaa2_memseg_list rte_dpaa2_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);

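/*
 * Illustrative PA-to-VA lookup over this list (a sketch only; the actual
 * search helper lives in the portal headers). The dpaa2_memseg field names
 * match those set in dpaa2_populate() below:
 *
 *	struct dpaa2_memseg *ms;
 *
 *	TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next)
 *		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
 *			return (char *)ms->vaddr + (paddr - ms->iova);
 */
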
/* Dynamic logging identifier for mempool */
int dpaa2_logtype_mempool;

static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_list *bp_list;
	struct dpaa2_dpbp_dev *avail_dpbp;
	struct dpaa2_bp_info *bp_info;
	struct dpbp_attr dpbp_attr;
	uint32_t bpid;
	int ret;

	avail_dpbp = dpaa2_alloc_dpbp_dev();
	if (!avail_dpbp) {
		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
		return -ENOENT;
	}

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_MEMPOOL_ERR("Failure in affining portal");
			goto err1;
		}
	}

	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
				  ret);
		goto err1;
	}

	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
				  avail_dpbp->token, &dpbp_attr);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
				  ret);
		goto err2;
	}

	bp_info = rte_malloc(NULL, sizeof(struct dpaa2_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err2;
	}

	/* Allocate the bp_list which will be added into global_bp_list */
	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_list) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err3;
	}

	/* Set parameters of buffer pool list */
	bp_list->buf_pool.num_bufs = mp->size;
	bp_list->buf_pool.size = mp->elt_size
			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
	bp_list->buf_pool.bpid = dpbp_attr.bpid;
	bp_list->buf_pool.h_bpool_mem = NULL;
	bp_list->buf_pool.dpbp_node = avail_dpbp;
	/* Identification for our offloaded pool_data structure */
	bp_list->dpaa2_ops_index = mp->ops_index;
	bp_list->next = h_bp_list;

	bpid = dpbp_attr.bpid;
	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;

	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
		   sizeof(struct dpaa2_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
	h_bp_list = bp_list;
	return 0;
err3:
	rte_free(bp_info);
err2:
	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
	dpaa2_free_dpbp_dev(avail_dpbp);
	return ret;
}

static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bpinfo;
	struct dpaa2_bp_list *bp;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
		return;
	}

	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
	bp = bpinfo->bp_list;
	dpbp_node = bp->buf_pool.dpbp_node;

	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);

	/* Unlink this pool from the global buffer pool list */
	if (h_bp_list == bp) {
		h_bp_list = h_bp_list->next;
	} else { /* if it is not the first node */
		struct dpaa2_bp_list *prev = h_bp_list, *temp;
		temp = h_bp_list->next;
		while (temp) {
			if (temp == bp) {
				prev->next = temp->next;
				rte_free(bp);
				break;
			}
			prev = temp;
			temp = temp->next;
		}
	}

	rte_free(mp->pool_data);
	dpaa2_free_dpbp_dev(dpbp_node);
}

static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int i, n, ret;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbuf to buffers for the remainder */
	for (i = 0; i < n; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	do {
		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
	} while (ret == -EBUSY);

aligned:
	/* if there are more buffers to free, release them in full bursts */
	while (n < count) {
		/* convert mbuf to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				  rte_mempool_virt2iova(obj_table[n + i])
				  + meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}
		do {
			ret = qbman_swp_release(swp, &releasedesc, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} while (ret == -EBUSY);
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}

uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return -ENOMEM;
	}
	return bp_info->bpid;
}

struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return NULL;
	}
	return (struct rte_mbuf *)((uint8_t *)buf_addr -
			bp_info->meta_data_size);
}

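/*
 * Layout assumed by the conversion above (the standard object layout this
 * driver sets up): the rte_mbuf header and the mempool private area sit
 * immediately before the data buffer handed to hardware.
 *
 *	[struct rte_mbuf][private area][buffer passed to QBMAN ...]
 *	^                              ^
 *	returned mbuf                  buf_addr (= mbuf + meta_data_size)
 */
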
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}
	bpid = bp_info->bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in 7s,
		 * then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						count - n);
		}
		/* In case of less than requested number of buffers available
		 * in pool, qbman_swp_acquire returns 0
		 */
		if (ret <= 0) {
			DPAA2_MEMPOOL_ERR("Buffer acquire failed with err code: %d",
					  ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything acquired so far.
			 */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					   bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
			obj_table[n] = (struct rte_mbuf *)
				(bufs[i] - bp_info->meta_data_size);
			DPAA2_MEMPOOL_DP_DEBUG(
				"Acquired %p address %p from BMAN\n",
				(void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
			       alloc, count, n);
#endif
	return 0;
}

static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
		  void * const *obj_table, unsigned int n)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}
	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
			   bp_info->meta_data_size, n);
	return 0;
}

static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
	int ret;
	unsigned int num_of_bufs = 0;
	struct dpaa2_bp_info *bp_info;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp || !mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

	ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
				     dpbp_node->token, &num_of_bufs);
	if (ret) {
		DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
				  ret);
		return 0;
	}

	DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
	return num_of_bufs;
}

static int
dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
	      void *vaddr, rte_iova_t paddr, size_t len,
	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa2_memseg *ms;

	/* For each memory chunk pinned to the Mempool, a linked list of the
	 * contained memsegs is created for searching when PA to VA
	 * conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
	if (!ms) {
		DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
		/* If the element is not added, it would only lead to failure
		 * in searching for the element and the logic would fall back
		 * to the traditional DPDK memseg traversal code. So, this is
		 * not a blocking error - but the error is printed on screen.
		 */
		return 0;
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);

	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
					       obj_cb, obj_cb_arg);
}

struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = DPAA2_MEMPOOL_OPS_NAME,
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
	.populate = dpaa2_populate,
};

MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);

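/*
 * Illustrative application-side usage of these ops (a sketch only; pool
 * sizes and cache values below are placeholders, and packet applications
 * would normally just use rte_pktmbuf_pool_create() on this platform):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("pkt_pool", 8192,
 *				      RTE_MBUF_DEFAULT_BUF_SIZE, 256,
 *				      sizeof(struct rte_pktmbuf_pool_private),
 *				      rte_socket_id(), 0);
 *	rte_mempool_set_ops_byname(mp, DPAA2_MEMPOOL_OPS_NAME, NULL);
 *	rte_mempool_populate_default(mp);
 */
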
RTE_INIT(dpaa2_mempool_init_log)
{
	dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
	if (dpaa2_logtype_mempool >= 0)
		rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
}