/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"

struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;

/* List of all the memseg information locally maintained in the dpaa2 driver.
 * This is to optimize the PA_to_VA searches until a better mechanism (algo)
 * is available.
 */
struct dpaa2_memseg_list rte_dpaa2_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
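
/*
 * A PA-to-VA lookup over this list walks the pinned chunks and applies the
 * matching chunk's VA/IOVA offset. Illustrative sketch only (not compiled);
 * the driver's actual fast-translation helper lives with the portal code
 * in dpaa2_hw_pvt.h:
 */
#if 0
static void *
example_pa_to_va(rte_iova_t paddr)
{
	struct dpaa2_memseg *ms;

	TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
		if (paddr >= ms->iova && paddr < ms->iova + ms->len)
			return RTE_PTR_ADD(ms->vaddr, paddr - ms->iova);
	}
	return NULL;	/* caller falls back to generic memseg traversal */
}
#endif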

/* Dynamic logging identifier for mempool */
int dpaa2_logtype_mempool;

static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_list *bp_list;
	struct dpaa2_dpbp_dev *avail_dpbp;
	struct dpaa2_bp_info *bp_info;
	struct dpbp_attr dpbp_attr;
	uint32_t bpid;
	int ret;

	avail_dpbp = dpaa2_alloc_dpbp_dev();

	if (!avail_dpbp) {
		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
		return -ENOENT;
	}

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_MEMPOOL_ERR("Failure in affining portal");
			goto err1;
		}
	}

	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
				  ret);
		goto err1;
	}

	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
				  avail_dpbp->token, &dpbp_attr);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
				  ret);
		goto err2;
	}

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa2_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err2;
	}

	/* Allocate the bp_list which will be added into the global bp list */
	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_list) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err3;
	}

	/* Set parameters of buffer pool list */
	bp_list->buf_pool.num_bufs = mp->size;
	bp_list->buf_pool.size = mp->elt_size
			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
	bp_list->buf_pool.bpid = dpbp_attr.bpid;
	bp_list->buf_pool.h_bpool_mem = NULL;
	bp_list->buf_pool.dpbp_node = avail_dpbp;
	/* Identification for our offloaded pool_data structure */
	bp_list->dpaa2_ops_index = mp->ops_index;
	bp_list->next = h_bp_list;
	bp_list->mp = mp;

	bpid = dpbp_attr.bpid;

	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;

	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
		   sizeof(struct dpaa2_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);

	h_bp_list = bp_list;
	return 0;
err3:
	rte_free(bp_info);
err2:
	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
	dpaa2_free_dpbp_dev(avail_dpbp);

	return ret;
}
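
/*
 * Layout of each pool object implied by the meta_data_size arithmetic
 * above (sketch):
 *
 *   mbuf VA ------> +------------------------+ ---.
 *                   | struct rte_mbuf        |    | meta_data_size
 *                   | application priv area  |    |
 *   HW buf addr --> +------------------------+ ---'
 *                   | data room              |  = buf_pool.size
 *                   +------------------------+
 *
 * QBMAN only ever stores the HW buffer address; the release and acquire
 * paths below add or subtract meta_data_size to convert between the two
 * views of the same object.
 */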

static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bpinfo;
	struct dpaa2_bp_list *bp;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
		return;
	}

	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
	bp = bpinfo->bp_list;
	dpbp_node = bp->buf_pool.dpbp_node;

	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);

	if (h_bp_list == bp) {
		h_bp_list = h_bp_list->next;
	} else { /* if it is not the first node */
		struct dpaa2_bp_list *prev = h_bp_list, *temp;
		temp = h_bp_list->next;
		while (temp) {
			if (temp == bp) {
				prev->next = temp->next;
				rte_free(bp);
				break;
			}
			prev = temp;
			temp = temp->next;
		}
	}

	rte_free(mp->pool_data);
	dpaa2_free_dpbp_dev(dpbp_node);
}

static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbufs to buffer addresses for the remainder */
	for (i = 0; i < n; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	do {
		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
	} while (ret == -EBUSY);

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbufs to buffer addresses */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				  rte_mempool_virt2iova(obj_table[n + i])
				  + meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}

		do {
			ret = qbman_swp_release(swp, &releasedesc, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} while (ret == -EBUSY);
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}
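
/*
 * Worked example of the chunking above: with count = 20 and
 * DPAA2_MBUF_MAX_ACQ_REL = 7, the remainder 20 % 7 = 6 buffers are
 * released first, then the while loop issues two full releases of 7,
 * for 6 + 7 + 7 = 20. The acquire path in rte_dpaa2_mbuf_alloc_bulk()
 * below chunks the same way, but takes the full 7s first and the
 * remainder last.
 */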

int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);

	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}

	bpid = bp_info->bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in chunks of
		 * DPAA2_MBUF_MAX_ACQ_REL buffers, then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						count - n);
		}
		/* If fewer than the requested number of buffers are
		 * available in the pool, qbman_swp_acquire() returns 0.
		 */
		if (ret <= 0) {
			DPAA2_MEMPOOL_ERR("Buffer acquire failed with err code: %d",
					  ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything acquired so far.
			 */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					       bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assign mbufs from the acquired buffer addresses */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
			obj_table[n] = (struct rte_mbuf *)
				       (bufs[i] - bp_info->meta_data_size);
			DPAA2_MEMPOOL_DP_DEBUG(
				   "Acquired %p address %p from BMAN\n",
				   (void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
			       alloc, count, n);
#endif
	return 0;
}

static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
		      void * const *obj_table, unsigned int n)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}
	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
			       bp_info->meta_data_size, n);

	return 0;
}

static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
	int ret;
	unsigned int num_of_bufs = 0;
	struct dpaa2_bp_info *bp_info;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp || !mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

	ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
				     dpbp_node->token, &num_of_bufs);
	if (ret) {
		DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
				  ret);
		return 0;
	}

	DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);

	return num_of_bufs;
}

static int
dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
	       void *vaddr, rte_iova_t paddr, size_t len,
	       rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa2_memseg *ms;

	/* For each memory chunk pinned to the mempool, a node holding
	 * the contained memseg information is kept for searching when
	 * PA to VA conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
	if (!ms) {
		DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation will not be available.");
		/* If the element is not added, searching for it merely
		 * fails and the logic falls back to the traditional DPDK
		 * memseg traversal code. So this is not a blocking error;
		 * only an error message is printed.
		 */
		return 0;
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as
	 * the buffers pinned last are picked first, from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);

	return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
					       obj_cb, obj_cb_arg);
}
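
/*
 * Note (based on the generic mempool API, not specific to this driver):
 * the populate callback above is invoked once per contiguous memory chunk
 * added to the pool, e.g. by rte_mempool_populate_default(), so the list
 * ends up with one node per chunk rather than one per object.
 */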

struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = DPAA2_MEMPOOL_OPS_NAME,
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
	.populate = dpaa2_populate,
};

MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
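
/*
 * Illustrative usage sketch (not compiled; names and sizes are example
 * values): an application selects these ops per pool via the ops name in
 * DPAA2_MEMPOOL_OPS_NAME, for instance with
 * rte_pktmbuf_pool_create_by_ops(), or globally, e.g. through the EAL
 * --mbuf-pool-ops-name option.
 */
#if 0
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create_by_ops("example_pool",
			8192 /* objects */, 256 /* per-lcore cache */,
			0 /* app private area */, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id(), DPAA2_MEMPOOL_OPS_NAME);
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "mempool creation failed\n");
#endif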

RTE_INIT(dpaa2_mempool_init_log);
static void
dpaa2_mempool_init_log(void)
{
	dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
	if (dpaa2_logtype_mempool >= 0)
		rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
}