/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
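
/*
 * rte_dpaa2_bpid_info is indexed by hardware bpid so that data-path
 * code can map a bpid back to its pool metadata; h_bp_list is the
 * head of the global list of configured buffer pools.
 */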
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
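
/*
 * Mempool ops "alloc": back this mempool with a hardware DPBP buffer
 * pool. A free DPBP device is taken and enabled, and the pool geometry
 * is recorded in rte_dpaa2_bpid_info[] and in the global bp list.
 */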
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_list *bp_list;
	struct dpaa2_dpbp_dev *avail_dpbp;
	struct dpaa2_bp_info *bp_info;
	struct dpbp_attr dpbp_attr;
	uint32_t bpid;
	int ret;

	avail_dpbp = dpaa2_alloc_dpbp_dev();

	if (!avail_dpbp) {
		PMD_DRV_LOG(ERR, "DPAA2 resources not available");
		return -ENOENT;
	}

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			goto err1;
		}
	}

	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Resource enable failure with"
			" err code: %d\n", ret);
		goto err1;
	}

	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
				  avail_dpbp->token, &dpbp_attr);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Resource read failure with"
			     " err code: %d\n", ret);
		goto err2;
	}

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa2_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
		ret = -ENOMEM;
		goto err2;
	}

	/* Allocate the bp_list which will be added into global_bp_list */
	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_list) {
		PMD_INIT_LOG(ERR, "No heap memory available");
		ret = -ENOMEM;
		goto err3;
	}

	/* Set parameters of buffer pool list */
	bp_list->buf_pool.num_bufs = mp->size;
	bp_list->buf_pool.size = mp->elt_size
			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
	bp_list->buf_pool.bpid = dpbp_attr.bpid;
	bp_list->buf_pool.h_bpool_mem = NULL;
	bp_list->buf_pool.dpbp_node = avail_dpbp;
	/* Identification for our offloaded pool_data structure */
	bp_list->dpaa2_ops_index = mp->ops_index;
	bp_list->next = h_bp_list;

	bpid = dpbp_attr.bpid;
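
	/*
	 * meta_data_size is the distance from the start of the buffer
	 * handed to hardware back to its rte_mbuf header; the alloc
	 * path subtracts it from a QBMAN address to recover the mbuf.
	 */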
	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;

	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
		   sizeof(struct dpaa2_bp_info));
	mp->pool_data = (void *)bp_info;

	PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);

	h_bp_list = bp_list;
	return 0;
err3:
	rte_free(bp_info);
err2:
	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
	dpaa2_free_dpbp_dev(avail_dpbp);

	return ret;
}
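
/*
 * Mempool ops "free": disable the DPBP object, unlink this pool from
 * the global bp list and release the DPBP device and pool metadata.
 */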
static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bpinfo;
	struct dpaa2_bp_list *bp;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp->pool_data) {
		PMD_DRV_LOG(ERR, "Not a valid dpaa2 pool");
		return;
	}

	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
	bp = bpinfo->bp_list;
	dpbp_node = bp->buf_pool.dpbp_node;

	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);

	if (h_bp_list == bp) {
		h_bp_list = h_bp_list->next;
	} else { /* if it is not the first node */
		struct dpaa2_bp_list *prev = h_bp_list, *temp;
		temp = h_bp_list->next;
		while (temp) {
			if (temp == bp) {
				prev->next = temp->next;
				rte_free(bp);
				break;
			}
			prev = temp;
			temp = temp->next;
		}
	}

	rte_free(mp->pool_data);
	dpaa2_free_dpbp_dev(dpbp_node);
}
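
/*
 * Hand buffers back to the hardware pool through the QBMAN portal,
 * in bursts of at most DPAA2_MBUF_MAX_ACQ_REL (the remainder first),
 * retrying while the portal reports busy.
 */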
static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbuf to buffers for the remainder */
	for (i = 0; i < n ; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	do {
		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
	} while (ret == -EBUSY);

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbuf to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				  rte_mempool_virt2iova(obj_table[n + i])
				  + meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}

		do {
			ret = qbman_swp_release(swp, &releasedesc, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} while (ret == -EBUSY);
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}
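
/*
 * Mempool ops "dequeue": acquire buffers from the hardware pool via
 * the QBMAN portal and turn each returned address back into an mbuf
 * pointer by stepping back over the per-buffer metadata.
 */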
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);

	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return -ENOENT;
	}

	bpid = bp_info->bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in 7s,
		 * then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, bufs,
						count - n);
		}
		/* In case of less than requested number of buffers available
		 * in pool, qbman_swp_acquire returns 0
		 */
		if (ret <= 0) {
			PMD_TX_LOG(ERR, "Buffer acquire failed with"
				   " err code: %d", ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything acquired so far.
			 */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					   bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
			obj_table[n] = (struct rte_mbuf *)
				       (bufs[i] - bp_info->meta_data_size);
			PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
				   (void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
		   alloc, count, n);
#endif
	return 0;
}
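
/* Mempool ops "enqueue": return a set of buffers to the hardware pool. */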
static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
		  void * const *obj_table, unsigned int n)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
		return -ENOENT;
	}
	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
			   bp_info->meta_data_size, n);

	return 0;
}
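
/*
 * Mempool ops "get_count": query the DPBP for the number of buffers
 * currently free in the hardware pool.
 */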
static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
	int ret;
	unsigned int num_of_bufs = 0;
	struct dpaa2_bp_info *bp_info;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp || !mp->pool_data) {
		RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
		return 0;
	}

	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

	ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
				     dpbp_node->token, &num_of_bufs);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
			ret);
		return 0;
	}

	RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);

	return num_of_bufs;
}
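
/* Ops table registered with the mempool library under the name "dpaa2" */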
struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = "dpaa2",
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
};

MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
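
/*
 * Usage sketch (illustrative only, not part of this driver): an
 * application binds a mempool to these ops by name before populating
 * it. Pool sizes below are arbitrary example values.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("pkt_pool", 8192,
 *				      RTE_MBUF_DEFAULT_BUF_SIZE +
 *				      sizeof(struct rte_mbuf), 256,
 *				      sizeof(struct rte_pktmbuf_pool_private),
 *				      rte_socket_id(), 0);
 *	if (mp && rte_mempool_set_ops_byname(mp, "dpaa2", NULL) == 0) {
 *		rte_pktmbuf_pool_init(mp, NULL);
 *		rte_mempool_populate_default(mp);
 *		rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
 *	}
 */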