1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
11 #include <rte_malloc.h>
/*
 * dpool_init() - initialize a dynamic pool.
 *
 * Allocates the backing entry array via tfp_calloc(), records the pool
 * parameters on the dpool object, and marks every entry free.
 *
 * NOTE(review): this listing is sampled; several source lines (remaining
 * parameters, the rest of the tfp_calloc_parms setup, the rc error check,
 * and the function's return) are elided from view.
 */
16 int dpool_init(struct dpool *dpool,
19 uint8_t max_alloc_size,
21 int (*move_callback)(void *, uint64_t, uint32_t))
25 struct tfp_calloc_parms parms;
/* Element size for the entry array; nmemb/alignment set on elided lines. */
28 parms.size = sizeof(struct dpool_entry);
31 rc = tfp_calloc(&parms);
/* Take ownership of the allocated array and record pool parameters. */
36 dpool->entry = parms.mem_va;
37 dpool->start_index = start_index;
39 dpool->max_alloc_size = max_alloc_size;
40 dpool->user_data = user_data;
41 dpool->move_callback = move_callback;
/*
 * Mark every entry free (flags = 0) and clear its per-entry data.
 * NOTE(review): each entry's .index is set to start_index here, not
 * start_index + i — presumably intentional, but worth confirming against
 * how dpool_free_all() consumes .index.
 */
45 for (i = 0; i < size; i++) {
46 dpool->entry[i].flags = 0;
47 dpool->entry[i].index = start_index;
48 dpool->entry[i].entry_data = 0UL;
/*
 * dpool_move() - relocate one allocated run of entries to a free slot.
 *
 * If the destination entry is free, the source run's flags and entry_data
 * are copied to the destination, the owner is notified through
 * move_callback (passing the entry_data and the new *external* index,
 * i.e. dst_index + start_index), the source start entry is cleared, and
 * the remaining entries of the run are re-tagged at the destination and
 * cleared at the source.
 *
 * NOTE(review): listing is sampled — the dst_index/src_index parameter
 * lines, the destination START-flag handling, and the non-free-destination
 * error path are elided.
 */
55 static int dpool_move(struct dpool *dpool,
61 if (DP_IS_FREE(dpool->entry[dst_index].flags)) {
/* Size (in entries) of the run being moved, taken from the source. */
62 size = DP_FLAGS_SIZE(dpool->entry[src_index].flags);
64 dpool->entry[dst_index].flags = dpool->entry[src_index].flags;
65 dpool->entry[dst_index].entry_data = dpool->entry[src_index].entry_data;
/* Tell the owner the entry now lives at a new external index. */
67 if (dpool->move_callback != NULL) {
68 dpool->move_callback(dpool->user_data,
69 dpool->entry[src_index].entry_data,
70 dst_index + dpool->start_index);
/* Release the source start entry. */
73 dpool->entry[src_index].flags = 0;
74 dpool->entry[src_index].entry_data = 0UL;
/* Re-tag the tail of the run at the destination; free it at the source. */
76 for (i = 1; i < size; i++) {
77 dpool->entry[dst_index + i].flags = size;
78 dpool->entry[src_index + i].flags = 0;
/*
 * dpool_defrag() - compact the pool to grow the largest free run.
 *
 * Builds a list of free runs and a list of allocated runs adjacent to
 * free space, then moves the "best" allocated run (largest combined
 * free space on its left + right) into a free slot, repeating until a
 * free run of at least entry_size exists (DP_DEFRAG_TO_FIT) or no more
 * progress can be made. Returns the size of the largest free run.
 *
 * NOTE(review): listing is sampled — the outer while loop, goto/cleanup
 * labels, rte_free() calls, and several bookkeeping lines are elided.
 */
88 int dpool_defrag(struct dpool *dpool,
92 struct dpool_free_list *free_list;
93 struct dpool_adj_list *adj_list;
99 uint32_t largest_free_index = 0;
100 uint32_t largest_free_size;
103 uint32_t max_size = 0;
/* Scratch lists are heap-allocated; each is freed on the elided exit path. */
106 free_list = rte_zmalloc("dpool_free_list",
107 sizeof(struct dpool_free_list), 0);
108 if (free_list == NULL) {
109 TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
113 adj_list = rte_zmalloc("dpool_adjacent_list",
114 sizeof(struct dpool_adj_list), 0);
115 if (adj_list == NULL) {
116 TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
/*
 * Pass 1: scan the pool and record every run of consecutive free
 * entries, remembering the largest run seen.
 */
122 * Create list of free entries
125 largest_free_size = 0;
126 largest_free_index = 0;
129 for (i = 0; i < dpool->size; i++) {
130 if (DP_IS_FREE(dpool->entry[i].flags)) {
/* End of a free run: record its start index and length. */
134 } else if (count > 0) {
135 free_list->entry[free_list->size].index = index;
136 free_list->entry[free_list->size].size = count;
138 if (count > largest_free_size) {
139 largest_free_index = free_list->size;
140 largest_free_size = count;
/*
 * No run was recorded in the list at all — presumably the pool ends in
 * (or consists entirely of) one free run; use its trailing count.
 * NOTE(review): confirm against the elided end-of-scan handling.
 */
148 if (free_list->size == 0)
149 largest_free_size = count;
152 * If using defrag to fit and there's a large enough
153 * space then we are done.
155 if (defrag == DP_DEFRAG_TO_FIT &&
156 largest_free_size >= entry_size)
/*
 * Pass 2: record every allocated run, along with the amount of free
 * space immediately to its left and right.
 */
160 * Create list of entries adjacent to free entries
166 for (i = 0; i < dpool->size; ) {
167 if (DP_IS_USED(dpool->entry[i].flags)) {
171 adj_list->entry[adj_list->size].index = i;
172 adj_list->entry[adj_list->size].size =
173 DP_FLAGS_SIZE(dpool->entry[i].flags);
/* Free space accumulated since the previous used run = our left gap. */
174 adj_list->entry[adj_list->size].left = count;
/* That same gap is the previous used run's right gap. */
176 if (adj_list->size > 0 && used == 1)
177 adj_list->entry[adj_list->size - 1].right = count;
/* Skip over the whole run in one step. */
183 i += DP_FLAGS_SIZE(dpool->entry[i].flags);
192 * Using the size of the largest free space available
193 * select the adjacency list entry of that size with
194 * the largest left + right + size count. If there
195 * are no entries of that size then decrement the size
/* Candidate selection: best (left+right) score at the largest movable size. */
202 for (size = largest_free_size; size > 0; size--) {
203 for (i = 0; i < adj_list->size; i++) {
204 if (adj_list->entry[i].size == size &&
206 adj_list->entry[i].left +
207 adj_list->entry[i].right) > max)) {
209 adj_list->entry[i].left +
210 adj_list->entry[i].right;
212 max_index = adj_list->entry[i].index;
221 * If the max entry is smaller than the largest_free_size
222 * find the first entry in the free list that it can fit into.
224 if (max_size < largest_free_size) {
225 for (i = 0; i < free_list->size; i++) {
226 if (free_list->entry[i].size >= max_size) {
227 largest_free_index = i;
/* Relocate the chosen run into the selected free slot. */
234 * If we have a contender then move it to the new spot.
237 rc = dpool_move(dpool,
238 free_list->entry[largest_free_index].index,
253 return largest_free_size;
/*
 * dpool_alloc() - allocate <size> consecutive entries from the pool.
 *
 * Returns the *external* index (internal index + start_index) of the
 * first entry on success, DP_INVALID_INDEX on failure. If the first
 * scan finds no room and defrag is enabled, the pool is defragmented
 * and the scan is retried (retry goto is on an elided line).
 *
 * NOTE(review): listing is sampled — parameter lines, the free-run
 * counting logic, and the retry jump are elided.
 */
257 uint32_t dpool_alloc(struct dpool *dpool,
264 uint32_t first_entry_index;
/* Reject requests larger than the per-allocation cap, and zero-size. */
267 if (size > dpool->max_alloc_size || size == 0)
268 return DP_INVALID_INDEX;
271 * Defrag requires EM move support.
273 if (defrag != DP_DEFRAG_NONE &&
274 dpool->move_callback == NULL)
275 return DP_INVALID_INDEX;
279 * find <size> consecutive free entries
281 for (i = 0; i < dpool->size; i++) {
282 if (DP_IS_FREE(dpool->entry[i].flags)) {
284 first_entry_index = i;
/*
 * Found a big-enough run: stamp every entry with the run size; the
 * elided line 292 additionally ORs a flag (presumably the START marker)
 * into the first entry — confirm against DP_FLAGS definitions.
 */
289 for (j = 0; j < size; j++) {
290 dpool->entry[j + first_entry_index].flags = size;
292 dpool->entry[j + first_entry_index].flags |=
296 dpool->entry[i].entry_data = 0UL;
297 return (first_entry_index + dpool->start_index);
305 * If defragging then do it to it
307 if (defrag != DP_DEFRAG_NONE) {
308 rc = dpool_defrag(dpool, size, defrag);
/* Defrag itself failed (negative rc path on elided line). */
311 return DP_INVALID_INDEX;
317 * If the defrag created enough space then try the
318 * alloc again else quit.
320 if ((uint32_t)rc < size)
324 return DP_INVALID_INDEX;
/*
 * dpool_free() - free the allocation that starts at external <index>.
 *
 * Translates the external index back to an internal offset; only an
 * entry carrying the START flag is freed (the whole run of <size>
 * entries has its flags cleared). NOTE(review): the bounds check on
 * <start>, the size-validation error path, and the return statements
 * are on elided lines.
 */
327 int dpool_free(struct dpool *dpool,
331 int start = (index - dpool->start_index);
337 if (DP_IS_START(dpool->entry[start].flags)) {
338 size = DP_FLAGS_SIZE(dpool->entry[start].flags);
/* Corrupt/out-of-range run size — error path on elided line. */
339 if (size > dpool->max_alloc_size || size == 0)
/* Clear the whole run in one sweep. */
342 for (i = start; i < (start + size); i++)
343 dpool->entry[i].flags = 0;
/*
 * dpool_free_all() - free every allocation in the pool.
 *
 * Calls dpool_free() once per entry using the entry's stored .index;
 * dpool_free() only acts on START entries, so non-start entries are
 * no-ops. NOTE(review): dpool_init() sets every .index to start_index
 * (not start_index + i) — confirm this interaction is intended.
 */
351 void dpool_free_all(struct dpool *dpool)
355 for (i = 0; i < dpool->size; i++)
356 dpool_free(dpool, dpool->entry[i].index);
359 int dpool_set_entry_data(struct dpool *dpool,
363 int start = (index - dpool->start_index);
368 if (DP_IS_START(dpool->entry[start].flags)) {
369 dpool->entry[start].entry_data = entry_data;