1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
11 #include <rte_malloc.h>
/*
 * NOTE(review): this listing is a sampled fragment — interior lines
 * (remaining parameters, rc checks, closing braces) are not visible.
 * Only the statements shown below are documented.
 *
 * dpool_init() — initialize a dynamic pool over an entry array.
 * Allocates the per-slot dpool_entry array via tfp_calloc(), records the
 * pool geometry (start_index, max_alloc_size), the opaque user_data and
 * the optional move_callback used during defragmentation, then marks
 * every entry free.
 */
16 int dpool_init(struct dpool *dpool,
19 uint8_t max_alloc_size,
/* Callback invoked when an in-use entry is relocated by defrag:
 * (user_data, entry_data, new external index). Presumably may be NULL
 * to disable defrag — dpool_alloc() rejects defrag when it is NULL. */
21 int (*move_callback)(void *, uint64_t, uint32_t))
25 struct tfp_calloc_parms parms;
/* One dpool_entry per pool slot. */
28 parms.size = sizeof(struct dpool_entry);
31 rc = tfp_calloc(&parms);
36 dpool->entry = parms.mem_va;
37 dpool->start_index = start_index;
39 dpool->max_alloc_size = max_alloc_size;
40 dpool->user_data = user_data;
41 dpool->move_callback = move_callback;
/* Start with every entry free: flags = 0, no payload. Each entry also
 * records its external index (start_index here is presumably advanced
 * per iteration on a line not visible — TODO confirm). */
45 for (i = 0; i < size; i++) {
46 dpool->entry[i].flags = 0;
47 dpool->entry[i].index = start_index;
48 dpool->entry[i].entry_data = 0UL;
/*
 * dpool_move() — relocate one allocated run of entries inside the pool.
 * If the destination slot is free: copy the run's start flags and
 * entry_data to dst_index, notify the owner through move_callback with
 * the payload and the new external index, then clear the source run.
 * (Fragment: declarations and return paths are not visible.)
 */
55 static int dpool_move(struct dpool *dpool,
/* Only move into a free destination slot. */
61 if (DP_IS_FREE(dpool->entry[dst_index].flags)) {
62 size = DP_FLAGS_SIZE(dpool->entry[src_index].flags);
64 dpool->entry[dst_index].flags = dpool->entry[src_index].flags;
65 dpool->entry[dst_index].entry_data = dpool->entry[src_index].entry_data;
/* Tell the owner where its entry went — note the callback receives the
 * index in the external (start_index-offset) space. */
67 if (dpool->move_callback != NULL) {
68 dpool->move_callback(dpool->user_data,
69 dpool->entry[src_index].entry_data,
70 dst_index + dpool->start_index);
/* Release the old start entry... */
73 dpool->entry[src_index].flags = 0;
74 dpool->entry[src_index].entry_data = 0UL;
/* ...and fix up the non-start entries of the run: destination slots
 * carry the run size in their flags, source slots become free. */
76 for (i = 1; i < size; i++) {
77 dpool->entry[dst_index + i].flags = size;
78 dpool->entry[src_index + i].flags = 0;
/*
 * dpool_defrag() — compact the pool to coalesce free entries.
 * Builds (1) a list of free runs and (2) a list of allocated runs with
 * the free space adjacent to them, then moves the best candidate run
 * into free space. With DP_DEFRAG_TO_FIT the pass stops as soon as a
 * hole of at least entry_size exists. Returns the size of the largest
 * free run. (Fragment: loop interiors and exit paths are not visible;
 * whether free_list/adj_list are rte_free()d on every path cannot be
 * confirmed from here — TODO verify no leak on early returns.)
 */
88 int dpool_defrag(struct dpool *dpool,
92 struct dpool_free_list *free_list;
93 struct dpool_adj_list *adj_list;
99 uint32_t largest_free_index = 0;
100 uint32_t largest_free_size;
103 uint32_t max_size = 0;
/* Scratch lists are heap-allocated (zeroed) to keep them off the stack. */
106 free_list = rte_zmalloc("dpool_free_list",
107 sizeof(struct dpool_free_list), 0);
108 if (free_list == NULL) {
109 TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
113 adj_list = rte_zmalloc("dpool_adjacent_list",
114 sizeof(struct dpool_adj_list), 0);
115 if (adj_list == NULL) {
116 TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
122 * Create list of free entries
/* Pass 1: record each run of free entries (index, length), tracking the
 * largest run found so far. */
125 largest_free_size = 0;
126 largest_free_index = 0;
130 for (i = 0; i < dpool->size; i++) {
131 if (DP_IS_FREE(dpool->entry[i].flags)) {
135 } else if (count > 0) {
136 free_list->entry[free_list->size].index = index;
137 free_list->entry[free_list->size].size = count;
/* Remember the biggest hole seen so far. */
139 if (count > largest_free_size) {
140 largest_free_index = free_list->size;
141 largest_free_size = count;
/* Pool ended in free space without any run recorded yet. */
149 if (free_list->size == 0)
150 largest_free_size = count;
153 * If using defrag to fit and there's a large enough
154 * space then we are done.
156 if (defrag == DP_DEFRAG_TO_FIT &&
157 largest_free_size >= entry_size)
161 * Create list of entries adjacent to free entries
/* Pass 2: for each used run record its start, size and the free space
 * to its left; when free space follows a run, patch that free count
 * into the previous record's 'right' field. */
167 for (i = 0; i < dpool->size; ) {
168 if (DP_IS_USED(dpool->entry[i].flags)) {
172 adj_list->entry[adj_list->size].index = i;
173 adj_list->entry[adj_list->size].size =
174 DP_FLAGS_SIZE(dpool->entry[i].flags);
175 adj_list->entry[adj_list->size].left = count;
177 if (adj_list->size > 0 && used == 1)
178 adj_list->entry[adj_list->size - 1].right = count;
/* A used run is homogeneous — skip over it in one step. */
184 i += DP_FLAGS_SIZE(dpool->entry[i].flags);
193 * Using the size of the largest free space available
194 * select the adjacency list entry of that size with
195 * the largest left + right + size count. If there
196 * are no entries of that size then decrement the size
/* Candidate selection: prefer the move that unlocks the most adjacent
 * free space. */
203 for (size = largest_free_size; size > 0; size--) {
204 for (i = 0; i < adj_list->size; i++) {
205 if (adj_list->entry[i].size == size &&
207 adj_list->entry[i].left +
208 adj_list->entry[i].right) > max)) {
210 adj_list->entry[i].left +
211 adj_list->entry[i].right;
213 max_index = adj_list->entry[i].index;
222 * If the max entry is smaller than the largest_free_size
223 * find the first entry in the free list that it can fit into.
225 if (max_size < largest_free_size) {
226 for (i = 0; i < free_list->size; i++) {
227 if (free_list->entry[i].size >= max_size) {
228 largest_free_index = i;
235 * If we have a contender then move it to the new spot.
238 rc = dpool_move(dpool,
239 free_list->entry[largest_free_index].index,
/* Report how much contiguous free space the caller now has. */
254 return largest_free_size;
/*
 * dpool_alloc() — allocate <size> contiguous entries from the pool.
 * Returns the first entry's external index (first_entry_index +
 * start_index) on success, DP_INVALID_INDEX on failure. When no
 * contiguous run exists and defrag is requested, compacts the pool via
 * dpool_defrag() and retries. (Fragment: declarations and the retry
 * loop edges are not visible.)
 */
258 uint32_t dpool_alloc(struct dpool *dpool,
265 uint32_t first_entry_index;
/* Reject zero-size and oversize requests outright. */
268 if (size > dpool->max_alloc_size || size == 0)
269 return DP_INVALID_INDEX;
272 * Defrag requires EM move support.
274 if (defrag != DP_DEFRAG_NONE &&
275 dpool->move_callback == NULL)
276 return DP_INVALID_INDEX;
280 * find <size> consecutive free entries
282 for (i = 0; i < dpool->size; i++) {
283 if (DP_IS_FREE(dpool->entry[i].flags)) {
285 first_entry_index = i;
/* Claim the run: every entry's flags record the run size; the start
 * entry presumably also gets a start flag OR'd in on the line that is
 * not visible here (294) — TODO confirm. */
290 for (j = 0; j < size; j++) {
291 dpool->entry[j + first_entry_index].flags = size;
293 dpool->entry[j + first_entry_index].flags |=
/* NOTE(review): indexes with 'i' rather than first_entry_index —
 * confirm 'i' still equals the run start at this point. */
297 dpool->entry[i].entry_data = 0UL;
298 return (first_entry_index + dpool->start_index);
306 * If defragging is enabled, attempt it now.
308 if (defrag != DP_DEFRAG_NONE) {
309 rc = dpool_defrag(dpool, size, defrag);
312 return DP_INVALID_INDEX;
318 * If the defrag created enough space then try the
319 * alloc again else quit.
/* dpool_defrag() returns the largest free run; smaller than the
 * request means the retry cannot succeed. */
321 if ((uint32_t)rc < size)
325 return DP_INVALID_INDEX;
/*
 * dpool_free() — return a previously allocated run to the pool.
 * 'index' is the external index handed out by dpool_alloc(); it is
 * converted back to an array offset with start_index. Only a run's
 * start entry is accepted; the whole run (size entries) has its flags
 * cleared. (Fragment: the non-start and error return paths are not
 * visible.)
 */
328 int dpool_free(struct dpool *dpool,
332 int start = (index - dpool->start_index);
338 if (DP_IS_START(dpool->entry[start].flags)) {
339 size = DP_FLAGS_SIZE(dpool->entry[start].flags);
/* A start entry whose size is zero or beyond max_alloc_size indicates
 * corrupted flags — bail out (handling on a line not visible). */
340 if (size > dpool->max_alloc_size || size == 0)
343 for (i = start; i < (start + size); i++)
344 dpool->entry[i].flags = 0;
/*
 * dpool_free_all() — release every allocation in the pool.
 * Calls dpool_free() with each entry's recorded external index;
 * dpool_free() itself only acts on run-start entries (DP_IS_START),
 * so non-start entries are harmlessly skipped.
 */
352 void dpool_free_all(struct dpool *dpool)
356 for (i = 0; i < dpool->size; i++)
357 dpool_free(dpool, dpool->entry[i].index);
360 int dpool_set_entry_data(struct dpool *dpool,
364 int start = (index - dpool->start_index);
369 if (DP_IS_START(dpool->entry[start].flags)) {
370 dpool->entry[start].entry_data = entry_data;