1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
/*
 * Initialize a dynamic pool: allocate the entry array via tfp_calloc()
 * and mark every slot free.
 *
 * NOTE(review): sparse excerpt — the leading number on each line is the
 * original file's line number; gaps mean lines are not visible here.
 */
13 int dpool_init(struct dpool *dpool,
16 uint8_t max_alloc_size,
/* move_callback: invoked when defrag relocates an entry; may be NULL. */
18 int (*move_callback)(void *, uint64_t, uint32_t))
22 struct tfp_calloc_parms parms;
/* One dpool_entry per pool slot. */
25 parms.size = sizeof(struct dpool_entry);
28 rc = tfp_calloc(&parms);
33 dpool->entry = parms.mem_va;
34 dpool->start_index = start_index;
36 dpool->max_alloc_size = max_alloc_size;
37 dpool->user_data = user_data;
38 dpool->move_callback = move_callback;
/* Mark all entries free (flags == 0) and clear per-entry data. */
42 for (i = 0; i < size; i++) {
43 dpool->entry[i].flags = 0;
/*
 * NOTE(review): every entry's .index is set to the same start_index
 * here — presumably overwritten elsewhere (line hidden in this
 * excerpt); confirm against the full source.
 */
44 dpool->entry[i].index = start_index;
45 dpool->entry[i].entry_data = 0UL;
/*
 * Move a multi-slot allocation from src_index to dst_index, provided the
 * destination slot is free.  Used by dpool_defrag() to compact the pool.
 *
 * NOTE(review): sparse excerpt — leading numbers are original file line
 * numbers; some statements (declarations, closing braces) are hidden.
 */
52 static int dpool_move(struct dpool *dpool,
58 if (DP_IS_FREE(dpool->entry[dst_index].flags)) {
/* Allocation size is encoded in the source entry's flags. */
59 size = DP_FLAGS_SIZE(dpool->entry[src_index].flags);
/* Copy flags (size + START bit) and user data to the new head slot. */
61 dpool->entry[dst_index].flags = dpool->entry[src_index].flags;
62 dpool->entry[dst_index].entry_data = dpool->entry[src_index].entry_data;
/* Tell the owner its entry now lives at the new external index. */
64 if (dpool->move_callback != NULL) {
65 dpool->move_callback(dpool->user_data,
66 dpool->entry[src_index].entry_data,
67 dst_index + dpool->start_index);
/* Release the old head slot. */
70 dpool->entry[src_index].flags = 0;
71 dpool->entry[src_index].entry_data = 0UL;
/* Claim the remaining (size - 1) trailing slots and free the old ones. */
73 for (i = 1; i < size; i++) {
74 dpool->entry[dst_index + i].flags = size;
75 dpool->entry[src_index + i].flags = 0;
/*
 * Defragment the pool by moving used allocations into free gaps, returning
 * the largest contiguous free run found (largest_free_size).
 *
 * NOTE(review): sparse excerpt — leading numbers are original file line
 * numbers; control-flow lines (loop bodies, braces, gotos/cleanup) are
 * partially hidden, so comments below describe only what is visible.
 */
84 int dpool_defrag(struct dpool *dpool,
88 struct dpool_free_list *free_list;
89 struct dpool_adj_list *adj_list;
90 struct tfp_calloc_parms parms;
96 uint32_t largest_free_index = 0;
97 uint32_t largest_free_size;
100 uint32_t max_size = 0;
/* Scratch list of contiguous free runs. */
104 parms.size = sizeof(struct dpool_free_list);
107 rc = tfp_calloc(&parms);
112 free_list = (struct dpool_free_list *)parms.mem_va;
113 if (free_list == NULL) {
114 TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
/* Scratch list of used entries adjacent to free runs. */
119 parms.size = sizeof(struct dpool_adj_list);
122 rc = tfp_calloc(&parms);
127 adj_list = (struct dpool_adj_list *)parms.mem_va;
128 if (adj_list == NULL) {
129 TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
135 * Create list of free entries
138 largest_free_size = 0;
139 largest_free_index = 0;
/* Scan the pool, recording each free run's start index and length. */
143 for (i = 0; i < dpool->size; i++) {
144 if (DP_IS_FREE(dpool->entry[i].flags)) {
/* End of a free run: close it out into free_list. */
148 } else if (count > 0) {
149 free_list->entry[free_list->size].index = index;
150 free_list->entry[free_list->size].size = count;
/* Track the largest free run seen so far. */
152 if (count > largest_free_size) {
153 largest_free_index = free_list->size;
154 largest_free_size = count;
/* Pool ended in a free run with nothing recorded yet. */
162 if (free_list->size == 0)
163 largest_free_size = count;
166 * If using defrag to fit and there's a large enough
167 * space then we are done.
169 if (defrag == DP_DEFRAG_TO_FIT &&
170 largest_free_size >= entry_size)
174 * Create list of entries adjacent to free entries
/* Walk allocations (stepping by their size), noting free space on
 * the left/right of each used entry. */
180 for (i = 0; i < dpool->size; ) {
181 if (DP_IS_USED(dpool->entry[i].flags)) {
185 adj_list->entry[adj_list->size].index = i;
186 adj_list->entry[adj_list->size].size =
187 DP_FLAGS_SIZE(dpool->entry[i].flags);
188 adj_list->entry[adj_list->size].left = count;
/* The free run just crossed is the previous entry's right gap. */
190 if (adj_list->size > 0 && used == 1)
191 adj_list->entry[adj_list->size - 1].right = count;
/* Skip over the whole allocation, not one slot at a time. */
197 i += DP_FLAGS_SIZE(dpool->entry[i].flags);
206 * Using the size of the largest free space available
207 * select the adjacency list entry of that size with
208 * the largest left + right + size count. If there
209 * are no entries of that size then decrement the size
/* Pick the move candidate that frees up the most adjacent space. */
216 for (size = largest_free_size; size > 0; size--) {
217 for (i = 0; i < adj_list->size; i++) {
218 if (adj_list->entry[i].size == size &&
220 adj_list->entry[i].left +
221 adj_list->entry[i].right) > max)) {
223 adj_list->entry[i].left +
224 adj_list->entry[i].right;
226 max_index = adj_list->entry[i].index;
235 * If the max entry is smaller than the largest_free_size
236 * find the first entry in the free list that it can fit in to.
238 if (max_size < largest_free_size) {
239 for (i = 0; i < free_list->size; i++) {
240 if (free_list->entry[i].size >= max_size) {
241 largest_free_index = i;
248 * If we have a contender then move it to the new spot.
251 rc = dpool_move(dpool,
252 free_list->entry[largest_free_index].index,
/* Return the size of the largest contiguous free run. */
267 return largest_free_size;
/*
 * Allocate <size> consecutive free slots; on success return the external
 * index (pool index + start_index), else DP_INVALID_INDEX.  If no run is
 * found and defrag is enabled, defragment and retry.
 *
 * NOTE(review): sparse excerpt — leading numbers are original file line
 * numbers; some statements and braces are hidden.
 */
270 uint32_t dpool_alloc(struct dpool *dpool,
277 uint32_t first_entry_index;
/* Reject zero-size and over-limit requests up front. */
280 if (size > dpool->max_alloc_size || size == 0)
281 return DP_INVALID_INDEX;
284 * Defrag requires EM move support.
286 if (defrag != DP_DEFRAG_NONE &&
287 dpool->move_callback == NULL)
288 return DP_INVALID_INDEX;
292 * find <size> consecutive free entries
294 for (i = 0; i < dpool->size; i++) {
295 if (DP_IS_FREE(dpool->entry[i].flags)) {
297 first_entry_index = i;
/* Claim the run: every slot records the run size in its flags. */
302 for (j = 0; j < size; j++) {
303 dpool->entry[j + first_entry_index].flags = size;
/* First slot additionally carries the START marker. */
305 dpool->entry[j + first_entry_index].flags |=
/*
 * NOTE(review): this clears entry_data at index i (the *last*
 * slot of the run), not first_entry_index — looks suspicious;
 * confirm against the full source before changing.
 */
309 dpool->entry[i].entry_data = 0UL;
310 return (first_entry_index + dpool->start_index);
318 * If defragging then do it to it
320 if (defrag != DP_DEFRAG_NONE) {
321 rc = dpool_defrag(dpool, size, defrag);
324 return DP_INVALID_INDEX;
330 * If the defrag created enough space then try the
331 * alloc again else quit.
/* dpool_defrag() returned the largest free run it produced. */
333 if ((uint32_t)rc < size)
337 return DP_INVALID_INDEX;
/*
 * Free the allocation whose external index is <index>.  Only valid when
 * the corresponding slot carries the START flag; the run length is read
 * from the slot's flags and all its slots are cleared.
 *
 * NOTE(review): sparse excerpt — leading numbers are original file line
 * numbers; the error-path lines are hidden here.
 */
340 int dpool_free(struct dpool *dpool,
/* Convert external index back to internal pool index. */
344 int start = (index - dpool->start_index);
350 if (DP_IS_START(dpool->entry[start].flags)) {
351 size = DP_FLAGS_SIZE(dpool->entry[start].flags);
/* Sanity-check the stored size before walking the run. */
352 if (size > dpool->max_alloc_size || size == 0)
/* Clear every slot of the run (flags == 0 means free). */
355 for (i = start; i < (start + size); i++)
356 dpool->entry[i].flags = 0;
/*
 * Free every allocation in the pool by attempting dpool_free() on each
 * slot's recorded index; dpool_free() only acts on START slots, so
 * non-head slots are harmless no-ops.
 */
364 void dpool_free_all(struct dpool *dpool)
368 for (i = 0; i < dpool->size; i++)
369 dpool_free(dpool, dpool->entry[i].index);
/*
 * Attach opaque user data to an existing allocation identified by its
 * external index.  Only the START slot of an allocation stores the data.
 *
 * NOTE(review): sparse excerpt — the error path and return statements
 * are hidden here.
 */
372 int dpool_set_entry_data(struct dpool *dpool,
/* Convert external index back to internal pool index. */
376 int start = (index - dpool->start_index);
381 if (DP_IS_START(dpool->entry[start].flags)) {
382 dpool->entry[start].entry_data = entry_data;