net/bnxt: add functions to get shared table increments
drivers/net/bnxt/tf_core/dpool.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2021 Broadcom
 * All rights reserved.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

#include <rte_malloc.h>

#include "tfp.h"
#include "dpool.h"

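/**
 * Initialize a dynamic pool of "size" entries covering indices
 * start_index .. start_index + size - 1. The entry array is allocated
 * with tfp_calloc() and every entry starts out free. The optional
 * move_callback (with user_data) is invoked whenever an in-use entry
 * is relocated during defragmentation.
 *
 * Returns 0 on success or the tfp_calloc() error code on failure.
 */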
int dpool_init(struct dpool *dpool,
               uint32_t start_index,
               uint32_t size,
               uint8_t max_alloc_size,
               void *user_data,
               int (*move_callback)(void *, uint64_t, uint32_t))
{
        uint32_t i;
        int rc;
        struct tfp_calloc_parms parms;

        parms.nitems = size;
        parms.size = sizeof(struct dpool_entry);
        parms.alignment = 0;

        rc = tfp_calloc(&parms);
        if (rc)
                return rc;

        dpool->entry = parms.mem_va;
        dpool->start_index = start_index;
        dpool->size = size;
        dpool->max_alloc_size = max_alloc_size;
        dpool->user_data = user_data;
        dpool->move_callback = move_callback;

        /*
         * Init entries
         */
        for (i = 0; i < size; i++) {
                dpool->entry[i].flags = 0;
                dpool->entry[i].index = start_index;
                dpool->entry[i].entry_data = 0UL;
                start_index++;
        }

        return 0;
}

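/**
 * Move the in-use block starting at src_index into the free block at
 * dst_index. The block's flags and entry data are copied, the owner is
 * notified through move_callback (if one was registered) with the new
 * absolute index, and the source entries are marked free.
 *
 * Returns 0 on success or -1 if the destination is not free.
 */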
static int dpool_move(struct dpool *dpool,
                      uint32_t dst_index,
                      uint32_t src_index)
{
        uint32_t size;
        uint32_t i;

        if (!DP_IS_FREE(dpool->entry[dst_index].flags))
                return -1;

        size = DP_FLAGS_SIZE(dpool->entry[src_index].flags);

        dpool->entry[dst_index].flags = dpool->entry[src_index].flags;
        dpool->entry[dst_index].entry_data = dpool->entry[src_index].entry_data;

        if (dpool->move_callback != NULL) {
                dpool->move_callback(dpool->user_data,
                                     dpool->entry[src_index].entry_data,
                                     dst_index + dpool->start_index);
        }

        dpool->entry[src_index].flags = 0;
        dpool->entry[src_index].entry_data = 0UL;

        for (i = 1; i < size; i++) {
                dpool->entry[dst_index + i].flags = size;
                dpool->entry[src_index + i].flags = 0;
        }

        return 0;
}

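/**
 * Defragment the pool. Each pass builds a list of free runs and a list
 * of in-use blocks adjacent to free space, then moves the block with
 * the largest combined (left + size + right) span into the largest
 * free run. Passes repeat until no candidate remains or, when defrag
 * is DP_DEFRAG_TO_FIT, until a free run of at least entry_size
 * contiguous entries exists.
 *
 * Returns the size of the largest free run found, or -ENOMEM if the
 * temporary work lists cannot be allocated.
 */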
int dpool_defrag(struct dpool *dpool,
                 uint32_t entry_size,
                 uint8_t defrag)
{
        struct dpool_free_list *free_list;
        struct dpool_adj_list *adj_list;
        uint32_t count;
        uint32_t index = 0;
        uint32_t used;
        uint32_t i;
        uint32_t size;
        uint32_t largest_free_index = 0;
        uint32_t largest_free_size;
        uint32_t max;
        uint32_t max_index;
        uint32_t max_size = 0;
        int rc;

        free_list = rte_zmalloc("dpool_free_list",
                                sizeof(struct dpool_free_list), 0);
        if (free_list == NULL) {
                TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
                return -ENOMEM;
        }

        adj_list = rte_zmalloc("dpool_adjacent_list",
                               sizeof(struct dpool_adj_list), 0);
        if (adj_list == NULL) {
                TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
                rte_free(free_list);
                return -ENOMEM;
        }

        while (1) {
                /*
                 * Create list of free entries
                 */
                free_list->size = 0;
                largest_free_size = 0;
                largest_free_index = 0;
                count = 0;

                for (i = 0; i < dpool->size; i++) {
                        if (DP_IS_FREE(dpool->entry[i].flags)) {
                                if (count == 0)
                                        index = i;
                                count++;
                        } else if (count > 0) {
                                free_list->entry[free_list->size].index = index;
                                free_list->entry[free_list->size].size = count;

                                if (count > largest_free_size) {
                                        largest_free_index = free_list->size;
                                        largest_free_size = count;
                                }

                                free_list->size++;
                                count = 0;
                        }
                }

                if (free_list->size == 0)
                        largest_free_size = count;

                /*
                 * If using defrag to fit and there's a large enough
                 * space then we are done.
                 */
                if (defrag == DP_DEFRAG_TO_FIT &&
                    largest_free_size >= entry_size)
                        goto done;

                /*
                 * Create list of entries adjacent to free entries
                 */
                count = 0;
                adj_list->size = 0;
                used = 0;

                for (i = 0; i < dpool->size; ) {
                        if (DP_IS_USED(dpool->entry[i].flags)) {
                                used++;

                                if (count > 0) {
                                        adj_list->entry[adj_list->size].index = i;
                                        adj_list->entry[adj_list->size].size =
                                                DP_FLAGS_SIZE(dpool->entry[i].flags);
                                        adj_list->entry[adj_list->size].left = count;

                                        if (adj_list->size > 0 && used == 1)
                                                adj_list->entry[adj_list->size - 1].right = count;

                                        adj_list->size++;
                                }

                                count = 0;
                                i += DP_FLAGS_SIZE(dpool->entry[i].flags);
                        } else {
                                used = 0;
                                count++;
                                i++;
                        }
                }

                /*
                 * Using the size of the largest free space available
                 * select the adjacency list entry of that size with
                 * the largest left + right + size count. If there
                 * are no entries of that size then decrement the size
                 * and try again.
                 */
                max = 0;
                max_index = 0;
                max_size = 0;

                for (size = largest_free_size; size > 0; size--) {
                        for (i = 0; i < adj_list->size; i++) {
                                if (adj_list->entry[i].size == size &&
                                    ((size +
                                      adj_list->entry[i].left +
                                      adj_list->entry[i].right) > max)) {
                                        max = size +
                                                adj_list->entry[i].left +
                                                adj_list->entry[i].right;
                                        max_size = size;
                                        max_index = adj_list->entry[i].index;
                                }
                        }

                        if (max)
                                break;
                }

                /*
                 * If the max entry is smaller than the largest_free_size
                 * find the first entry in the free list that it can fit into.
                 */
                if (max_size < largest_free_size) {
                        for (i = 0; i < free_list->size; i++) {
                                if (free_list->entry[i].size >= max_size) {
                                        largest_free_index = i;
                                        break;
                                }
                        }
                }

                /*
                 * If we have a contender then move it to the new spot.
                 */
                if (max) {
                        rc = dpool_move(dpool,
                                        free_list->entry[largest_free_index].index,
                                        max_index);
                        if (rc) {
                                rte_free(free_list);
                                rte_free(adj_list);
                                return rc;
                        }
                } else {
                        break;
                }
        }

done:
        rte_free(free_list);
        rte_free(adj_list);
        return largest_free_size;
}

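/**
 * Allocate "size" contiguous entries from the pool. If no contiguous
 * run is available and defrag is not DP_DEFRAG_NONE, the pool is
 * defragmented and the search is retried.
 *
 * Returns the absolute index of the first entry (offset by
 * start_index) or DP_INVALID_INDEX on failure.
 */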
uint32_t dpool_alloc(struct dpool *dpool,
                     uint32_t size,
                     uint8_t defrag)
{
        uint32_t i;
        uint32_t j;
        uint32_t count;
        uint32_t first_entry_index = 0;
        int rc;

        if (size > dpool->max_alloc_size || size == 0)
                return DP_INVALID_INDEX;

        /*
         * Defrag requires EM move support.
         */
        if (defrag != DP_DEFRAG_NONE &&
            dpool->move_callback == NULL)
                return DP_INVALID_INDEX;

        while (1) {
                /*
                 * Find <size> consecutive free entries.
                 */
                count = 0;

                for (i = 0; i < dpool->size; i++) {
                        if (DP_IS_FREE(dpool->entry[i].flags)) {
                                if (count == 0)
                                        first_entry_index = i;

                                count++;

                                if (count == size) {
                                        for (j = 0; j < size; j++) {
                                                dpool->entry[j + first_entry_index].flags = size;
                                                if (j == 0)
                                                        dpool->entry[j + first_entry_index].flags |=
                                                                DP_FLAGS_START;
                                        }

                                        dpool->entry[first_entry_index].entry_data = 0UL;
                                        return (first_entry_index + dpool->start_index);
                                }
                        } else {
                                count = 0;
                        }
                }

                /*
                 * If defrag is enabled, defragment the pool and retry.
                 */
                if (defrag != DP_DEFRAG_NONE) {
                        rc = dpool_defrag(dpool, size, defrag);

                        if (rc < 0)
                                return DP_INVALID_INDEX;
                } else {
                        break;
                }

                /*
                 * If the defrag created enough space then try the
                 * alloc again else quit.
                 */
                if ((uint32_t)rc < size)
                        break;
        }

        return DP_INVALID_INDEX;
}

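/**
 * Free the block whose first entry is at the absolute index returned
 * by dpool_alloc().
 *
 * Returns 0 on success or -1 if index does not refer to the start of
 * an allocated block.
 */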
int dpool_free(struct dpool *dpool,
               uint32_t index)
{
        uint32_t i;
        int start = (index - dpool->start_index);
        uint32_t size;

        if (start < 0)
                return -1;

        if (DP_IS_START(dpool->entry[start].flags)) {
                size = DP_FLAGS_SIZE(dpool->entry[start].flags);
                if (size > dpool->max_alloc_size || size == 0)
                        return -1;

                for (i = start; i < (start + size); i++)
                        dpool->entry[i].flags = 0;

                return 0;
        }

        return -1;
}

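/**
 * Free all allocated blocks in the pool by walking every entry and
 * releasing those that mark the start of an allocation.
 */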
void dpool_free_all(struct dpool *dpool)
{
        uint32_t i;

        for (i = 0; i < dpool->size; i++)
                dpool_free(dpool, dpool->entry[i].index);
}

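/**
 * Attach 64 bits of user data to the block starting at the given
 * absolute index. The data is passed back through move_callback if the
 * block is later relocated during defragmentation.
 *
 * Returns 0 on success or -1 if index does not refer to the start of
 * an allocated block.
 */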
int dpool_set_entry_data(struct dpool *dpool,
                         uint32_t index,
                         uint64_t entry_data)
{
        int start = (index - dpool->start_index);

        if (start < 0)
                return -1;

        if (DP_IS_START(dpool->entry[start].flags)) {
                dpool->entry[start].entry_data = entry_data;
                return 0;
        }

        return -1;
}