1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
14 #include <rte_launch.h>
15 #include <rte_per_lcore.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_common.h>
19 #include <rte_spinlock.h>
21 #include "eal_memalloc.h"
22 #include "malloc_elem.h"
23 #include "malloc_heap.h"
25 #define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
/*
 * Initialize a malloc element header: the visible statements clear the
 * element's free-list linkage and mark it ELEM_FREE.
 * NOTE(review): this view of the file is lossy (interior lines elided);
 * the heap/msl/size field assignments are presumably on missing lines.
 */
28 * Initialize a general malloc_elem header structure
31 malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,
32 struct rte_memseg_list *msl, size_t size)
/* detach the element from any free list and mark it as available */
38 memset(&elem->free_list, 0, sizeof(elem->free_list));
39 elem->state = ELEM_FREE;
/*
 * Insert 'elem' at its address-ordered position in the heap's doubly linked
 * element list.  The search walks from whichever heap end (first/last) is
 * closer to the new element's address.
 * NOTE(review): several guard/else lines are elided in this lossy view;
 * comments below annotate only the visible statements.
 */
47 malloc_elem_insert(struct malloc_elem *elem)
49 struct malloc_elem *prev_elem, *next_elem;
50 struct malloc_heap *heap = elem->heap;
52 /* first and last elements must be both NULL or both non-NULL */
53 if ((heap->first == NULL) != (heap->last == NULL)) {
54 RTE_LOG(ERR, EAL, "Heap is probably corrupt\n");
/* empty heap: elided lines presumably make elem both first and last */
58 if (heap->first == NULL && heap->last == NULL) {
64 } else if (elem < heap->first) {
65 /* if lower than start */
67 next_elem = heap->first;
69 } else if (elem > heap->last) {
70 /* if higher than end */
71 prev_elem = heap->last;
75 /* the new memory is somewhere inbetween start and end */
76 uint64_t dist_from_start, dist_from_end;
78 dist_from_end = RTE_PTR_DIFF(heap->last, elem);
79 dist_from_start = RTE_PTR_DIFF(elem, heap->first);
81 /* check which is closer, and find closest list entries */
82 if (dist_from_start < dist_from_end) {
/* closer to the start: walk forward from heap->first */
83 prev_elem = heap->first;
84 while (prev_elem->next < elem)
85 prev_elem = prev_elem->next;
86 next_elem = prev_elem->next;
/* closer to the end (elided '} else {'): walk backward from heap->last */
88 next_elem = heap->last;
89 while (next_elem->prev > elem)
90 next_elem = next_elem->prev;
91 prev_elem = next_elem->prev;
95 /* insert new element */
96 elem->prev = prev_elem;
97 elem->next = next_elem;
/* NOTE(review): elided NULL checks presumably guard the neighbor updates
 * below and update heap->first/heap->last for end insertions. */
99 prev_elem->next = elem;
101 next_elem->prev = elem;
/*
 * Thin wrapper asking the EAL memalloc layer whether [start, start+size)
 * is physically contiguous within memseg list 'msl'.
 * NOTE(review): the return-type line is elided in this view.
 */
105 * Attempt to find enough physically contiguous memory in this block to store
106 * our data. Assume that element has at least enough space to fit in the data,
107 * so we just check the page addresses.
110 elem_check_phys_contig(const struct rte_memseg_list *msl,
111 void *start, size_t size)
113 return eal_memalloc_is_contig(msl, start, size);
/*
 * Compute the element-header address at which a request of 'size' bytes,
 * with the given alignment / boundary / physical-contiguity constraints,
 * would be placed inside 'elem', or NULL if it cannot fit.  Allocation is
 * carved from the END of the element; each retry iteration moves toward
 * lower addresses.
 * NOTE(review): several lines are elided in this lossy view (e.g. the
 * bound handling fast-path, 'continue'/'return NULL' statements and the
 * elem_size adjustment); comments cover only visible statements.
 */
117 * calculate the starting point of where data of the requested size
118 * and alignment would fit in the current element. If the data doesn't
122 elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
123 size_t bound, bool contig)
125 size_t elem_size = elem->size;
128 * we're allocating from the end, so adjust the size of element by
/* retry with progressively smaller candidate sizes until request can't fit */
131 while (elem_size >= size) {
132 const size_t bmask = ~(bound - 1);
133 uintptr_t end_pt = (uintptr_t)elem +
134 elem_size - MALLOC_ELEM_TRAILER_LEN;
135 uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
137 uintptr_t new_elem_start;
/* if the data would straddle a 'bound' boundary, pull it below the boundary */
140 if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
141 end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
142 new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
144 end_pt = new_data_start + size;
/* still crosses a boundary after adjustment: request cannot be satisfied */
146 if (((end_pt - 1) & bmask) != (new_data_start & bmask))
150 new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
152 /* if the new start point is before the exist start,
155 if (new_elem_start < (uintptr_t)elem)
159 size_t new_data_size = end_pt - new_data_start;
162 * if physical contiguousness was requested and we
163 * couldn't fit all data into one physically contiguous
164 * block, try again with lower addresses.
166 if (!elem_check_phys_contig(elem->msl,
167 (void *)new_data_start,
/* all constraints satisfied: this is where the new element header goes */
173 return (void *)new_elem_start;
/*
 * Predicate: can 'elem' satisfy a request of 'size' bytes with the given
 * align/bound/contig constraints?  Pure delegation to elem_start_pt().
 */
179 * use elem_start_pt to determine if we get meet the size and
180 * alignment request from the current element
183 malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
184 size_t bound, bool contig)
186 return elem_start_pt(elem, size, align, bound, contig) != NULL;
/*
 * Split 'elem' in two at address 'split_pt': the original element keeps the
 * lower part, a freshly initialized element takes the remainder, and the
 * doubly linked element list is patched up.
 */
190 * split an existing element into two smaller elements at the given
191 * split_pt parameter.
194 split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
196 struct malloc_elem *next_elem = elem->next;
197 const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
198 const size_t new_elem_size = elem->size - old_elem_size;
200 malloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size);
201 split_pt->prev = elem;
202 split_pt->next = next_elem;
/* NOTE(review): an elided NULL check presumably selects between the two
 * lines below — patch next_elem->prev when a successor exists, otherwise
 * the split point becomes the heap's last element. */
204 next_elem->prev = split_pt;
206 elem->heap->last = split_pt;
207 elem->next = split_pt;
208 elem->size = old_elem_size;
/*
 * Unlink 'elem' from the heap's doubly linked element list, updating
 * heap->first/heap->last when the removed element sat at an end.
 * NOTE(review): the lines loading 'next'/'prev' from elem and the
 * symmetric neighbor-pointer fix-ups are elided in this lossy view.
 */
213 * our malloc heap is a doubly linked list, so doubly remove our element.
215 static void __rte_unused
216 remove_elem(struct malloc_elem *elem)
218 struct malloc_elem *next, *prev;
225 elem->heap->last = prev;
229 elem->heap->first = next;
/* True when elem's successor starts exactly at elem's end (no gap between). */
236 next_elem_is_adjacent(struct malloc_elem *elem)
238 return elem->next == RTE_PTR_ADD(elem, elem->size);
/* True when elem starts exactly where its predecessor ends (no gap between). */
242 prev_elem_is_adjacent(struct malloc_elem *elem)
244 return elem == RTE_PTR_ADD(elem->prev, elem->prev->size);
248 * Given an element size, compute its freelist index.
249 * We free an element into the freelist containing similarly-sized elements.
250 * We try to allocate elements starting with the freelist containing
251 * similarly-sized elements, and if necessary, we search freelists
252 * containing larger elements.
254 * Example element size ranges for a heap with five free lists:
255 * heap->free_head[0] - (0 , 2^8]
256 * heap->free_head[1] - (2^8 , 2^10]
257 * heap->free_head[2] - (2^10 ,2^12]
258 * heap->free_head[3] - (2^12, 2^14]
259 * heap->free_head[4] - (2^14, MAX_SIZE]
262 malloc_elem_free_list_index(size_t size)
264 #define MALLOC_MINSIZE_LOG2 8
265 #define MALLOC_LOG2_INCREMENT 2
/* NOTE(review): local declarations ('index', 'log2') and the first return
 * are on lines elided from this view. */
/* smallest bucket holds everything up to 2^MALLOC_MINSIZE_LOG2 bytes */
270 if (size <= (1UL << MALLOC_MINSIZE_LOG2))
273 /* Find next power of 2 >= size. */
274 log2 = sizeof(size) * 8 - __builtin_clzl(size-1);
276 /* Compute freelist index, based on log2(size). */
277 index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
278 MALLOC_LOG2_INCREMENT;
/* clamp oversized elements into the last (largest) free list */
280 return index <= RTE_HEAP_NUM_FREELISTS-1?
281 index: RTE_HEAP_NUM_FREELISTS-1;
/*
 * Mark 'elem' free and push it onto the free-list bucket chosen by its
 * usable size (total size minus the element header).
 */
285 * Add the specified element to its heap's free list.
288 malloc_elem_free_list_insert(struct malloc_elem *elem)
292 idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
293 elem->state = ELEM_FREE;
294 LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
/* Unlink 'elem' from whichever free list currently holds it. */
298 * Remove the specified element from its heap's free list.
301 malloc_elem_free_list_remove(struct malloc_elem *elem)
303 LIST_REMOVE(elem, free_list);
/*
 * Carve an allocation of 'size' bytes out of free element 'elem'.  The data
 * is placed at the end of the element (see elem_start_pt); leftover space
 * after the data is split off as a new free element when large enough, and
 * leftover space before the data is either absorbed as pad or split off as
 * a free element.
 * NOTE(review): the return statements and some closing braces are on lines
 * elided from this lossy view.
 */
307 * reserve a block of data in an existing malloc_elem. If the malloc_elem
308 * is much larger than the data block requested, we split the element in two.
309 * This function is only called from malloc_heap_alloc so parameter checking
310 * is not done here, as it's done there previously.
313 malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
314 size_t bound, bool contig)
316 struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
318 const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
319 const size_t trailer_size = elem->size - old_elem_size - size -
320 MALLOC_ELEM_OVERHEAD;
322 malloc_elem_free_list_remove(elem);
324 if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
325 /* split it, too much free space after elem */
326 struct malloc_elem *new_free_elem =
327 RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
329 split_elem(elem, new_free_elem);
330 malloc_elem_free_list_insert(new_free_elem);
/* keep heap->last accurate when the tail element got split */
332 if (elem == elem->heap->last)
333 elem->heap->last = new_free_elem;
/* leading space too small for a standalone element: absorb it as pad */
336 if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
337 /* don't split it, pad the element instead */
338 elem->state = ELEM_BUSY;
339 elem->pad = old_elem_size;
341 /* put a dummy header in padding, to point to real element header */
342 if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
343 * is cache-line aligned */
344 new_elem->pad = elem->pad;
345 new_elem->state = ELEM_PAD;
346 new_elem->size = elem->size - elem->pad;
347 set_header(new_elem);
/* NOTE(review): a return for the pad path is presumably on the elided
 * lines between here and the split path below. */
353 /* we are going to split the element in two. The original element
354 * remains free, and the new element is the one allocated.
355 * Re-insert original element, in case its new size makes it
356 * belong on a different list.
358 split_elem(elem, new_elem);
359 new_elem->state = ELEM_BUSY;
360 malloc_elem_free_list_insert(elem);
/*
 * Merge contiguous 'elem2' into 'elem1': sizes are summed and the element
 * list is fixed up.
 * NOTE(review): the NULL check selecting between patching next->prev and
 * updating heap->last is on lines elided from this view.
 */
366 * join two struct malloc_elem together. elem1 and elem2 must
367 * be contiguous in memory.
370 join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
372 struct malloc_elem *next = elem2->next;
373 elem1->size += elem2->size;
377 elem1->heap->last = elem1;
/*
 * Merge 'elem' with its next and/or previous neighbor when that neighbor
 * exists, is ELEM_FREE and is physically adjacent.  The absorbed boundary
 * bytes (old trailer + header) are zeroed after each join.
 * NOTE(review): the declaration of 'erase' and the function's return are
 * on lines elided from this view; after a backward merge the surviving
 * element is 'new_elem' (the old predecessor).
 */
382 malloc_elem_join_adjacent_free(struct malloc_elem *elem)
385 * check if next element exists, is adjacent and is free, if so join
386 * with it, need to remove from free list.
388 if (elem->next != NULL && elem->next->state == ELEM_FREE &&
389 next_elem_is_adjacent(elem)) {
392 /* we will want to erase the trailer and header */
393 erase = RTE_PTR_SUB(elem->next, MALLOC_ELEM_TRAILER_LEN);
395 /* remove from free list, join to this one */
396 malloc_elem_free_list_remove(elem->next);
397 join_elem(elem, elem->next);
399 /* erase header and trailer */
400 memset(erase, 0, MALLOC_ELEM_OVERHEAD);
404 * check if prev element exists, is adjacent and is free, if so join
405 * with it, need to remove from free list.
407 if (elem->prev != NULL && elem->prev->state == ELEM_FREE &&
408 prev_elem_is_adjacent(elem)) {
409 struct malloc_elem *new_elem;
412 /* we will want to erase trailer and header */
413 erase = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
415 /* remove from free list, join to this one */
416 malloc_elem_free_list_remove(elem->prev);
418 new_elem = elem->prev;
419 join_elem(new_elem, elem);
421 /* erase header and trailer */
422 memset(erase, 0, MALLOC_ELEM_OVERHEAD);
/*
 * Return 'elem' to the heap: capture the user data region first, merge
 * with adjacent free neighbors, reinsert the (possibly grown/moved)
 * element on the proper free list, zero the old payload and decrement the
 * heap's allocation count.
 * NOTE(review): surrounding lines (e.g. heap locking and the return value)
 * are elided from this lossy view.
 */
431 * free a malloc_elem block by adding it to the free list. If the
432 * blocks either immediately before or immediately after newly freed block
433 * are also free, the blocks are merged together.
436 malloc_elem_free(struct malloc_elem *elem)
/* remember payload location/length before merging can move the element */
441 ptr = RTE_PTR_ADD(elem, sizeof(*elem));
442 data_len = elem->size - MALLOC_ELEM_OVERHEAD;
444 elem = malloc_elem_join_adjacent_free(elem);
446 malloc_elem_free_list_insert(elem);
450 /* decrease heap's count of allocated elements */
451 elem->heap->alloc_count--;
/* scrub the freed payload so stale data does not leak to the next user */
453 memset(ptr, 0, data_len);
/*
 * Carve the region [start, start+len) out of 'elem' so the heap no longer
 * touches it (e.g. the backing memory is being unmapped), fixing up or
 * padding the neighboring elements on both sides.
 * NOTE(review): many guard/else/closing lines are elided from this lossy
 * view (including the assignments of hide_start/prev/next); comments below
 * annotate only the visible statements.
 */
458 /* assume all checks were already done */
460 malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
462 struct malloc_elem *hide_start, *hide_end, *prev, *next;
463 size_t len_before, len_after;
466 hide_end = RTE_PTR_ADD(start, len);
471 /* we cannot do anything with non-adjacent elements */
472 if (next && next_elem_is_adjacent(elem)) {
473 len_after = RTE_PTR_DIFF(next, hide_end);
474 if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
/* enough room after the region for a standalone free element */
476 split_elem(elem, hide_end);
478 malloc_elem_free_list_insert(hide_end);
479 } else if (len_after >= MALLOC_ELEM_HEADER_LEN) {
480 /* shrink current element */
481 elem->size -= len_after;
482 memset(hide_end, 0, sizeof(*hide_end));
484 /* copy next element's data to our pad */
485 memcpy(hide_end, next, sizeof(*hide_end));
487 /* pad next element */
488 next->state = ELEM_PAD;
489 next->pad = len_after;
490 next->size -= len_after;
492 /* next element busy, would've been merged otherwise */
493 hide_end->pad = len_after;
494 hide_end->size += len_after;
496 /* adjust pointers to point to our new pad */
498 next->next->prev = hide_end;
499 elem->next = hide_end;
500 } else if (len_after > 0) {
501 RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
506 /* we cannot do anything with non-adjacent elements */
507 if (prev && prev_elem_is_adjacent(elem)) {
508 len_before = RTE_PTR_DIFF(hide_start, elem);
509 if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
/* enough room before the region for a standalone free element */
511 split_elem(elem, hide_start);
516 malloc_elem_free_list_insert(prev);
517 } else if (len_before > 0) {
519 * unlike with elements after current, here we don't
520 * need to pad elements, but rather just increase the
521 * size of previous element, copy the old header and set
524 void *trailer = RTE_PTR_ADD(prev,
525 prev->size - MALLOC_ELEM_TRAILER_LEN);
/* relocate elem's header to hide_start and grow prev over the gap */
527 memcpy(hide_start, elem, sizeof(*elem));
528 hide_start->size = len;
530 prev->size += len_before;
533 /* update pointers */
534 prev->next = hide_start;
536 next->prev = hide_start;
538 /* erase old trailer */
539 memset(trailer, 0, MALLOC_ELEM_TRAILER_LEN);
540 /* erase old header */
541 memset(elem, 0, sizeof(*elem));
/*
 * Grow allocated element 'elem' in place to hold 'size' user bytes by
 * absorbing a free, physically adjacent successor; any large surplus is
 * split back off onto a free list.
 * NOTE(review): the success/failure return statements attached to the
 * visible guards are on lines elided from this view.
 */
551 * attempt to resize a malloc_elem by expanding into any free space
552 * immediately after it in memory.
555 malloc_elem_resize(struct malloc_elem *elem, size_t size)
/* target total size: payload + existing pad + header/trailer overhead */
557 const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
559 /* if we request a smaller size, then always return ok */
560 if (elem->size >= new_size)
563 /* check if there is a next element, it's free and adjacent */
564 if (!elem->next || elem->next->state != ELEM_FREE ||
565 !next_elem_is_adjacent(elem))
/* even the combined elements would be too small */
567 if (elem->size + elem->next->size < new_size)
570 /* we now know the element fits, so remove from free list,
573 malloc_elem_free_list_remove(elem->next);
574 join_elem(elem, elem->next);
576 if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
577 /* now we have a big block together. Lets cut it down a bit, by splitting */
578 struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
579 split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
580 split_elem(elem, split_pt);
581 malloc_elem_free_list_insert(split_pt);
/* Map an elem_state value to a human-readable name for dumps.
 * NOTE(review): the function body is entirely elided from this view. */
586 static inline const char *
587 elem_state_to_str(enum elem_state state)
/*
 * Print a human-readable description of 'elem' (address, state, size, pad,
 * neighbor pointers) to stream 'f'.
 */
601 malloc_elem_dump(const struct malloc_elem *elem, FILE *f)
603 fprintf(f, "Malloc element at %p (%s)\n", elem,
604 elem_state_to_str(elem->state));
605 fprintf(f, " len: 0x%zx pad: 0x%" PRIx32 "\n", elem->size, elem->pad);
606 fprintf(f, " prev: %p next: %p\n", elem->prev, elem->next);