/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
/*
 * If debugging is enabled, freed memory is set to a poison value
 * to catch buggy programs. Otherwise, freed memory is set to zero
 * to avoid having to zero it again in rte_zmalloc().
 */
#ifdef RTE_MALLOC_DEBUG
#define MALLOC_POISON	0x6b
#else
#define MALLOC_POISON	0
#endif
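
/*
 * Note: every element is laid out as [header | data | trailer] in memory;
 * MALLOC_ELEM_HEADER_LEN and MALLOC_ELEM_TRAILER_LEN bound the usable data
 * area, and MALLOC_ELEM_OVERHEAD is their sum (see malloc_elem.h).
 */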
size_t
malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
{
	void *cur_page, *contig_seg_start, *page_end, *cur_seg_end;
	void *data_start, *data_end;
	rte_iova_t expected_iova;
	struct rte_memseg *ms;
	size_t page_sz, cur, max;

	page_sz = (size_t)elem->msl->page_sz;
	data_start = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
	data_end = RTE_PTR_ADD(elem, elem->size - MALLOC_ELEM_TRAILER_LEN);
	/* segment must start after header and with specified alignment */
	contig_seg_start = RTE_PTR_ALIGN_CEIL(data_start, align);

	/* return if aligned address is already out of malloc element */
	if (contig_seg_start > data_end)
		return 0;

	/* if we're in IOVA as VA mode, or if we're in legacy mode with
	 * hugepages, all elements are IOVA-contiguous. however, we can only
	 * make these assumptions about internal memory - externally allocated
	 * segments have to be checked.
	 */
	if (!elem->msl->external &&
			(rte_eal_iova_mode() == RTE_IOVA_VA ||
				(internal_config.legacy_mem &&
					rte_eal_has_hugepages())))
		return RTE_PTR_DIFF(data_end, contig_seg_start);

	cur_page = RTE_PTR_ALIGN_FLOOR(contig_seg_start, page_sz);
	ms = rte_mem_virt2memseg(cur_page, elem->msl);

	/* do first iteration outside the loop */
	page_end = RTE_PTR_ADD(cur_page, page_sz);
	cur_seg_end = RTE_MIN(page_end, data_end);
	cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start) -
			MALLOC_ELEM_TRAILER_LEN;
	max = cur;
	expected_iova = ms->iova + page_sz;
	/* memsegs are contiguous in memory */
	ms++;

	cur_page = RTE_PTR_ADD(cur_page, page_sz);

	while (cur_page < data_end) {
		page_end = RTE_PTR_ADD(cur_page, page_sz);
		cur_seg_end = RTE_MIN(page_end, data_end);

		/* reset start of contiguous segment if unexpected iova */
		if (ms->iova != expected_iova) {
			/* next contiguous segment must start at specified
			 * alignment.
			 */
			contig_seg_start = RTE_PTR_ALIGN(cur_page, align);
			/* new segment start may be on a different page, so
			 * find the page and skip to next iteration to make
			 * sure we're not blowing past data end.
			 */
			ms = rte_mem_virt2memseg(contig_seg_start, elem->msl);
			cur_page = ms->addr;
			/* don't trigger another recalculation */
			expected_iova = ms->iova;
			continue;
		}
		/* cur_seg_end ends on a page boundary or on data end. if we're
		 * looking at data end, then malloc trailer is already included
		 * in the calculations. if we're looking at page end, then we
		 * know there's more data past this page and thus there's space
		 * for malloc element trailer, so don't count it here.
		 */
		cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start);
		/* update max if cur value is bigger */
		if (cur > max)
			max = cur;

		/* move to next page */
		cur_page = page_end;
		expected_iova = ms->iova + page_sz;
		/* memsegs are contiguous in memory */
		ms++;
	}

	return max;
}
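
/*
 * Note: the page-by-page scan above relies on memsegs within one memseg list
 * being virtually contiguous, which is why advancing to the next page only
 * needs ms++ rather than another virt2memseg lookup.
 */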
/*
 * Initialize a general malloc_elem header structure
 */
void
malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,
		struct rte_memseg_list *msl, size_t size,
		struct malloc_elem *orig_elem, size_t orig_size)
{
	elem->heap = heap;
	elem->msl = msl;
	elem->prev = NULL;
	elem->next = NULL;
	memset(&elem->free_list, 0, sizeof(elem->free_list));
	elem->state = ELEM_FREE;
	elem->size = size;
	elem->pad = 0;
	elem->orig_elem = orig_elem;
	elem->orig_size = orig_size;
	set_header(elem);
	set_trailer(elem);
}
void
malloc_elem_insert(struct malloc_elem *elem)
{
	struct malloc_elem *prev_elem, *next_elem;
	struct malloc_heap *heap = elem->heap;

	/* first and last elements must be both NULL or both non-NULL */
	if ((heap->first == NULL) != (heap->last == NULL)) {
		RTE_LOG(ERR, EAL, "Heap is probably corrupt\n");
		return;
	}

	if (heap->first == NULL && heap->last == NULL) {
		/* if empty heap */
		heap->first = elem;
		heap->last = elem;
		prev_elem = NULL;
		next_elem = NULL;
	} else if (elem < heap->first) {
		/* if lower than start */
		prev_elem = NULL;
		next_elem = heap->first;
		heap->first = elem;
	} else if (elem > heap->last) {
		/* if higher than end */
		prev_elem = heap->last;
		next_elem = NULL;
		heap->last = elem;
	} else {
		/* the new memory is somewhere in between start and end */
		uint64_t dist_from_start, dist_from_end;

		dist_from_end = RTE_PTR_DIFF(heap->last, elem);
		dist_from_start = RTE_PTR_DIFF(elem, heap->first);

		/* check which is closer, and find closest list entries */
		if (dist_from_start < dist_from_end) {
			prev_elem = heap->first;
			while (prev_elem->next < elem)
				prev_elem = prev_elem->next;
			next_elem = prev_elem->next;
		} else {
			next_elem = heap->last;
			while (next_elem->prev > elem)
				next_elem = next_elem->prev;
			prev_elem = next_elem->prev;
		}
	}

	/* insert new element */
	elem->prev = prev_elem;
	elem->next = next_elem;
	if (prev_elem)
		prev_elem->next = elem;
	if (next_elem)
		next_elem->prev = elem;
}
/*
 * Attempt to find enough physically contiguous memory in this block to store
 * our data. Assume that element has at least enough space to fit in the data,
 * so we just check the page addresses.
 */
static bool
elem_check_phys_contig(const struct rte_memseg_list *msl,
		void *start, size_t size)
{
	return eal_memalloc_is_contig(msl, start, size);
}
/*
 * calculate the starting point of where data of the requested size
 * and alignment would fit in the current element. If the data doesn't
 * fit, return NULL.
 */
static void *
elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
		size_t bound, bool contig)
{
	size_t elem_size = elem->size;

	/*
	 * we're allocating from the end, so adjust the size of element by
	 * alignment request.
	 */
	while (elem_size >= size) {
		const size_t bmask = ~(bound - 1);
		uintptr_t end_pt = (uintptr_t)elem +
				elem_size - MALLOC_ELEM_TRAILER_LEN;
		uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
				align);
		uintptr_t new_elem_start;

		/* check boundary */
		if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
			end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
			new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
					align);
			end_pt = new_data_start + size;

			if (((end_pt - 1) & bmask) != (new_data_start & bmask))
				return NULL;
		}

		new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;

		/* if the new start point is before the existing start,
		 * it won't fit
		 */
		if (new_elem_start < (uintptr_t)elem)
			return NULL;

		if (contig) {
			size_t new_data_size = end_pt - new_data_start;

			/*
			 * if physical contiguousness was requested and we
			 * couldn't fit all data into one physically contiguous
			 * block, try again with lower addresses.
			 */
			if (!elem_check_phys_contig(elem->msl,
					(void *)new_data_start,
					new_data_size)) {
				elem_size -= align;
				continue;
			}
		}
		return (void *)new_elem_start;
	}
	return NULL;
}
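
/*
 * Note: a non-zero 'bound' requires the data block not to cross a
 * bound-aligned boundary; with bound == 0, bmask evaluates to 0 and the
 * boundary checks above become no-ops.
 */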
/*
 * use elem_start_pt to determine if we can meet the size and
 * alignment request from the current element
 */
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
		size_t bound, bool contig)
{
	return elem_start_pt(elem, size, align, bound, contig) != NULL;
}
/*
 * split an existing element into two smaller elements at the given
 * split_pt parameter.
 */
static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
	struct malloc_elem *next_elem = elem->next;
	const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
	const size_t new_elem_size = elem->size - old_elem_size;

	malloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size,
			elem->orig_elem, elem->orig_size);
	split_pt->prev = elem;
	split_pt->next = next_elem;
	if (next_elem)
		next_elem->prev = split_pt;
	else
		elem->heap->last = split_pt;
	elem->next = split_pt;
	elem->size = old_elem_size;
	set_trailer(elem);
}
/*
 * our malloc heap is a doubly linked list, so doubly remove our element.
 */
static void __rte_unused
remove_elem(struct malloc_elem *elem)
{
	struct malloc_elem *next, *prev;
	next = elem->next;
	prev = elem->prev;

	if (next)
		next->prev = prev;
	else
		elem->heap->last = prev;
	if (prev)
		prev->next = next;
	else
		elem->heap->first = next;

	elem->prev = NULL;
	elem->next = NULL;
}
static int
next_elem_is_adjacent(struct malloc_elem *elem)
{
	return elem->next == RTE_PTR_ADD(elem, elem->size) &&
			elem->next->msl == elem->msl &&
			(!internal_config.match_allocations ||
			 elem->orig_elem == elem->next->orig_elem);
}

static int
prev_elem_is_adjacent(struct malloc_elem *elem)
{
	return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
			elem->prev->msl == elem->msl &&
			(!internal_config.match_allocations ||
			 elem->orig_elem == elem->prev->orig_elem);
}
/*
 * Given an element size, compute its freelist index.
 * We free an element into the freelist containing similarly-sized elements.
 * We try to allocate elements starting with the freelist containing
 * similarly-sized elements, and if necessary, we search freelists
 * containing larger elements.
 *
 * Example element size ranges for a heap with five free lists:
 *   heap->free_head[0] - (0   , 2^8]
 *   heap->free_head[1] - (2^8 , 2^10]
 *   heap->free_head[2] - (2^10, 2^12]
 *   heap->free_head[3] - (2^12, 2^14]
 *   heap->free_head[4] - (2^14, MAX_SIZE]
 */
size_t
malloc_elem_free_list_index(size_t size)
{
#define MALLOC_MINSIZE_LOG2   8
#define MALLOC_LOG2_INCREMENT 2

	size_t log2;
	size_t index;

	if (size <= (1UL << MALLOC_MINSIZE_LOG2))
		return 0;

	/* Find next power of 2 >= size. */
	log2 = sizeof(size) * 8 - __builtin_clzl(size - 1);

	/* Compute freelist index, based on log2(size). */
	index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
			MALLOC_LOG2_INCREMENT;

	return (index <= RTE_HEAP_NUM_FREELISTS - 1) ?
			index : RTE_HEAP_NUM_FREELISTS - 1;
}
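
/*
 * Worked example (illustrative): for size = 3000 the next power of two is
 * 4096, so log2 = 12 and index = (12 - 8 + 2 - 1) / 2 = 2, matching the
 * (2^10, 2^12] range listed above.
 */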
/*
 * Add the specified element to its heap's free list.
 */
void
malloc_elem_free_list_insert(struct malloc_elem *elem)
{
	size_t idx;

	idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
	elem->state = ELEM_FREE;
	LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
}

/*
 * Remove the specified element from its heap's free list.
 */
void
malloc_elem_free_list_remove(struct malloc_elem *elem)
{
	LIST_REMOVE(elem, free_list);
}
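
/*
 * Note: LIST_INSERT_HEAD makes each free list behave LIFO, so the most
 * recently freed element of a size class is encountered first when the
 * allocator walks that list.
 */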
/*
 * reserve a block of data in an existing malloc_elem. If the malloc_elem
 * is much larger than the data block requested, we split the element in two.
 * This function is only called from malloc_heap_alloc so parameter checking
 * is not done here, as it's done there previously.
 */
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
		size_t bound, bool contig)
{
	struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
			contig);
	const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
	const size_t trailer_size = elem->size - old_elem_size - size -
		MALLOC_ELEM_OVERHEAD;

	malloc_elem_free_list_remove(elem);

	if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* split it, too much free space after elem */
		struct malloc_elem *new_free_elem =
				RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);

		split_elem(elem, new_free_elem);
		malloc_elem_free_list_insert(new_free_elem);

		if (elem == elem->heap->last)
			elem->heap->last = new_free_elem;
	}

	if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* don't split it, pad the element instead */
		elem->state = ELEM_BUSY;
		elem->pad = old_elem_size;

		/* put a dummy header in padding, to point to real element header */
		if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
				      * is cache-line aligned */
			new_elem->pad = elem->pad;
			new_elem->state = ELEM_PAD;
			new_elem->size = elem->size - elem->pad;
			set_header(new_elem);
		}

		return new_elem;
	}

	/* we are going to split the element in two. The original element
	 * remains free, and the new element is the one allocated.
	 * Re-insert original element, in case its new size makes it
	 * belong on a different list.
	 */
	split_elem(elem, new_elem);
	new_elem->state = ELEM_BUSY;
	malloc_elem_free_list_insert(elem);

	return new_elem;
}
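
/*
 * Note on padding: when the leftover space in front of the allocated block is
 * too small to form a standalone free element, it is recorded in elem->pad and
 * a dummy ELEM_PAD header is written where the allocated block's header sits,
 * so malloc_elem_from_data() (see malloc_elem.h) can still step back to the
 * real element header.
 */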
/*
 * join two struct malloc_elem together. elem1 and elem2 must
 * be contiguous in memory.
 */
static inline void
join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
{
	struct malloc_elem *next = elem2->next;
	elem1->size += elem2->size;
	if (next)
		next->prev = elem1;
	else
		elem1->heap->last = elem1;
	elem1->next = next;
}
struct malloc_elem *
malloc_elem_join_adjacent_free(struct malloc_elem *elem)
{
	/*
	 * check if next element exists, is adjacent and is free, if so join
	 * with it, need to remove from free list.
	 */
	if (elem->next != NULL && elem->next->state == ELEM_FREE &&
			next_elem_is_adjacent(elem)) {
		void *erase;
		size_t erase_len;

		/* we will want to erase the trailer and header */
		erase = RTE_PTR_SUB(elem->next, MALLOC_ELEM_TRAILER_LEN);
		erase_len = MALLOC_ELEM_OVERHEAD + elem->next->pad;

		/* remove from free list, join to this one */
		malloc_elem_free_list_remove(elem->next);
		join_elem(elem, elem->next);

		/* erase header, trailer and pad */
		memset(erase, MALLOC_POISON, erase_len);
	}

	/*
	 * check if prev element exists, is adjacent and is free, if so join
	 * with it, need to remove from free list.
	 */
	if (elem->prev != NULL && elem->prev->state == ELEM_FREE &&
			prev_elem_is_adjacent(elem)) {
		struct malloc_elem *new_elem;
		void *erase;
		size_t erase_len;

		/* we will want to erase trailer and header */
		erase = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
		erase_len = MALLOC_ELEM_OVERHEAD + elem->pad;

		/* remove from free list, join to this one */
		malloc_elem_free_list_remove(elem->prev);

		new_elem = elem->prev;
		join_elem(new_elem, elem);

		/* erase header, trailer and pad */
		memset(erase, MALLOC_POISON, erase_len);

		elem = new_elem;
	}

	return elem;
}
/*
 * free a malloc_elem block by adding it to the free list. If the
 * blocks either immediately before or immediately after the newly freed block
 * are also free, the blocks are merged together.
 */
struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem)
{
	void *ptr;
	size_t data_len;

	ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
	data_len = elem->size - MALLOC_ELEM_OVERHEAD;

	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	elem->pad = 0;

	/* decrease heap's count of allocated elements */
	elem->heap->alloc_count--;

	/* poison memory */
	memset(ptr, MALLOC_POISON, data_len);

	return elem;
}
/* assume all checks were already done */
void
malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
{
	struct malloc_elem *hide_start, *hide_end, *prev, *next;
	size_t len_before, len_after;

	hide_start = start;
	hide_end = RTE_PTR_ADD(start, len);

	prev = elem->prev;
	next = elem->next;

	/* we cannot do anything with non-adjacent elements */
	if (next && next_elem_is_adjacent(elem)) {
		len_after = RTE_PTR_DIFF(next, hide_end);
		if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
			/* split after */
			split_elem(elem, hide_end);

			malloc_elem_free_list_insert(hide_end);
		} else if (len_after > 0) {
			RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
			return;
		}
	}

	/* we cannot do anything with non-adjacent elements */
	if (prev && prev_elem_is_adjacent(elem)) {
		len_before = RTE_PTR_DIFF(hide_start, elem);
		if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
			/* split before */
			split_elem(elem, hide_start);

			prev = elem;
			elem = hide_start;

			malloc_elem_free_list_insert(prev);
		} else if (len_before > 0) {
			RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
			return;
		}
	}

	remove_elem(elem);
}
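
/*
 * Note: this is used by the heap code when a VA region inside an element is
 * about to be returned to the system (e.g. pages being unmapped), so the
 * hidden region must no longer be tracked by any malloc element.
 */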
/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
	const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;

	/* if we request a smaller size, then always return ok */
	if (elem->size >= new_size)
		return 0;

	/* check if there is a next element, it's free and adjacent */
	if (!elem->next || elem->next->state != ELEM_FREE ||
			!next_elem_is_adjacent(elem))
		return -1;
	if (elem->size + elem->next->size < new_size)
		return -1;

	/* we now know the element fits, so remove from free list,
	 * join the two
	 */
	malloc_elem_free_list_remove(elem->next);
	join_elem(elem, elem->next);

	if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
		/* now we have a big block together. Let's cut it down a bit, by splitting */
		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);

		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
		split_elem(elem, split_pt);
		malloc_elem_free_list_insert(split_pt);
	}
	return 0;
}
static inline const char *
elem_state_to_str(enum elem_state state)
{
	switch (state) {
	case ELEM_PAD:
		return "PAD";
	case ELEM_BUSY:
		return "BUSY";
	case ELEM_FREE:
		return "FREE";
	}
	return "ERROR";
}

void
malloc_elem_dump(const struct malloc_elem *elem, FILE *f)
{
	fprintf(f, "Malloc element at %p (%s)\n", elem,
			elem_state_to_str(elem->state));
	fprintf(f, "  len: 0x%zx pad: 0x%" PRIx32 "\n", elem->size, elem->pad);
	fprintf(f, "  prev: %p next: %p\n", elem->prev, elem->next);