/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memcpy.h>
#include <rte_atomic.h>
#include <rte_fbarray.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
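
/*
 * Match a hugepage size against the requested memzone size flags:
 * translate the page size into its RTE_MEMZONE_*B flag and test it
 * against the request. If no specific page size was requested (ignoring
 * the size hint), any page size is acceptable.
 */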
static unsigned
check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
{
	unsigned check_flag = 0;

	/* no specific page size requested, so any page size will do */
	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
		return 1;

	switch (hugepage_sz) {
	case RTE_PGSIZE_256K:
		check_flag = RTE_MEMZONE_256KB;
		break;
	case RTE_PGSIZE_2M:
		check_flag = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_16M:
		check_flag = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_256M:
		check_flag = RTE_MEMZONE_256MB;
		break;
	case RTE_PGSIZE_512M:
		check_flag = RTE_MEMZONE_512MB;
		break;
	case RTE_PGSIZE_1G:
		check_flag = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_4G:
		check_flag = RTE_MEMZONE_4GB;
		break;
	case RTE_PGSIZE_16G:
		check_flag = RTE_MEMZONE_16GB;
	}

	return check_flag & flags;
}

/*
 * Expand the heap with a memory area.
 */
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
		void *start, size_t len)
{
	struct malloc_elem *elem = start;

	malloc_elem_init(elem, heap, msl, len);

	malloc_elem_insert(elem);

	/* coalesce with any adjacent free elements before listing */
	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	heap->total_size += len;

	return elem;
}
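
/*
 * Walk callback: attach one contiguous memseg area to the heap of the
 * socket it belongs to.
 */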
static int
malloc_add_seg(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct malloc_heap *heap;
	int msl_idx;

	heap = &mcfg->malloc_heaps[msl->socket_id];

	/* msl is const, so find the writable copy in the shared config */
	msl_idx = msl - mcfg->memsegs;

	/* validate the index before using it */
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	found_msl = &mcfg->memsegs[msl_idx];

	malloc_heap_add_memory(heap, found_msl, ms->addr, len);

	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
			msl->socket_id);

	return 0;
}

/*
 * Iterates through the freelist for a heap to find a free element
 * which can store data of the required size and with the requested alignment.
 * If size is 0, find the biggest available elem.
 * Returns NULL on failure, or a pointer to the element on success.
 */
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	size_t idx;
	struct malloc_elem *elem, *alt_elem = NULL;

	for (idx = malloc_elem_free_list_index(size);
			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			if (malloc_elem_can_hold(elem, size, align, bound,
					contig)) {
				if (check_hugepage_sz(flags,
						elem->msl->page_sz))
					return elem;
				/* remember the first fallback candidate */
				if (alt_elem == NULL)
					alt_elem = elem;
			}
		}
	}

	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
		return alt_elem;

	return NULL;
}
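
/*
 * Example (illustrative flags): a request with RTE_MEMZONE_2MB |
 * RTE_MEMZONE_SIZE_HINT_ONLY prefers an element backed by 2M pages, but
 * because the size flag is only a hint, the first fitting element on any
 * other page size (alt_elem) is returned as a fallback.
 */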

/*
 * Main function to allocate a block of memory from the heap.
 * It scans the free list for a suitable element and carves the allocation
 * out of it. Locking, and adding a new memseg when the scan fails, are
 * handled by the callers (see heap_alloc_on_socket()), which then re-scan
 * and should find the new element.
 */
static void *
heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct malloc_elem *elem;

	size = RTE_CACHE_LINE_ROUNDUP(size);
	align = RTE_CACHE_LINE_ROUNDUP(align);

	elem = find_suitable_element(heap, size, flags, align, bound, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, bound, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;
	}

	/* usable memory starts right after the element header */
	return elem == NULL ? NULL : (void *)(&elem[1]);
}
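
/*
 * Try to grow the heap by allocating enough pages of size pg_sz to hold
 * an element of elt_size plus header/trailer and alignment overhead, then
 * verify that a suitable free element is now available.
 * Returns 0 on success, -1 on failure.
 */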
static int
try_expand_heap(struct malloc_heap *heap, size_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig)
{
	size_t map_len;
	struct rte_memseg_list *msl;
	struct rte_memseg **ms;
	struct malloc_elem *elem;
	int n_segs, allocd_pages;
	void *ret, *map_addr;

	/* reserve space for element header/trailer and alignment padding */
	align = RTE_MAX(align, MALLOC_ELEM_HEADER_LEN);
	map_len = RTE_ALIGN_CEIL(align + elt_size + MALLOC_ELEM_TRAILER_LEN,
			pg_sz);

	n_segs = map_len / pg_sz;
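
	/*
	 * Worked example (illustrative numbers): for pg_sz = 2M and a 3M
	 * element, align + elt_size + trailer rounds up to 4M, so
	 * n_segs == 2 pages will be requested.
	 */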

	/* we can't know in advance how many pages we'll need, so malloc */
	ms = malloc(sizeof(*ms) * n_segs);
	if (ms == NULL)
		return -1;

	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
			socket, true);

	/* make sure we've allocated our pages... */
	if (allocd_pages < 0)
		goto free_ms;

	map_addr = ms[0]->addr;
	msl = rte_mem_virt2memseg_list(map_addr);

	/* check if we wanted contiguous memory but didn't get it */
	if (contig && !eal_memalloc_is_contig(msl, map_addr, map_len)) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
				__func__);
		goto free_pages;
	}

	/* add newly minted memsegs to malloc heap */
	elem = malloc_heap_add_memory(heap, msl, map_addr, map_len);

	/* try once more, as now we have allocated new memory */
	ret = find_suitable_element(heap, elt_size, flags, align, bound,
			contig);
	if (ret == NULL)
		goto free_elem;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
			socket, map_len >> 20ULL);

	free(ms);

	return 0;

free_elem:
	/* roll back: hide the new region again and shrink the heap */
	malloc_elem_free_list_remove(elem);
	malloc_elem_hide_region(elem, map_addr, map_len);
	heap->total_size -= map_len;

free_pages:
	eal_memalloc_free_seg_bulk(ms, n_segs);

free_ms:
	free(ms);

	return -1;
}
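
/* qsort() comparator: order memseg lists by ascending page size */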
static int
compare_pagesz(const void *a, const void *b)
{
	const struct rte_memseg_list * const *mpa = a;
	const struct rte_memseg_list * const *mpb = b;
	const struct rte_memseg_list *msla = *mpa;
	const struct rte_memseg_list *mslb = *mpb;
	uint64_t pg_sz_a = msla->page_sz;
	uint64_t pg_sz_b = mslb->page_sz;

	if (pg_sz_a < pg_sz_b)
		return -1;
	if (pg_sz_a > pg_sz_b)
		return 1;

	return 0;
}
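
/*
 * Grow the heap on the given socket. Memseg lists for the socket are
 * bucketed into those whose page size was explicitly requested and all
 * others, each sorted smallest page size first; expansion is attempted
 * with the requested page sizes before falling back to the rest when the
 * size flags were only a hint. Returns 0 on success, -1 on failure.
 */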
static int
alloc_mem_on_socket(size_t size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t prev_pg_sz;
	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	memset(requested_msls, 0, sizeof(requested_msls));
	memset(other_msls, 0, sizeof(other_msls));
	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
	memset(other_pg_sz, 0, sizeof(other_pg_sz));

	/*
	 * go through the memseg lists and take note of all the page sizes
	 * available, and whether any of them were specifically requested by
	 * the user.
	 */
	n_requested_msls = 0;
	n_other_msls = 0;
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		/* skip lists that belong to other sockets */
		if (msl->socket_id != socket)
			continue;

		/* skip lists that have no memory mapped */
		if (msl->base_va == NULL)
			continue;

		/* if pages of specific size were requested */
		if (size_flags != 0 && check_hugepage_sz(size_flags,
				msl->page_sz))
			requested_msls[n_requested_msls++] = msl;
		else if (size_flags == 0 || size_hint)
			other_msls[n_other_msls++] = msl;
	}

	/* sort the lists, smallest page size first */
	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
			compare_pagesz);
	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
			compare_pagesz);

	/*
	 * now, extract the page sizes we are supposed to try; the lists are
	 * sorted, so comparing against the previous size deduplicates them.
	 */
	prev_pg_sz = 0;
	n_requested_pg_sz = 0;
	for (i = 0; i < n_requested_msls; i++) {
		uint64_t pg_sz = requested_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}
	prev_pg_sz = 0;
	n_other_pg_sz = 0;
	for (i = 0; i < n_other_msls; i++) {
		uint64_t pg_sz = other_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			other_pg_sz[n_other_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}

	/*
	 * finally, try allocating memory of the specified page sizes, starting
	 * from the smallest sizes
	 */
	for (i = 0; i < n_requested_pg_sz; i++) {
		uint64_t pg_sz = requested_pg_sz[i];

		/*
		 * do not pass the size hint here, as user expects other page
		 * sizes first, before resorting to best effort allocation.
		 */
		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
				align, bound, contig))
			return 0;
	}
	if (n_other_pg_sz == 0)
		return -1;

	/* now, check if we can reserve anything with size hint */
	ret = find_suitable_element(heap, size, flags, align, bound, contig);
	if (ret != NULL)
		return 0;

	/*
	 * we still couldn't reserve memory, so try expanding the heap with
	 * other page sizes, if there are any
	 */
	for (i = 0; i < n_other_pg_sz; i++) {
		uint64_t pg_sz = other_pg_sz[i];

		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
				align, bound, contig))
			return 0;
	}

	return -1;
}

/* this will try lower page sizes first */
static void *
heap_alloc_on_socket(const char *type, size_t size, int socket,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	/* for legacy mode, try once and with all flags */
	if (internal_config.legacy_mem) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
		goto alloc_unlock;
	}

	/*
	 * we do not pass the size hint here, because even if allocation fails,
	 * we may still be able to allocate memory from appropriate page sizes,
	 * we just need to request more memory first.
	 */
	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
	if (ret != NULL)
		goto alloc_unlock;

	if (!alloc_mem_on_socket(size, socket, flags, align, bound, contig)) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);

		/* this should have succeeded */
		if (ret == NULL)
			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
	}

alloc_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
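
/*
 * Public allocation entry point. Tries the heap on the requested socket
 * first; for SOCKET_ID_ANY requests that fail there, every other socket's
 * heap is tried in turn.
 */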
void *
malloc_heap_alloc(const char *type, size_t size, int socket_arg,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	int socket, i, cur_socket;
	void *ret;

	/* return NULL if size is 0 or alignment is not power-of-2 */
	if (size == 0 || (align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages())
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* Check socket parameter */
	if (socket >= RTE_MAX_NUMA_NODES)
		return NULL;

	ret = heap_alloc_on_socket(type, size, socket, flags, align, bound,
			contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		cur_socket = rte_socket_id_by_idx(i);
		if (cur_socket == socket)
			continue;
		ret = heap_alloc_on_socket(type, size, cur_socket, flags,
				align, bound, contig);
		if (ret != NULL)
			return ret;
	}

	return NULL;
}
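
/*
 * Free an element back to its heap and, outside of legacy mode, return
 * any whole pages covered by the resulting free element to the system.
 */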
int
malloc_heap_free(struct malloc_elem *elem)
{
	struct malloc_heap *heap;
	void *start, *aligned_start, *end, *aligned_end;
	size_t len, aligned_len, page_sz;
	struct rte_memseg_list *msl;
	int n_segs, seg_idx, max_seg_idx, ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	/* elem may be merged with previous element, so keep heap address */
	heap = elem->heap;
	msl = elem->msl;
	page_sz = (size_t)msl->page_sz;

	rte_spinlock_lock(&(heap->lock));

	/* mark element as free */
	elem->state = ELEM_FREE;

	elem = malloc_elem_free(elem);

	/* anything after this is a bonus */
	ret = 0;

	/* ...of which we can't avail if we are in legacy mode */
	if (internal_config.legacy_mem)
		goto free_unlock;

	/* check if we can free any memory back to the system */
	if (elem->size < page_sz)
		goto free_unlock;

	/* probably, but let's make sure, as we may not be using up a full page */
	start = elem;
	len = elem->size;
	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
	end = RTE_PTR_ADD(elem, len);
	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);

	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);

	/* can't free anything */
	if (aligned_len < page_sz)
		goto free_unlock;
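
	/*
	 * Illustration (hypothetical addresses): with 2M pages, a free
	 * element spanning [0x1f0000, 0x620000) yields aligned_start =
	 * 0x200000 and aligned_end = 0x600000, so two full pages
	 * (aligned_len = 0x400000) can be handed back to the system.
	 */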

	malloc_elem_free_list_remove(elem);

	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);

	/* we don't really care if we fail to deallocate memory */
	n_segs = aligned_len / page_sz;
	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
	max_seg_idx = seg_idx + n_segs;

	for (; seg_idx < max_seg_idx; seg_idx++) {
		struct rte_memseg *ms;

		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
		eal_memalloc_free_seg(ms);
	}
	heap->total_size -= aligned_len;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
			msl->socket_id, aligned_len >> 20ULL);

free_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
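
/* Resize a busy element in place, under its heap's lock. */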
int
malloc_heap_resize(struct malloc_elem *elem, size_t size)
{
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));

	ret = malloc_elem_resize(elem, size);

	rte_spinlock_unlock(&(elem->heap->lock));

	return ret;
}

/*
 * Function to retrieve data for the heap on a given socket
 */
int
malloc_heap_get_stats(struct malloc_heap *heap,
		struct rte_malloc_socket_stats *socket_stats)
{
	size_t idx;
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	/* Initialise variables for heap */
	socket_stats->free_count = 0;
	socket_stats->heap_freesz_bytes = 0;
	socket_stats->greatest_free_size = 0;

	/* Iterate through the free lists */
	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			socket_stats->free_count++;
			socket_stats->heap_freesz_bytes += elem->size;
			if (elem->size > socket_stats->greatest_free_size)
				socket_stats->greatest_free_size = elem->size;
		}
	}

	/* Get stats on overall heap and allocated memory on this heap */
	socket_stats->heap_totalsz_bytes = heap->total_size;
	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
			socket_stats->heap_freesz_bytes);
	socket_stats->alloc_count = heap->alloc_count;

	rte_spinlock_unlock(&heap->lock);

	return 0;
}

/*
 * Function to dump the contents of the heap on a given socket
 */
void
malloc_heap_dump(struct malloc_heap *heap, FILE *f)
{
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);

	/* walk every element in the heap, busy and free alike */
	elem = heap->first;
	while (elem != NULL) {
		malloc_elem_dump(elem, f);
		elem = elem->next;
	}

	rte_spinlock_unlock(&heap->lock);
}

int
rte_eal_malloc_heap_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	if (mcfg == NULL)
		return -1;

	/* secondary process does not need to initialize anything */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* add all IOVA-contiguous areas to the heap */
	return rte_memseg_contig_walk(malloc_add_seg, NULL);
}