/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memcpy.h>
#include <rte_atomic.h>
#include <rte_fbarray.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "malloc_mp.h"
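/*
 * Check whether the page size of a memseg list satisfies the size flags
 * requested by the caller. Returns non-zero if no specific page size was
 * requested, or if hugepage_sz matches one of the requested size flags.
 */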
static unsigned
check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
{
	unsigned check_flag = 0;

	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
		return 1;

	switch (hugepage_sz) {
	case RTE_PGSIZE_256K:
		check_flag = RTE_MEMZONE_256KB;
		break;
	case RTE_PGSIZE_2M:
		check_flag = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_16M:
		check_flag = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_256M:
		check_flag = RTE_MEMZONE_256MB;
		break;
	case RTE_PGSIZE_512M:
		check_flag = RTE_MEMZONE_512MB;
		break;
	case RTE_PGSIZE_1G:
		check_flag = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_4G:
		check_flag = RTE_MEMZONE_4GB;
		break;
	case RTE_PGSIZE_16G:
		check_flag = RTE_MEMZONE_16GB;
		break;
	}

	return check_flag & flags;
}
/*
 * Expand the heap with a memory area.
 */
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
		void *start, size_t len)
{
	struct malloc_elem *elem = start;

	malloc_elem_init(elem, heap, msl, len);

	malloc_elem_insert(elem);

	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	return elem;
}
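/*
 * rte_memseg_contig_walk() callback: add an IOVA-contiguous chunk of a
 * memseg list to the malloc heap of its NUMA node at initialization time.
 */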
static int
malloc_add_seg(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct malloc_heap *heap;
	int msl_idx;

	heap = &mcfg->malloc_heaps[msl->socket_id];

	/* msl is const, so find it */
	msl_idx = msl - mcfg->memsegs;

	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	found_msl = &mcfg->memsegs[msl_idx];

	malloc_heap_add_memory(heap, found_msl, ms->addr, len);

	/* account for the added memory, as expand/free paths do */
	heap->total_size += len;

	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
			msl->socket_id);
	return 0;
}
/*
 * Iterates through the freelist for a heap to find a free element
 * which can store data of the required size and with the requested alignment.
 * If size is 0, find the biggest available elem.
 * Returns NULL on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	size_t idx;
	struct malloc_elem *elem, *alt_elem = NULL;

	for (idx = malloc_elem_free_list_index(size);
			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			if (malloc_elem_can_hold(elem, size, align, bound,
					contig)) {
				if (check_hugepage_sz(flags,
						elem->msl->page_sz))
					return elem;
				if (alt_elem == NULL)
					alt_elem = elem;
			}
		}
	}

	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
		return alt_elem;

	return NULL;
}
/*
 * Main function to allocate a block of memory from the heap.
 * It scans the free lists for a suitable element and reserves it; the
 * caller (heap_alloc_on_socket) holds the heap lock and takes care of
 * expanding the heap and re-scanning if this first scan fails.
 */
static void *
heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct malloc_elem *elem;

	size = RTE_CACHE_LINE_ROUNDUP(size);
	align = RTE_CACHE_LINE_ROUNDUP(align);

	elem = find_suitable_element(heap, size, flags, align, bound, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, bound, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}
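/*
 * Undo a failed heap expansion: remove the new element from the free list,
 * hide the mapped region from the heap, and release the backing pages.
 */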
/* this function is exposed in malloc_mp.h */
void
rollback_expand_heap(struct rte_memseg **ms, int n_segs,
		struct malloc_elem *elem, void *map_addr, size_t map_len)
{
	if (elem != NULL) {
		malloc_elem_free_list_remove(elem);
		malloc_elem_hide_region(elem, map_addr, map_len);
	}

	eal_memalloc_free_seg_bulk(ms, n_segs);
}
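/*
 * Back a new heap element with freshly allocated pages: validate the
 * request against the user's allocation limits, allocate the pages, verify
 * physical contiguity if requested, and add the new memory to the heap.
 */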
/* this function is exposed in malloc_mp.h */
struct malloc_elem *
alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig, struct rte_memseg **ms, int n_segs)
{
	struct rte_memseg_list *msl;
	struct malloc_elem *elem = NULL;
	size_t alloc_sz;
	int allocd_pages;
	void *ret, *map_addr;

	alloc_sz = (size_t)pg_sz * n_segs;

	/* first, check if we're allowed to allocate this memory */
	if (eal_memalloc_mem_alloc_validate(socket,
			heap->total_size + alloc_sz) < 0) {
		RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
		return NULL;
	}

	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
			socket, true);

	/* make sure we've allocated our pages... */
	if (allocd_pages < 0)
		return NULL;

	map_addr = ms[0]->addr;
	msl = rte_mem_virt2memseg_list(map_addr);

	/* check if we wanted contiguous memory but didn't get it */
	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
				__func__);
		goto fail;
	}

	/* add newly minted memsegs to malloc heap */
	elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);

	/* try once more, as now we have allocated new memory */
	ret = find_suitable_element(heap, elt_size, flags, align, bound,
			contig);

	if (ret == NULL)
		goto fail;

	return elem;

fail:
	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
	return NULL;
}
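/*
 * Expand the heap in the primary process: allocate pages, notify listeners
 * of the new memory, and synchronize the memory map with secondary
 * processes, rolling everything back if any step fails.
 */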
static int
try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct malloc_elem *elem;
	struct rte_memseg **ms;
	void *map_addr;
	size_t alloc_sz;
	int n_segs;
	bool callback_triggered = false;

	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
			MALLOC_ELEM_TRAILER_LEN, pg_sz);
	n_segs = alloc_sz / pg_sz;

	/* we can't know in advance how many pages we'll need, so we malloc */
	ms = malloc(sizeof(*ms) * n_segs);
	if (ms == NULL)
		return -1;
	memset(ms, 0, sizeof(*ms) * n_segs);

	elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
			bound, contig, ms, n_segs);
	if (elem == NULL)
		goto free_ms;

	map_addr = ms[0]->addr;

	/* notify user about changes in memory map */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);

	/* notify other processes that this has happened */
	if (request_sync()) {
		/* we couldn't ensure all processes have mapped memory,
		 * so free it back and notify everyone that it's been
		 * freed back.
		 *
		 * technically, we could've avoided adding memory addresses to
		 * the map, but that would've led to inconsistent behavior
		 * between primary and secondary processes, as those get
		 * callbacks during sync. therefore, force primary process to
		 * do alloc-and-rollback syncs as well.
		 */
		callback_triggered = true;
		goto free_elem;
	}

	heap->total_size += alloc_sz;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zuMB\n",
		socket, alloc_sz >> 20ULL);

	free(ms);

	return 0;

free_elem:
	if (callback_triggered)
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				map_addr, alloc_sz);

	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);

	request_sync();
free_ms:
	free(ms);

	return -1;
}
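/*
 * Expand the heap from a secondary process: marshal the request and ask
 * the primary process to perform the allocation on our behalf.
 */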
static int
try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct malloc_mp_req req;
	int req_result;

	memset(&req, 0, sizeof(req));

	req.t = REQ_TYPE_ALLOC;
	req.alloc_req.align = align;
	req.alloc_req.bound = bound;
	req.alloc_req.contig = contig;
	req.alloc_req.flags = flags;
	req.alloc_req.elt_size = elt_size;
	req.alloc_req.page_sz = pg_sz;
	req.alloc_req.socket = socket;
	req.alloc_req.heap = heap; /* it's in shared memory */

	req_result = request_to_primary(&req);

	if (req_result != 0)
		return -1;

	if (req.result != REQ_RESULT_SUCCESS)
		return -1;

	return 0;
}
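/*
 * Dispatch a heap expansion request to the primary- or secondary-specific
 * path, holding the memory hotplug lock for the duration.
 */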
static int
try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	} else {
		ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	}

	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
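/* qsort() comparator: order memseg lists by ascending page size */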
static int
compare_pagesz(const void *a, const void *b)
{
	const struct rte_memseg_list * const *mpa = a;
	const struct rte_memseg_list * const *mpb = b;
	const struct rte_memseg_list *msla = *mpa;
	const struct rte_memseg_list *mslb = *mpb;
	uint64_t pg_sz_a = msla->page_sz;
	uint64_t pg_sz_b = mslb->page_sz;

	if (pg_sz_a < pg_sz_b)
		return -1;
	if (pg_sz_a > pg_sz_b)
		return 1;
	return 0;
}
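/*
 * Grow the heap on a given socket. Collect the page sizes available on
 * this socket, split them into those explicitly requested via size flags
 * and the rest, and try expanding the heap with each page size in turn,
 * smallest first.
 */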
static int
alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t prev_pg_sz;
	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	memset(requested_msls, 0, sizeof(requested_msls));
	memset(other_msls, 0, sizeof(other_msls));
	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
	memset(other_pg_sz, 0, sizeof(other_pg_sz));

	/*
	 * go through memseg list and take note of all the page sizes available,
	 * and if any of them were specifically requested by the user.
	 */
	n_requested_msls = 0;
	n_other_msls = 0;
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->socket_id != socket)
			continue;

		if (msl->base_va == NULL)
			continue;

		/* if pages of specific size were requested */
		if (size_flags != 0 && check_hugepage_sz(size_flags,
				msl->page_sz))
			requested_msls[n_requested_msls++] = msl;
		else if (size_flags == 0 || size_hint)
			other_msls[n_other_msls++] = msl;
	}

	/* sort the lists, smallest first */
	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
			compare_pagesz);
	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
			compare_pagesz);

	/* now, extract page sizes we are supposed to try */
	prev_pg_sz = 0;
	n_requested_pg_sz = 0;
	for (i = 0; i < n_requested_msls; i++) {
		uint64_t pg_sz = requested_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}
	prev_pg_sz = 0;
	n_other_pg_sz = 0;
	for (i = 0; i < n_other_msls; i++) {
		uint64_t pg_sz = other_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			other_pg_sz[n_other_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}

	/* finally, try allocating memory of specified page sizes, starting from
	 * the smallest sizes
	 */
	for (i = 0; i < n_requested_pg_sz; i++) {
		uint64_t pg_sz = requested_pg_sz[i];

		/*
		 * do not pass the size hint here, as user expects other page
		 * sizes first, before resorting to best effort allocation.
		 */
		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
				align, bound, contig))
			return 0;
	}
	if (n_other_pg_sz == 0)
		return -1;

	/* now, check if we can reserve anything with size hint */
	ret = find_suitable_element(heap, size, flags, align, bound, contig);
	if (ret != NULL)
		return 0;

	/*
	 * we still couldn't reserve memory, so try expanding heap with other
	 * page sizes, if there are any
	 */
	for (i = 0; i < n_other_pg_sz; i++) {
		uint64_t pg_sz = other_pg_sz[i];

		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
				align, bound, contig))
			return 0;
	}
	return -1;
}
/* this will try lower page sizes first */
static void *
heap_alloc_on_socket(const char *type, size_t size, int socket,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	/* for legacy mode, try once and with all flags */
	if (internal_config.legacy_mem) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
		goto alloc_unlock;
	}

	/*
	 * we do not pass the size hint here, because even if allocation fails,
	 * we may still be able to allocate memory from appropriate page sizes,
	 * we just need to request more memory first.
	 */
	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
	if (ret != NULL)
		goto alloc_unlock;

	if (!alloc_more_mem_on_socket(heap, size, socket, flags, align, bound,
			contig)) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);

		/* this should have succeeded */
		if (ret == NULL)
			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
	}
alloc_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
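/*
 * Allocate memory on a given socket, falling back to any other socket if
 * SOCKET_ID_ANY was requested. This is the entry point used by the
 * rte_malloc() family. A hypothetical direct call might look like:
 *
 *	void *buf = malloc_heap_alloc(NULL, 4096, SOCKET_ID_ANY, 0,
 *			RTE_CACHE_LINE_SIZE, 0, false);
 */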
void *
malloc_heap_alloc(const char *type, size_t size, int socket_arg,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	int socket, i, cur_socket;
	void *ret;

	/* return NULL if size is 0 or alignment is not power-of-2 */
	if (size == 0 || (align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages())
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* Check socket parameter */
	if (socket >= RTE_MAX_NUMA_NODES)
		return NULL;

	ret = heap_alloc_on_socket(type, size, socket, flags, align, bound,
			contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		cur_socket = rte_socket_id_by_idx(i);
		if (cur_socket == socket)
			continue;
		ret = heap_alloc_on_socket(type, size, cur_socket, flags,
				align, bound, contig);
		if (ret != NULL)
			return ret;
	}

	return NULL;
}
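/*
 * Return a page-aligned region of heap memory to the system, one memseg
 * at a time.
 */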
/* this function is exposed in malloc_mp.h */
int
malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
{
	int n_segs, seg_idx, max_seg_idx;
	struct rte_memseg_list *msl;
	size_t page_sz;

	msl = rte_mem_virt2memseg_list(aligned_start);
	if (msl == NULL)
		return -1;

	page_sz = (size_t)msl->page_sz;
	n_segs = aligned_len / page_sz;
	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
	max_seg_idx = seg_idx + n_segs;

	for (; seg_idx < max_seg_idx; seg_idx++) {
		struct rte_memseg *ms;

		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
		eal_memalloc_free_seg(ms);
	}
	return 0;
}
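/*
 * Free a previously allocated element. The element is merged with its free
 * neighbours; if the resulting free region covers one or more whole pages
 * (minus any segments marked as unfreeable), those pages are unmapped and
 * returned to the system.
 */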
int
malloc_heap_free(struct malloc_elem *elem)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap;
	void *start, *aligned_start, *end, *aligned_end;
	size_t len, aligned_len, page_sz;
	struct rte_memseg_list *msl;
	unsigned int i, n_segs;
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	/* elem may be merged with previous element, so keep heap address */
	heap = elem->heap;
	msl = elem->msl;
	page_sz = (size_t)msl->page_sz;

	rte_spinlock_lock(&(heap->lock));

	/* mark element as free */
	elem->state = ELEM_FREE;

	elem = malloc_elem_free(elem);

	/* anything after this is a bonus */
	ret = 0;

	/* ...of which we can't avail if we are in legacy mode */
	if (internal_config.legacy_mem)
		goto free_unlock;

	/* check if we can free any memory back to the system */
	if (elem->size < page_sz)
		goto free_unlock;

	/* probably, but let's make sure, as we may not be using up full page */
	start = elem;
	len = elem->size;
	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
	end = RTE_PTR_ADD(elem, len);
	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);

	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);

	/* can't free anything */
	if (aligned_len < page_sz)
		goto free_unlock;

	/* we can free something. however, some of these pages may be marked as
	 * unfreeable, so also check that as well
	 */
	n_segs = aligned_len / page_sz;
	for (i = 0; i < n_segs; i++) {
		const struct rte_memseg *tmp =
				rte_mem_virt2memseg(aligned_start, msl);

		if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			/* this is an unfreeable segment, so move start */
			aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
		}
	}

	/* recalculate length and number of segments */
	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
	n_segs = aligned_len / page_sz;

	/* check if we can still free some pages */
	if (n_segs == 0)
		goto free_unlock;

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/*
	 * we allow secondary processes to clear the heap of this allocated
	 * memory because it is safe to do so, as even if notifications about
	 * unmapped pages don't make it to other processes, heap is shared
	 * across all processes, and will become empty of this memory anyway,
	 * and nothing can allocate it back unless primary process will be able
	 * to deliver allocation message to every single running process.
	 */

	malloc_elem_free_list_remove(elem);

	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);

	heap->total_size -= aligned_len;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* notify user about changes in memory map */
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				aligned_start, aligned_len);

		/* don't care if any of this fails */
		malloc_heap_free_pages(aligned_start, aligned_len);

		request_sync();
	} else {
		struct malloc_mp_req req;

		memset(&req, 0, sizeof(req));

		req.t = REQ_TYPE_FREE;
		req.free_req.addr = aligned_start;
		req.free_req.len = aligned_len;

		/*
		 * we request primary to deallocate pages, but we don't do it
		 * in this thread. instead, we notify primary that we would like
		 * to deallocate pages, and this process will receive another
		 * request (in parallel) that will do it for us on another
		 * thread.
		 *
		 * we also don't really care if this succeeds - the data is
		 * already removed from the heap, so it is, for all intents and
		 * purposes, hidden from the rest of DPDK even if some other
		 * process (including this one) may have these pages mapped.
		 *
		 * notifications about deallocated memory happen during sync.
		 */
		request_to_primary(&req);
	}

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zuMB\n",
		msl->socket_id, aligned_len >> 20ULL);

	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
free_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
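/*
 * Attempt to resize an allocated element in place (used on the
 * rte_realloc() path).
 */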
int
malloc_heap_resize(struct malloc_elem *elem, size_t size)
{
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));

	ret = malloc_elem_resize(elem, size);

	rte_spinlock_unlock(&(elem->heap->lock));

	return ret;
}
/*
 * Function to retrieve data for a heap on a given socket
 */
int
malloc_heap_get_stats(struct malloc_heap *heap,
		struct rte_malloc_socket_stats *socket_stats)
{
	size_t idx;
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	/* Initialise variables for heap */
	socket_stats->free_count = 0;
	socket_stats->heap_freesz_bytes = 0;
	socket_stats->greatest_free_size = 0;

	/* Iterate through free lists */
	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			socket_stats->free_count++;
			socket_stats->heap_freesz_bytes += elem->size;
			if (elem->size > socket_stats->greatest_free_size)
				socket_stats->greatest_free_size = elem->size;
		}
	}

	/* Get stats on overall heap and allocated memory on this heap */
	socket_stats->heap_totalsz_bytes = heap->total_size;
	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
			socket_stats->heap_freesz_bytes);
	socket_stats->alloc_count = heap->alloc_count;

	rte_spinlock_unlock(&heap->lock);
	return 0;
}
/*
 * Function to dump the contents of a heap to a file
 */
void
malloc_heap_dump(struct malloc_heap *heap, FILE *f)
{
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);

	elem = heap->first;
	while (elem) {
		malloc_elem_dump(elem, f);
		elem = elem->next;
	}

	rte_spinlock_unlock(&heap->lock);
}
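/*
 * One-time heap setup at EAL init: register the multiprocess request
 * handlers, release the memory hotplug lock taken during early init, and
 * (in the primary process only) seed the heaps with all preallocated
 * IOVA-contiguous memory areas.
 */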
int
rte_eal_malloc_heap_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	if (register_mp_requests()) {
		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		return -1;
	}

	/* unlock mem hotplug here. it's safe for primary as no requests can
	 * even come before primary itself is fully initialized, and secondaries
	 * do not need to initialize the heap.
	 */
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	/* secondary process does not need to initialize anything */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* add all IOVA-contiguous areas to the heap */
	return rte_memseg_contig_walk(malloc_add_seg, NULL);
}