/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memcpy.h>
#include <rte_atomic.h>
#include <rte_fbarray.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "malloc_mp.h"
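
/*
 * Check whether a hugepage size satisfies the RTE_MEMZONE_*B size flags in
 * 'flags'. Returns non-zero if it does, or if no specific page size was
 * requested.
 */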
static unsigned
check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
{
	unsigned check_flag = 0;

	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
		return 1;

	switch (hugepage_sz) {
	case RTE_PGSIZE_256K:
		check_flag = RTE_MEMZONE_256KB;
		break;
	case RTE_PGSIZE_2M:
		check_flag = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_16M:
		check_flag = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_256M:
		check_flag = RTE_MEMZONE_256MB;
		break;
	case RTE_PGSIZE_512M:
		check_flag = RTE_MEMZONE_512MB;
		break;
	case RTE_PGSIZE_1G:
		check_flag = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_4G:
		check_flag = RTE_MEMZONE_4GB;
		break;
	case RTE_PGSIZE_16G:
		check_flag = RTE_MEMZONE_16GB;
	}

	return check_flag & flags;
}
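
/*
 * Translate a socket ID into an index into the malloc heaps array.
 * Returns -1 if no heap is associated with that socket.
 */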
int
malloc_socket_to_heap_id(unsigned int socket_id)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i;

	for (i = 0; i < RTE_MAX_HEAPS; i++) {
		struct malloc_heap *heap = &mcfg->malloc_heaps[i];

		if (heap->socket_id == socket_id)
			return i;
	}
	return -1;
}

/*
 * Expand the heap with a memory area.
 */
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
		void *start, size_t len)
{
	struct malloc_elem *elem = start;

	malloc_elem_init(elem, heap, msl, len);

	malloc_elem_insert(elem);

	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	return elem;
}
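
/*
 * Callback for rte_memseg_contig_walk(): add a contiguous memory area to the
 * malloc heap of the socket its memseg list belongs to.
 */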
static int
malloc_add_seg(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct malloc_heap *heap;
	int msl_idx, heap_idx;

	if (msl->external)
		return 0;

	heap_idx = malloc_socket_to_heap_id(msl->socket_id);
	if (heap_idx < 0) {
		RTE_LOG(ERR, EAL, "Memseg list has invalid socket id\n");
		return -1;
	}
	heap = &mcfg->malloc_heaps[heap_idx];

	/* msl is const, so find it */
	msl_idx = msl - mcfg->memsegs;

	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	found_msl = &mcfg->memsegs[msl_idx];

	malloc_heap_add_memory(heap, found_msl, ms->addr, len);

	heap->total_size += len;

	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
			msl->socket_id);
	return 0;
}

/*
 * Iterates through the freelist for a heap to find a free element
 * which can store data of the required size and with the requested alignment.
 * If size is 0, find the biggest available elem.
 * Returns null on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	size_t idx;
	struct malloc_elem *elem, *alt_elem = NULL;

	for (idx = malloc_elem_free_list_index(size);
			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			if (malloc_elem_can_hold(elem, size, align, bound,
					contig)) {
				if (check_hugepage_sz(flags,
						elem->msl->page_sz))
					return elem;
				if (alt_elem == NULL)
					alt_elem = elem;
			}
		}
	}

	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
		return alt_elem;

	return NULL;
}

/*
 * Iterates through the freelist for a heap to find a free element with the
 * biggest size and requested alignment. Will also set size to whatever element
 * size that was found.
 * Returns null on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_biggest_element(struct malloc_heap *heap, size_t *size,
		unsigned int flags, size_t align, bool contig)
{
	struct malloc_elem *elem, *max_elem = NULL;
	size_t idx, max_size = 0;

	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			size_t cur_size;

			if (!check_hugepage_sz(flags, elem->msl->page_sz))
				continue;
			if (contig) {
				cur_size =
					malloc_elem_find_max_iova_contig(elem,
							align);
			} else {
				void *data_start = RTE_PTR_ADD(elem,
						MALLOC_ELEM_HEADER_LEN);
				void *data_end = RTE_PTR_ADD(elem, elem->size -
						MALLOC_ELEM_TRAILER_LEN);
				void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
						align);

				/* check if aligned data start is beyond end */
				if (aligned >= data_end)
					continue;

				cur_size = RTE_PTR_DIFF(data_end, aligned);
			}
			if (cur_size > max_size) {
				max_size = cur_size;
				max_elem = elem;
			}
		}
	}

	*size = max_size;
	return max_elem;
}

/*
 * Main function to allocate a block of memory from the heap.
 * It locks the free list, scans it, and adds a new memseg if the
 * scan fails. Once the new memseg is added, it re-scans and should return
 * the new element after releasing the lock.
 */
static void *
heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct malloc_elem *elem;

	size = RTE_CACHE_LINE_ROUNDUP(size);
	align = RTE_CACHE_LINE_ROUNDUP(align);

	elem = find_suitable_element(heap, size, flags, align, bound, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, bound, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}
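
/*
 * Allocate the biggest element available on the heap: find it, then carve it
 * out with malloc_elem_alloc() just like a regular allocation.
 */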
static void *
heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
		unsigned int flags, size_t align, bool contig)
{
	struct malloc_elem *elem;
	size_t size;

	align = RTE_CACHE_LINE_ROUNDUP(align);

	elem = find_biggest_element(heap, &size, flags, align, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, 0, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}

/* this function is exposed in malloc_mp.h */
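/*
 * Undo a failed heap expansion: if an element was already created for the new
 * region, remove it from the free list and hide the region, then free the
 * underlying page segments.
 */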
void
rollback_expand_heap(struct rte_memseg **ms, int n_segs,
		struct malloc_elem *elem, void *map_addr, size_t map_len)
{
	if (elem != NULL) {
		malloc_elem_free_list_remove(elem);
		malloc_elem_hide_region(elem, map_addr, map_len);
	}

	eal_memalloc_free_seg_bulk(ms, n_segs);
}

/* this function is exposed in malloc_mp.h */
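/*
 * Allocate a run of pages for an element of elt_size bytes, add them to the
 * heap, and verify that a suitable free element is now available; roll the
 * expansion back on failure.
 */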
struct malloc_elem *
alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig, struct rte_memseg **ms, int n_segs)
{
	struct rte_memseg_list *msl;
	struct malloc_elem *elem = NULL;
	size_t alloc_sz;
	int allocd_pages;
	void *ret, *map_addr;

	alloc_sz = (size_t)pg_sz * n_segs;

	/* first, check if we're allowed to allocate this memory */
	if (eal_memalloc_mem_alloc_validate(socket,
			heap->total_size + alloc_sz) < 0) {
		RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
		return NULL;
	}

	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
			socket, true);

	/* make sure we've allocated our pages... */
	if (allocd_pages < 0)
		return NULL;

	map_addr = ms[0]->addr;
	msl = rte_mem_virt2memseg_list(map_addr);

	/* check if we wanted contiguous memory but didn't get it */
	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
				__func__);
		goto fail;
	}

	/* add newly minted memsegs to malloc heap */
	elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);

	/* try once more, as now we have allocated new memory */
	ret = find_suitable_element(heap, elt_size, flags, align, bound,
			contig);

	if (ret == NULL)
		goto fail;

	return elem;

fail:
	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
	return NULL;
}
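
/*
 * Primary-process path for expanding a heap: allocate the pages, publish the
 * memory event, and synchronize the new mappings with secondary processes,
 * rolling everything back if the sync fails.
 */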
static int
try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct malloc_elem *elem;
	struct rte_memseg **ms;
	void *map_addr;
	size_t alloc_sz;
	int n_segs;
	bool callback_triggered = false;

	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
			MALLOC_ELEM_TRAILER_LEN, pg_sz);
	n_segs = alloc_sz / pg_sz;

	/* we can't know in advance how many pages we'll need, so we malloc */
	ms = malloc(sizeof(*ms) * n_segs);
	if (ms == NULL)
		return -1;
	memset(ms, 0, sizeof(*ms) * n_segs);

	elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
			bound, contig, ms, n_segs);

	if (elem == NULL)
		goto free_ms;

	map_addr = ms[0]->addr;

	/* notify user about changes in memory map */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);

	/* notify other processes that this has happened */
	if (request_sync()) {
		/* we couldn't ensure all processes have mapped memory,
		 * so free it back and notify everyone that it's been
		 * freed back.
		 *
		 * technically, we could've avoided adding memory addresses to
		 * the map, but that would've led to inconsistent behavior
		 * between primary and secondary processes, as those get
		 * callbacks during sync. therefore, force primary process to
		 * do alloc-and-rollback syncs as well.
		 */
		callback_triggered = true;
		goto free_elem;
	}
	heap->total_size += alloc_sz;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
		socket, alloc_sz >> 20ULL);

	free(ms);

	return 0;

free_elem:
	if (callback_triggered)
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				map_addr, alloc_sz);

	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);

	request_sync();
free_ms:
	free(ms);

	return -1;
}
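
/*
 * Secondary-process path for expanding a heap: build an allocation request and
 * hand it off to the primary process over the multiprocess channel.
 */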
static int
try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct malloc_mp_req req;
	int req_result;

	memset(&req, 0, sizeof(req));

	req.t = REQ_TYPE_ALLOC;
	req.alloc_req.align = align;
	req.alloc_req.bound = bound;
	req.alloc_req.contig = contig;
	req.alloc_req.flags = flags;
	req.alloc_req.elt_size = elt_size;
	req.alloc_req.page_sz = pg_sz;
	req.alloc_req.socket = socket;
	req.alloc_req.heap = heap; /* it's in shared memory */

	req_result = request_to_primary(&req);

	if (req_result != 0)
		return -1;

	if (req.result != REQ_RESULT_SUCCESS)
		return -1;

	return 0;
}
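
/*
 * Expand a heap with pages of the given size, taking the memory hotplug lock
 * and dispatching to the primary- or secondary-process path.
 */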
static int
try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	} else {
		ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	}

	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
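
/* qsort() comparator: order memseg lists by page size, smallest first */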
static int
compare_pagesz(const void *a, const void *b)
{
	const struct rte_memseg_list * const*mpa = a;
	const struct rte_memseg_list * const*mpb = b;
	const struct rte_memseg_list *msla = *mpa;
	const struct rte_memseg_list *mslb = *mpb;
	uint64_t pg_sz_a = msla->page_sz;
	uint64_t pg_sz_b = mslb->page_sz;

	if (pg_sz_a < pg_sz_b)
		return -1;
	if (pg_sz_a > pg_sz_b)
		return 1;
	return 0;
}
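
/*
 * Request more memory for a socket's heap: collect the available page sizes,
 * try the explicitly requested sizes first (smallest pages first), then fall
 * back to any other page size if the size flags were only a hint.
 */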
static int
alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t prev_pg_sz;
	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	memset(requested_msls, 0, sizeof(requested_msls));
	memset(other_msls, 0, sizeof(other_msls));
	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
	memset(other_pg_sz, 0, sizeof(other_pg_sz));

	/*
	 * go through memseg list and take note of all the page sizes available,
	 * and if any of them were specifically requested by the user.
	 */
	n_requested_msls = 0;
	n_other_msls = 0;
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->socket_id != socket)
			continue;

		if (msl->base_va == NULL)
			continue;

		/* if pages of specific size were requested */
		if (size_flags != 0 && check_hugepage_sz(size_flags,
				msl->page_sz))
			requested_msls[n_requested_msls++] = msl;
		else if (size_flags == 0 || size_hint)
			other_msls[n_other_msls++] = msl;
	}

	/* sort the lists, smallest first */
	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
			compare_pagesz);
	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
			compare_pagesz);

	/* now, extract page sizes we are supposed to try */
	prev_pg_sz = 0;
	n_requested_pg_sz = 0;
	for (i = 0; i < n_requested_msls; i++) {
		uint64_t pg_sz = requested_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}
	prev_pg_sz = 0;
	n_other_pg_sz = 0;
	for (i = 0; i < n_other_msls; i++) {
		uint64_t pg_sz = other_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			other_pg_sz[n_other_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}

	/* finally, try allocating memory of specified page sizes, starting from
	 * the smallest sizes
	 */
	for (i = 0; i < n_requested_pg_sz; i++) {
		uint64_t pg_sz = requested_pg_sz[i];

		/*
		 * do not pass the size hint here, as user expects other page
		 * sizes first, before resorting to best effort allocation.
		 */
		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
				align, bound, contig))
			return 0;
	}
	if (n_other_pg_sz == 0)
		return -1;

	/* now, check if we can reserve anything with size hint */
	ret = find_suitable_element(heap, size, flags, align, bound, contig);
	if (ret != NULL)
		return 0;

	/*
	 * we still couldn't reserve memory, so try expanding heap with other
	 * page sizes, if there are any
	 */
	for (i = 0; i < n_other_pg_sz; i++) {
		uint64_t pg_sz = other_pg_sz[i];

		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
				align, bound, contig))
			return 0;
	}
	return -1;
}

/* this will try lower page sizes first */
static void *
malloc_heap_alloc_on_heap_id(const char *type, size_t size,
		unsigned int heap_id, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	int socket_id;
	void *ret;

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	/* for legacy mode, try once and with all flags */
	if (internal_config.legacy_mem) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
		goto alloc_unlock;
	}

	/*
	 * we do not pass the size hint here, because even if allocation fails,
	 * we may still be able to allocate memory from appropriate page sizes,
	 * we just need to request more memory first.
	 */

	socket_id = rte_socket_id_by_idx(heap_id);
	/*
	 * if socket ID is negative, we cannot find a socket ID for this heap -
	 * which means it's an external heap. those can have unexpected page
	 * sizes, so if the user asked to allocate from there - assume user
	 * knows what they're doing, and allow allocating from there with any
	 * page sizes.
	 */
	if (socket_id < 0)
		size_flags |= RTE_MEMZONE_SIZE_HINT_ONLY;

	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
	if (ret != NULL)
		goto alloc_unlock;

	/* if socket ID is invalid, this is an external heap */
	if (socket_id < 0)
		goto alloc_unlock;

	if (!alloc_more_mem_on_socket(heap, size, socket_id, flags, align,
			bound, contig)) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);

		/* this should have succeeded */
		if (ret == NULL)
			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
	}
alloc_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
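
/*
 * Allocate memory on a given socket. If the socket was SOCKET_ID_ANY and the
 * preferred heap has no space, other DPDK socket heaps are tried as well.
 */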
void *
malloc_heap_alloc(const char *type, size_t size, int socket_arg,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	int socket, heap_id, i;
	void *ret;

	/* return NULL if size is 0 or alignment is not power-of-2 */
	if (size == 0 || (align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages() && socket_arg < RTE_MAX_NUMA_NODES)
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* turn socket ID into heap ID */
	heap_id = malloc_socket_to_heap_id(socket);
	/* if heap id is negative, socket ID was invalid */
	if (heap_id < 0)
		return NULL;

	ret = malloc_heap_alloc_on_heap_id(type, size, heap_id, flags, align,
			bound, contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps. we are only iterating through native DPDK sockets,
	 * so external heaps won't be included.
	 */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		if (i == heap_id)
			continue;
		ret = malloc_heap_alloc_on_heap_id(type, size, i, flags, align,
				bound, contig);
		if (ret != NULL)
			return ret;
	}
	return NULL;
}
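
/* Allocate the biggest free element available on a specific heap, under the
 * heap lock.
 */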
static void *
heap_alloc_biggest_on_heap_id(const char *type, unsigned int heap_id,
		unsigned int flags, size_t align, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
	void *ret;

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	ret = heap_alloc_biggest(heap, type, flags, align, contig);

	rte_spinlock_unlock(&(heap->lock));

	return ret;
}
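
/*
 * Allocate the biggest free element on a given socket, trying other DPDK
 * socket heaps if any socket was acceptable.
 */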
void *
malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
		size_t align, bool contig)
{
	int socket, i, cur_socket, heap_id;
	void *ret;

	/* return NULL if align is not power-of-2 */
	if ((align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages())
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* turn socket ID into heap ID */
	heap_id = malloc_socket_to_heap_id(socket);
	/* if heap id is negative, socket ID was invalid */
	if (heap_id < 0)
		return NULL;

	ret = heap_alloc_biggest_on_heap_id(type, heap_id, flags, align,
			contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		cur_socket = rte_socket_id_by_idx(i);
		if (cur_socket == socket)
			continue;
		ret = heap_alloc_biggest_on_heap_id(type, i, flags, align,
				contig);
		if (ret != NULL)
			return ret;
	}
	return NULL;
}

/* this function is exposed in malloc_mp.h */
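/*
 * Return a page-aligned range of memory back to the system, one memseg at a
 * time.
 */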
int
malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
{
	int n_segs, seg_idx, max_seg_idx;
	struct rte_memseg_list *msl;
	size_t page_sz;

	msl = rte_mem_virt2memseg_list(aligned_start);
	if (msl == NULL)
		return -1;

	page_sz = (size_t)msl->page_sz;
	n_segs = aligned_len / page_sz;
	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
	max_seg_idx = seg_idx + n_segs;

	for (; seg_idx < max_seg_idx; seg_idx++) {
		struct rte_memseg *ms;

		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
		eal_memalloc_free_seg(ms);
	}
	return 0;
}
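
/*
 * Free a malloc element back to its heap and, when possible, release whole
 * pages covered by the freed region back to the system.
 */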
int
malloc_heap_free(struct malloc_elem *elem)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap;
	void *start, *aligned_start, *end, *aligned_end;
	size_t len, aligned_len, page_sz;
	struct rte_memseg_list *msl;
	unsigned int i, n_segs, before_space, after_space;
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	/* elem may be merged with previous element, so keep heap address */
	heap = elem->heap;
	msl = elem->msl;
	page_sz = (size_t)msl->page_sz;

	rte_spinlock_lock(&(heap->lock));

	/* mark element as free */
	elem->state = ELEM_FREE;

	elem = malloc_elem_free(elem);

	/* anything after this is a bonus */
	ret = 0;

	/* ...of which we can't avail if we are in legacy mode, or if this is an
	 * externally allocated segment.
	 */
	if (internal_config.legacy_mem || (msl->external > 0))
		goto free_unlock;

	/* check if we can free any memory back to the system */
	if (elem->size < page_sz)
		goto free_unlock;

	/* probably, but let's make sure, as we may not be using up full page */
	start = elem;
	len = elem->size;
	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
	end = RTE_PTR_ADD(elem, len);
	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);

	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);

	/* can't free anything */
	if (aligned_len < page_sz)
		goto free_unlock;

	/* we can free something. however, some of these pages may be marked as
	 * unfreeable, so also check that as well
	 */
	n_segs = aligned_len / page_sz;
	for (i = 0; i < n_segs; i++) {
		const struct rte_memseg *tmp =
				rte_mem_virt2memseg(aligned_start, msl);

		if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			/* this is an unfreeable segment, so move start */
			aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
		}
	}

	/* recalculate length and number of segments */
	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
	n_segs = aligned_len / page_sz;

	/* check if we can still free some pages */
	if (n_segs == 0)
		goto free_unlock;

	/* We're not done yet. We also have to check if by freeing space we will
	 * be leaving free elements that are too small to store new elements.
	 * Check if we have enough space in the beginning and at the end, or if
	 * start/end are exactly page aligned.
	 */
	before_space = RTE_PTR_DIFF(aligned_start, elem);
	after_space = RTE_PTR_DIFF(end, aligned_end);
	if (before_space != 0 &&
			before_space < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* There is not enough space before start, but we may be able to
		 * move the start forward by one page.
		 */
		if (n_segs == 1)
			goto free_unlock;

		/* move start */
		aligned_start = RTE_PTR_ADD(aligned_start, page_sz);
		aligned_len -= page_sz;
		n_segs--;
	}
	if (after_space != 0 && after_space <
			MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* There is not enough space after end, but we may be able to
		 * move the end backwards by one page.
		 */
		if (n_segs == 1)
			goto free_unlock;

		/* move end */
		aligned_end = RTE_PTR_SUB(aligned_end, page_sz);
		aligned_len -= page_sz;
		n_segs--;
	}

	/* now we can finally free us some pages */

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/*
	 * we allow secondary processes to clear the heap of this allocated
	 * memory because it is safe to do so, as even if notifications about
	 * unmapped pages don't make it to other processes, heap is shared
	 * across all processes, and will become empty of this memory anyway,
	 * and nothing can allocate it back unless primary process will be able
	 * to deliver allocation message to every single running process.
	 */

	malloc_elem_free_list_remove(elem);

	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);

	heap->total_size -= aligned_len;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* notify user about changes in memory map */
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				aligned_start, aligned_len);

		/* don't care if any of this fails */
		malloc_heap_free_pages(aligned_start, aligned_len);

		request_sync();
	} else {
		struct malloc_mp_req req;

		memset(&req, 0, sizeof(req));

		req.t = REQ_TYPE_FREE;
		req.free_req.addr = aligned_start;
		req.free_req.len = aligned_len;

		/*
		 * we request primary to deallocate pages, but we don't do it
		 * in this thread. instead, we notify primary that we would like
		 * to deallocate pages, and this process will receive another
		 * request (in parallel) that will do it for us on another
		 * thread.
		 *
		 * we also don't really care if this succeeds - the data is
		 * already removed from the heap, so it is, for all intents and
		 * purposes, hidden from the rest of DPDK even if some other
		 * process (including this one) may have these pages mapped.
		 *
		 * notifications about deallocated memory happen during sync.
		 */
		request_to_primary(&req);
	}

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
		msl->socket_id, aligned_len >> 20ULL);

	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
free_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
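
/* Resize a previously allocated element in place, if possible. */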
int
malloc_heap_resize(struct malloc_elem *elem, size_t size)
{
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));

	ret = malloc_elem_resize(elem, size);

	rte_spinlock_unlock(&(elem->heap->lock));

	return ret;
}

/*
 * Function to retrieve data for a given heap
 */
int
malloc_heap_get_stats(struct malloc_heap *heap,
		struct rte_malloc_socket_stats *socket_stats)
{
	size_t idx;
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	/* Initialise variables for heap */
	socket_stats->free_count = 0;
	socket_stats->heap_freesz_bytes = 0;
	socket_stats->greatest_free_size = 0;

	/* Iterate through free list */
	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
			!!elem; elem = LIST_NEXT(elem, free_list)) {
			socket_stats->free_count++;
			socket_stats->heap_freesz_bytes += elem->size;
			if (elem->size > socket_stats->greatest_free_size)
				socket_stats->greatest_free_size = elem->size;
		}
	}
	/* Get stats on overall heap and allocated memory on this heap */
	socket_stats->heap_totalsz_bytes = heap->total_size;
	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
			socket_stats->heap_freesz_bytes);
	socket_stats->alloc_count = heap->alloc_count;

	rte_spinlock_unlock(&heap->lock);
	return 0;
}

/*
 * Function to dump contents of a given heap
 */
void
malloc_heap_dump(struct malloc_heap *heap, FILE *f)
{
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);

	elem = heap->first;
	while (elem) {
		malloc_elem_dump(elem, f);
		elem = elem->next;
	}

	rte_spinlock_unlock(&heap->lock);
}
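
/*
 * Initialize the malloc heaps at EAL init: name the per-socket heaps, register
 * multiprocess request handlers, and (in the primary process) populate the
 * heaps with all IOVA-contiguous memory areas.
 */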
int
rte_eal_malloc_heap_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int i;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* assign names to default DPDK heaps */
		for (i = 0; i < rte_socket_count(); i++) {
			struct malloc_heap *heap = &mcfg->malloc_heaps[i];
			char heap_name[RTE_HEAP_NAME_MAX_LEN];
			int socket_id = rte_socket_id_by_idx(i);

			snprintf(heap_name, sizeof(heap_name) - 1,
					"socket_%i", socket_id);
			strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
			heap->socket_id = socket_id;
		}
	}

	if (register_mp_requests()) {
		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		return -1;
	}

	/* unlock mem hotplug here. it's safe for primary as no requests can
	 * even come before primary itself is fully initialized, and secondaries
	 * do not need to initialize the heap.
	 */
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	/* secondary process does not need to initialize anything */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* add all IOVA-contiguous areas to the heap */
	return rte_memseg_contig_walk(malloc_add_seg, NULL);
}