1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_errno.h>
15 #include <rte_eal_memconfig.h>
16 #include <rte_launch.h>
17 #include <rte_per_lcore.h>
18 #include <rte_lcore.h>
19 #include <rte_common.h>
20 #include <rte_string_fns.h>
21 #include <rte_spinlock.h>
22 #include <rte_memcpy.h>
23 #include <rte_atomic.h>
24 #include <rte_fbarray.h>
26 #include "eal_internal_cfg.h"
27 #include "eal_memalloc.h"
28 #include "malloc_elem.h"
29 #include "malloc_heap.h"
30 #include "malloc_mp.h"
32 /* start external socket IDs at a very high number */
33 #define CONST_MAX(a, b) ((a) > (b) ? (a) : (b)) /* RTE_MAX is not a constant */
34 #define EXTERNAL_HEAP_MIN_SOCKET_ID (CONST_MAX((1 << 8), RTE_MAX_NUMA_NODES))
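/*
 * Illustrative note (assumes the common default of RTE_MAX_NUMA_NODES == 8;
 * not part of the original file): CONST_MAX((1 << 8), 8) evaluates to 256,
 * so the first external heap is assigned socket ID 256 and can never
 * collide with a native NUMA socket ID (0..RTE_MAX_NUMA_NODES - 1).
 */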
37 check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
39 unsigned check_flag = 0;
41 if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
44 switch (hugepage_sz) {
46 check_flag = RTE_MEMZONE_256KB;
49 check_flag = RTE_MEMZONE_2MB;
52 check_flag = RTE_MEMZONE_16MB;
55 check_flag = RTE_MEMZONE_256MB;
58 check_flag = RTE_MEMZONE_512MB;
61 check_flag = RTE_MEMZONE_1GB;
64 check_flag = RTE_MEMZONE_4GB;
67 check_flag = RTE_MEMZONE_16GB;
70 return check_flag & flags;
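/*
 * Illustrative example (not part of the original file): a request carrying
 * RTE_MEMZONE_2MB matches a 2MB-backed memseg list, but not a 1GB one:
 *
 *	check_hugepage_sz(RTE_MEMZONE_2MB, RTE_PGSIZE_2M) -> non-zero
 *	check_hugepage_sz(RTE_MEMZONE_1GB, RTE_PGSIZE_2M) -> 0
 *
 * With no size flags set (or only RTE_MEMZONE_SIZE_HINT_ONLY), any page
 * size is accepted.
 */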
74 malloc_socket_to_heap_id(unsigned int socket_id)
76 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
79 for (i = 0; i < RTE_MAX_HEAPS; i++) {
80 struct malloc_heap *heap = &mcfg->malloc_heaps[i];
82 if (heap->socket_id == socket_id)
89 * Expand the heap with a memory area.
91 static struct malloc_elem *
92 malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
93 void *start, size_t len)
95 struct malloc_elem *elem = start;
97 malloc_elem_init(elem, heap, msl, len);
99 malloc_elem_insert(elem);
101 elem = malloc_elem_join_adjacent_free(elem);
103 malloc_elem_free_list_insert(elem);
109 malloc_add_seg(const struct rte_memseg_list *msl,
110 const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
112 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
113 struct rte_memseg_list *found_msl;
114 struct malloc_heap *heap;
115 int msl_idx, heap_idx;
120 heap_idx = malloc_socket_to_heap_id(msl->socket_id);
122 RTE_LOG(ERR, EAL, "Memseg list has invalid socket id\n");
125 heap = &mcfg->malloc_heaps[heap_idx];
127 /* msl is const, so find it */
128 msl_idx = msl - mcfg->memsegs;
130 if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
133 found_msl = &mcfg->memsegs[msl_idx];
135 malloc_heap_add_memory(heap, found_msl, ms->addr, len);
137 heap->total_size += len;
139 RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
145 * Iterates through the freelist for a heap to find a free element
146 * which can store data of the required size and with the requested alignment.
147 * If size is 0, find the biggest available elem.
148 * Returns null on failure, or pointer to element on success.
150 static struct malloc_elem *
151 find_suitable_element(struct malloc_heap *heap, size_t size,
152 unsigned int flags, size_t align, size_t bound, bool contig)
155 struct malloc_elem *elem, *alt_elem = NULL;
157 for (idx = malloc_elem_free_list_index(size);
158 idx < RTE_HEAP_NUM_FREELISTS; idx++) {
159 for (elem = LIST_FIRST(&heap->free_head[idx]);
160 !!elem; elem = LIST_NEXT(elem, free_list)) {
161 if (malloc_elem_can_hold(elem, size, align, bound,
163 if (check_hugepage_sz(flags,
166 if (alt_elem == NULL)
172 if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
179 * Iterates through the freelist for a heap to find a free element with the
180 * biggest size and requested alignment. Also sets *size to the size that
181 * was found.
182 * Returns null on failure, or pointer to element on success.
184 static struct malloc_elem *
185 find_biggest_element(struct malloc_heap *heap, size_t *size,
186 unsigned int flags, size_t align, bool contig)
188 struct malloc_elem *elem, *max_elem = NULL;
189 size_t idx, max_size = 0;
191 for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
192 for (elem = LIST_FIRST(&heap->free_head[idx]);
193 !!elem; elem = LIST_NEXT(elem, free_list)) {
195 if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) == 0 &&
196 !check_hugepage_sz(flags,
201 malloc_elem_find_max_iova_contig(elem,
204 void *data_start = RTE_PTR_ADD(elem,
205 MALLOC_ELEM_HEADER_LEN);
206 void *data_end = RTE_PTR_ADD(elem, elem->size -
207 MALLOC_ELEM_TRAILER_LEN);
208 void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
210 /* check if aligned data start is beyond end */
211 if (aligned >= data_end)
213 cur_size = RTE_PTR_DIFF(data_end, aligned);
215 if (cur_size > max_size) {
227 * Main function to allocate a block of memory from the heap.
228 * The caller must hold the heap lock. This function scans the free lists
229 * for a suitable element and carves the allocation out of it; expanding
230 * the heap with new memsegs on failure is handled by the callers.
233 heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
234 unsigned int flags, size_t align, size_t bound, bool contig)
236 struct malloc_elem *elem;
238 size = RTE_CACHE_LINE_ROUNDUP(size);
239 align = RTE_CACHE_LINE_ROUNDUP(align);
241 elem = find_suitable_element(heap, size, flags, align, bound, contig);
243 elem = malloc_elem_alloc(elem, size, align, bound, contig);
245 /* increase heap's count of allocated elements */
249 return elem == NULL ? NULL : (void *)(&elem[1]);
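/*
 * Illustrative note (not part of the original file): the value returned
 * above is &elem[1], i.e. the first byte past the struct malloc_elem
 * header, so from the caller's point of view the layout is:
 *
 *	[struct malloc_elem][user data ..............................]
 *	^elem               ^pointer returned to caller
 *
 * (a trailer cookie follows the data when malloc debug cookies are enabled)
 */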
253 heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
254 unsigned int flags, size_t align, bool contig)
256 struct malloc_elem *elem;
259 align = RTE_CACHE_LINE_ROUNDUP(align);
261 elem = find_biggest_element(heap, &size, flags, align, contig);
263 elem = malloc_elem_alloc(elem, size, align, 0, contig);
265 /* increase heap's count of allocated elements */
269 return elem == NULL ? NULL : (void *)(&elem[1]);
272 /* this function is exposed in malloc_mp.h */
274 rollback_expand_heap(struct rte_memseg **ms, int n_segs,
275 struct malloc_elem *elem, void *map_addr, size_t map_len)
278 malloc_elem_free_list_remove(elem);
279 malloc_elem_hide_region(elem, map_addr, map_len);
282 eal_memalloc_free_seg_bulk(ms, n_segs);
285 /* this function is exposed in malloc_mp.h */
287 alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
288 int socket, unsigned int flags, size_t align, size_t bound,
289 bool contig, struct rte_memseg **ms, int n_segs)
291 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
292 struct rte_memseg_list *msl;
293 struct malloc_elem *elem = NULL;
296 void *ret, *map_addr;
299 alloc_sz = (size_t)pg_sz * n_segs;
301 /* first, check if we're allowed to allocate this memory */
302 if (eal_memalloc_mem_alloc_validate(socket,
303 heap->total_size + alloc_sz) < 0) {
304 RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
308 allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
311 /* make sure we've allocated our pages... */
312 if (allocd_pages < 0)
315 map_addr = ms[0]->addr;
316 msl = rte_mem_virt2memseg_list(map_addr);
318 /* check if we wanted contiguous memory but didn't get it */
319 if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
320 RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
325 if (mcfg->dma_maskbits) {
326 mask = ~((1ULL << mcfg->dma_maskbits) - 1);
327 if (rte_eal_check_dma_mask(mask)) {
329 "%s(): couldn't allocate memory due to DMA mask\n",
335 /* add newly minted memsegs to malloc heap */
336 elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);
338 /* try once more, as now we have allocated new memory */
339 ret = find_suitable_element(heap, elt_size, flags, align, bound,
348 rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
353 try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
354 size_t elt_size, int socket, unsigned int flags, size_t align,
355 size_t bound, bool contig)
357 struct malloc_elem *elem;
358 struct rte_memseg **ms;
362 bool callback_triggered = false;
364 alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
365 MALLOC_ELEM_TRAILER_LEN, pg_sz);
366 n_segs = alloc_sz / pg_sz;
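/*
 * Illustrative numbers (assumption, not from the original code): for
 * elt_size = 3MB, align = 64 and pg_sz = 2MB, the sum align + elt_size +
 * MALLOC_ELEM_TRAILER_LEN rounds up to alloc_sz = 4MB, so n_segs = 2 pages
 * are requested from the page allocator.
 */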
368 /* n_segs is not known at compile time, so allocate the array dynamically */
369 ms = malloc(sizeof(*ms) * n_segs);
372 memset(ms, 0, sizeof(*ms) * n_segs);
374 elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
375 bound, contig, ms, n_segs);
380 map_addr = ms[0]->addr;
382 /* notify user about changes in memory map */
383 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);
385 /* notify other processes that this has happened */
386 if (request_sync()) {
387 /* we couldn't ensure all processes have mapped memory,
388 * so free it back and notify everyone that it's been
391 * technically, we could've avoided adding memory addresses to
392 * the map, but that would've led to inconsistent behavior
393 * between primary and secondary processes, as those get
394 * callbacks during sync. therefore, force primary process to
395 * do alloc-and-rollback syncs as well.
397 callback_triggered = true;
400 heap->total_size += alloc_sz;
402 RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
403 socket, alloc_sz >> 20ULL);
410 if (callback_triggered)
411 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
414 rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
424 try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
425 size_t elt_size, int socket, unsigned int flags, size_t align,
426 size_t bound, bool contig)
428 struct malloc_mp_req req;
431 memset(&req, 0, sizeof(req));
433 req.t = REQ_TYPE_ALLOC;
434 req.alloc_req.align = align;
435 req.alloc_req.bound = bound;
436 req.alloc_req.contig = contig;
437 req.alloc_req.flags = flags;
438 req.alloc_req.elt_size = elt_size;
439 req.alloc_req.page_sz = pg_sz;
440 req.alloc_req.socket = socket;
441 req.alloc_req.heap = heap; /* it's in shared memory */
443 req_result = request_to_primary(&req);
448 if (req.result != REQ_RESULT_SUCCESS)
455 try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
456 int socket, unsigned int flags, size_t align, size_t bound,
459 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
462 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
464 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
465 ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
466 flags, align, bound, contig);
468 ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
469 flags, align, bound, contig);
472 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
477 compare_pagesz(const void *a, const void *b)
479 const struct rte_memseg_list * const*mpa = a;
480 const struct rte_memseg_list * const*mpb = b;
481 const struct rte_memseg_list *msla = *mpa;
482 const struct rte_memseg_list *mslb = *mpb;
483 uint64_t pg_sz_a = msla->page_sz;
484 uint64_t pg_sz_b = mslb->page_sz;
486 if (pg_sz_a < pg_sz_b)
488 if (pg_sz_a > pg_sz_b)
494 alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
495 unsigned int flags, size_t align, size_t bound, bool contig)
497 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
498 struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
499 struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
500 uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
501 uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
503 int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
504 bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
505 unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
508 memset(requested_msls, 0, sizeof(requested_msls));
509 memset(other_msls, 0, sizeof(other_msls));
510 memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
511 memset(other_pg_sz, 0, sizeof(other_pg_sz));
514 * go through memseg list and take note of all the page sizes available,
515 * and if any of them were specifically requested by the user.
517 n_requested_msls = 0;
519 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
520 struct rte_memseg_list *msl = &mcfg->memsegs[i];
522 if (msl->socket_id != socket)
525 if (msl->base_va == NULL)
528 /* if pages of specific size were requested */
529 if (size_flags != 0 && check_hugepage_sz(size_flags,
531 requested_msls[n_requested_msls++] = msl;
532 else if (size_flags == 0 || size_hint)
533 other_msls[n_other_msls++] = msl;
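/*
 * Illustrative example (assumption, not from the original code): on a
 * socket backed by both 2MB and 1GB memseg lists, a request carrying
 * RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY places the 2MB lists into
 * requested_msls[] and the 1GB lists into other_msls[], so 2MB pages are
 * tried first and 1GB pages only as a fallback.
 */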
536 /* sort the lists, smallest first */
537 qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
539 qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
542 /* now, extract page sizes we are supposed to try */
544 n_requested_pg_sz = 0;
545 for (i = 0; i < n_requested_msls; i++) {
546 uint64_t pg_sz = requested_msls[i]->page_sz;
548 if (prev_pg_sz != pg_sz) {
549 requested_pg_sz[n_requested_pg_sz++] = pg_sz;
555 for (i = 0; i < n_other_msls; i++) {
556 uint64_t pg_sz = other_msls[i]->page_sz;
558 if (prev_pg_sz != pg_sz) {
559 other_pg_sz[n_other_pg_sz++] = pg_sz;
564 /* finally, try allocating memory of specified page sizes, starting from
567 for (i = 0; i < n_requested_pg_sz; i++) {
568 uint64_t pg_sz = requested_pg_sz[i];
571 * do not pass the size hint here, as user expects other page
572 * sizes first, before resorting to best effort allocation.
574 if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
575 align, bound, contig))
578 if (n_other_pg_sz == 0)
581 /* now, check if we can reserve anything with size hint */
582 ret = find_suitable_element(heap, size, flags, align, bound, contig);
587 * we still couldn't reserve memory, so try expanding heap with other
588 * page sizes, if there are any
590 for (i = 0; i < n_other_pg_sz; i++) {
591 uint64_t pg_sz = other_pg_sz[i];
593 if (!try_expand_heap(heap, pg_sz, size, socket, flags,
594 align, bound, contig))
600 /* this will try lower page sizes first */
602 malloc_heap_alloc_on_heap_id(const char *type, size_t size,
603 unsigned int heap_id, unsigned int flags, size_t align,
604 size_t bound, bool contig)
606 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
607 struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
608 unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
612 rte_spinlock_lock(&(heap->lock));
614 align = align == 0 ? 1 : align;
616 /* for legacy mode, try once and with all flags */
617 if (internal_config.legacy_mem) {
618 ret = heap_alloc(heap, type, size, flags, align, bound, contig);
623 * we do not pass the size hint here, because even if allocation fails,
624 * we may still be able to allocate memory from appropriate page sizes;
625 * we just need to request more memory first.
628 socket_id = rte_socket_id_by_idx(heap_id);
630 * if socket ID is negative, we cannot find a socket ID for this heap -
631 * which means it's an external heap. those can have unexpected page
632 * sizes, so if the user asked to allocate from there - assume user
633 * knows what they're doing, and allow allocating from there with any
637 size_flags |= RTE_MEMZONE_SIZE_HINT_ONLY;
639 ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
643 /* if socket ID is invalid, this is an external heap */
647 if (!alloc_more_mem_on_socket(heap, size, socket_id, flags, align,
649 ret = heap_alloc(heap, type, size, flags, align, bound, contig);
651 /* this should have succeeded */
653 RTE_LOG(ERR, EAL, "Error allocating from heap\n");
656 rte_spinlock_unlock(&(heap->lock));
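/*
 * Illustrative sketch (hypothetical, not part of this file): how a caller
 * inside the EAL might use malloc_heap_alloc(), defined just below, to get
 * 1MB of cache-line aligned, IOVA-contiguous memory from any NUMA node,
 * preferring 2MB pages but accepting other sizes.
 */
#ifdef MALLOC_HEAP_USAGE_EXAMPLES
static void *
example_alloc_1mb_contig(void)
{
	/* SOCKET_ID_ANY lets the allocator pick the caller's local socket */
	return malloc_heap_alloc("example", 1 << 20, SOCKET_ID_ANY,
			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY,
			RTE_CACHE_LINE_SIZE, 0, true);
}
#endif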
661 malloc_heap_alloc(const char *type, size_t size, int socket_arg,
662 unsigned int flags, size_t align, size_t bound, bool contig)
664 int socket, heap_id, i;
667 /* return NULL if size is 0 or alignment is not power-of-2 */
668 if (size == 0 || (align && !rte_is_power_of_2(align)))
671 if (!rte_eal_has_hugepages() && socket_arg < RTE_MAX_NUMA_NODES)
672 socket_arg = SOCKET_ID_ANY;
674 if (socket_arg == SOCKET_ID_ANY)
675 socket = malloc_get_numa_socket();
679 /* turn socket ID into heap ID */
680 heap_id = malloc_socket_to_heap_id(socket);
681 /* if heap id is negative, socket ID was invalid */
685 ret = malloc_heap_alloc_on_heap_id(type, size, heap_id, flags, align,
687 if (ret != NULL || socket_arg != SOCKET_ID_ANY)
690 /* try other heaps. we are only iterating through native DPDK sockets,
691 * so external heaps won't be included.
693 for (i = 0; i < (int) rte_socket_count(); i++) {
696 ret = malloc_heap_alloc_on_heap_id(type, size, i, flags, align,
705 heap_alloc_biggest_on_heap_id(const char *type, unsigned int heap_id,
706 unsigned int flags, size_t align, bool contig)
708 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
709 struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
712 rte_spinlock_lock(&(heap->lock));
714 align = align == 0 ? 1 : align;
716 ret = heap_alloc_biggest(heap, type, flags, align, contig);
718 rte_spinlock_unlock(&(heap->lock));
724 malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
725 size_t align, bool contig)
727 int socket, i, cur_socket, heap_id;
730 /* return NULL if align is not power-of-2 */
731 if ((align && !rte_is_power_of_2(align)))
734 if (!rte_eal_has_hugepages())
735 socket_arg = SOCKET_ID_ANY;
737 if (socket_arg == SOCKET_ID_ANY)
738 socket = malloc_get_numa_socket();
742 /* turn socket ID into heap ID */
743 heap_id = malloc_socket_to_heap_id(socket);
744 /* if heap id is negative, socket ID was invalid */
748 ret = heap_alloc_biggest_on_heap_id(type, heap_id, flags, align,
750 if (ret != NULL || socket_arg != SOCKET_ID_ANY)
753 /* try other heaps */
754 for (i = 0; i < (int) rte_socket_count(); i++) {
755 cur_socket = rte_socket_id_by_idx(i);
756 if (cur_socket == socket)
758 ret = heap_alloc_biggest_on_heap_id(type, i, flags, align,
766 /* this function is exposed in malloc_mp.h */
768 malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
770 int n_segs, seg_idx, max_seg_idx;
771 struct rte_memseg_list *msl;
774 msl = rte_mem_virt2memseg_list(aligned_start);
778 page_sz = (size_t)msl->page_sz;
779 n_segs = aligned_len / page_sz;
780 seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
781 max_seg_idx = seg_idx + n_segs;
783 for (; seg_idx < max_seg_idx; seg_idx++) {
784 struct rte_memseg *ms;
786 ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
787 eal_memalloc_free_seg(ms);
793 malloc_heap_free(struct malloc_elem *elem)
795 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
796 struct malloc_heap *heap;
797 void *start, *aligned_start, *end, *aligned_end;
798 size_t len, aligned_len, page_sz;
799 struct rte_memseg_list *msl;
800 unsigned int i, n_segs, before_space, after_space;
803 if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
806 /* elem may be merged with previous element, so keep heap address */
809 page_sz = (size_t)msl->page_sz;
811 rte_spinlock_lock(&(heap->lock));
813 /* mark element as free */
814 elem->state = ELEM_FREE;
816 elem = malloc_elem_free(elem);
818 /* anything after this is a bonus */
821 /* ...which we can't take advantage of if we are in legacy mode, or if this is an
822 * externally allocated segment.
824 if (internal_config.legacy_mem || (msl->external > 0))
827 /* check if we can free any memory back to the system */
828 if (elem->size < page_sz)
831 /* probably, but let's make sure, as we may not be using up a full page */
834 aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
835 end = RTE_PTR_ADD(elem, len);
836 aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);
838 aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
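/*
 * Illustrative numbers (assumption, not from the original code): with 2MB
 * pages, a freed element spanning [0x7f0000201040, 0x7f0000a00f80) yields
 * aligned_start = 0x7f0000400000 and aligned_end = 0x7f0000a00000, so
 * aligned_len = 0x600000 and three whole pages can be returned to the
 * system.
 */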
840 /* can't free anything */
841 if (aligned_len < page_sz)
844 /* we can free something. however, some of these pages may be marked as
845 * unfreeable, so check for that as well
847 n_segs = aligned_len / page_sz;
848 for (i = 0; i < n_segs; i++) {
849 const struct rte_memseg *tmp =
850 rte_mem_virt2memseg(aligned_start, msl);
852 if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
853 /* this is an unfreeable segment, so move start */
854 aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
858 /* recalculate length and number of segments */
859 aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
860 n_segs = aligned_len / page_sz;
862 /* check if we can still free some pages */
866 /* We're not done yet. We also have to check if by freeing space we will
867 * be leaving free elements that are too small to store new elements.
868 * Check if we have enough space in the beginning and at the end, or if
869 * start/end are exactly page aligned.
871 before_space = RTE_PTR_DIFF(aligned_start, elem);
872 after_space = RTE_PTR_DIFF(end, aligned_end);
873 if (before_space != 0 &&
874 before_space < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
875 /* There is not enough space before start, but we may be able to
876 * move the start forward by one page.
882 aligned_start = RTE_PTR_ADD(aligned_start, page_sz);
883 aligned_len -= page_sz;
886 if (after_space != 0 && after_space <
887 MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
888 /* There is not enough space after end, but we may be able to
889 * move the end backwards by one page.
895 aligned_end = RTE_PTR_SUB(aligned_end, page_sz);
896 aligned_len -= page_sz;
900 /* now we can finally free some pages */
902 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
905 * we allow secondary processes to clear the heap of this allocated
906 * memory because it is safe to do so, as even if notifications about
907 * unmapped pages don't make it to other processes, heap is shared
908 * across all processes, and will become empty of this memory anyway,
909 * and nothing can allocate it back unless the primary process is able
910 * to deliver the allocation message to every single running process.
913 malloc_elem_free_list_remove(elem);
915 malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);
917 heap->total_size -= aligned_len;
919 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
920 /* notify user about changes in memory map */
921 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
922 aligned_start, aligned_len);
924 /* don't care if any of this fails */
925 malloc_heap_free_pages(aligned_start, aligned_len);
929 struct malloc_mp_req req;
931 memset(&req, 0, sizeof(req));
933 req.t = REQ_TYPE_FREE;
934 req.free_req.addr = aligned_start;
935 req.free_req.len = aligned_len;
938 * we request primary to deallocate pages, but we don't do it
939 * in this thread. instead, we notify primary that we would like
940 * to deallocate pages, and this process will receive another
941 * request (in parallel) that will do it for us on another
944 * we also don't really care if this succeeds - the data is
945 * already removed from the heap, so it is, for all intents and
946 * purposes, hidden from the rest of DPDK even if some other
947 * process (including this one) may have these pages mapped.
949 * notifications about deallocated memory happen during sync.
951 request_to_primary(&req);
954 RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
955 msl->socket_id, aligned_len >> 20ULL);
957 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
959 rte_spinlock_unlock(&(heap->lock));
964 malloc_heap_resize(struct malloc_elem *elem, size_t size)
968 if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
971 rte_spinlock_lock(&(elem->heap->lock));
973 ret = malloc_elem_resize(elem, size);
975 rte_spinlock_unlock(&(elem->heap->lock));
981 * Function to retrieve statistics for a given heap
984 malloc_heap_get_stats(struct malloc_heap *heap,
985 struct rte_malloc_socket_stats *socket_stats)
988 struct malloc_elem *elem;
990 rte_spinlock_lock(&heap->lock);
992 /* Initialise variables for heap */
993 socket_stats->free_count = 0;
994 socket_stats->heap_freesz_bytes = 0;
995 socket_stats->greatest_free_size = 0;
997 /* Iterate through free list */
998 for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
999 for (elem = LIST_FIRST(&heap->free_head[idx]);
1000 !!elem; elem = LIST_NEXT(elem, free_list))
1002 socket_stats->free_count++;
1003 socket_stats->heap_freesz_bytes += elem->size;
1004 if (elem->size > socket_stats->greatest_free_size)
1005 socket_stats->greatest_free_size = elem->size;
1008 /* Get stats on overall heap and allocated memory on this heap */
1009 socket_stats->heap_totalsz_bytes = heap->total_size;
1010 socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
1011 socket_stats->heap_freesz_bytes);
1012 socket_stats->alloc_count = heap->alloc_count;
1014 rte_spinlock_unlock(&heap->lock);
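/*
 * Illustrative sketch (hypothetical, not part of this file): reading the
 * statistics of heap 0. Applications would normally go through the public
 * rte_malloc statistics API rather than calling this directly.
 */
#ifdef MALLOC_HEAP_USAGE_EXAMPLES
static void
example_print_heap0_stats(FILE *f)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_malloc_socket_stats stats;

	if (malloc_heap_get_stats(&mcfg->malloc_heaps[0], &stats) == 0)
		fprintf(f, "free: %zu bytes, allocated: %zu bytes\n",
				stats.heap_freesz_bytes,
				stats.heap_allocsz_bytes);
}
#endif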
1019 * Function to dump the contents of a given heap
1022 malloc_heap_dump(struct malloc_heap *heap, FILE *f)
1024 struct malloc_elem *elem;
1026 rte_spinlock_lock(&heap->lock);
1028 fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
1029 fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);
1033 malloc_elem_dump(elem, f);
1037 rte_spinlock_unlock(&heap->lock);
1041 destroy_seg(struct malloc_elem *elem, size_t len)
1043 struct malloc_heap *heap = elem->heap;
1044 struct rte_memseg_list *msl;
1048 /* notify all subscribers that a memory area is going to be removed */
1049 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len);
1051 /* this element can be removed */
1052 malloc_elem_free_list_remove(elem);
1053 malloc_elem_hide_region(elem, elem, len);
1055 heap->total_size -= len;
1057 memset(elem, 0, sizeof(*elem));
1059 /* destroy the fbarray backing this memory */
1060 if (rte_fbarray_destroy(&msl->memseg_arr) < 0)
1063 /* reset the memseg list */
1064 memset(msl, 0, sizeof(*msl));
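/*
 * Illustrative sketch (hypothetical, not part of this file): registering 16
 * externally allocated 2MB pages with a user-created heap via the function
 * below. In real code this is reached through the public rte_malloc heap
 * API, with the heap and hotplug locks taken by the caller.
 */
#ifdef MALLOC_HEAP_USAGE_EXAMPLES
static int
example_add_external_pages(struct malloc_heap *ext_heap, void *va_32mb,
		rte_iova_t iovas[16])
{
	/* the 32MB region becomes a new memseg list backing ext_heap */
	return malloc_heap_add_external_memory(ext_heap, va_32mb, iovas,
			16, RTE_PGSIZE_2M);
}
#endif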
1070 malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr,
1071 rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
1073 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1074 char fbarray_name[RTE_FBARRAY_NAME_LEN];
1075 struct rte_memseg_list *msl = NULL;
1076 struct rte_fbarray *arr;
1077 size_t seg_len = n_pages * page_sz;
1080 /* first, find a free memseg list */
1081 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1082 struct rte_memseg_list *tmp = &mcfg->memsegs[i];
1083 if (tmp->base_va == NULL) {
1089 RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n");
1094 snprintf(fbarray_name, sizeof(fbarray_name) - 1, "%s_%p",
1095 heap->name, va_addr);
1097 /* create the backing fbarray */
1098 if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages,
1099 sizeof(struct rte_memseg)) < 0) {
1100 RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n");
1103 arr = &msl->memseg_arr;
1105 /* fbarray created, fill it up */
1106 for (i = 0; i < n_pages; i++) {
1107 struct rte_memseg *ms;
1109 rte_fbarray_set_used(arr, i);
1110 ms = rte_fbarray_get(arr, i);
1111 ms->addr = RTE_PTR_ADD(va_addr, i * page_sz);
1112 ms->iova = iova_addrs == NULL ? RTE_BAD_IOVA : iova_addrs[i];
1113 ms->hugepage_sz = page_sz;
1115 ms->nchannel = rte_memory_get_nchannel();
1116 ms->nrank = rte_memory_get_nrank();
1117 ms->socket_id = heap->socket_id;
1120 /* set up the memseg list */
1121 msl->base_va = va_addr;
1122 msl->page_sz = page_sz;
1123 msl->socket_id = heap->socket_id;
1128 /* erase contents of new memory */
1129 memset(va_addr, 0, seg_len);
1131 /* now, add newly minted memory to the malloc heap */
1132 malloc_heap_add_memory(heap, msl, va_addr, seg_len);
1134 heap->total_size += seg_len;
1137 RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
1138 heap->name, va_addr);
1140 /* notify all subscribers that a new memory area has been added */
1141 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
1148 malloc_heap_remove_external_memory(struct malloc_heap *heap, void *va_addr,
1151 struct malloc_elem *elem = heap->first;
1153 /* find element with specified va address */
1154 while (elem != NULL && elem != va_addr) {
1156 /* stop if we've blown past our VA */
1157 if (elem > (struct malloc_elem *)va_addr) {
1162 /* check if element was found */
1163 if (elem == NULL || elem->msl->len != len) {
1167 /* if the element is busy, or its size doesn't match the segment len, it's in use */
1168 if (elem->state == ELEM_BUSY || elem->size != len) {
1172 return destroy_seg(elem, len);
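/*
 * Illustrative sketch (hypothetical, not part of this file): removing the
 * same 16 x 2MB region registered in the earlier example. The length must
 * match the element exactly, and the segment must be completely unused.
 */
#ifdef MALLOC_HEAP_USAGE_EXAMPLES
static int
example_remove_external_pages(struct malloc_heap *ext_heap, void *va_32mb)
{
	return malloc_heap_remove_external_memory(ext_heap, va_32mb,
			16 * RTE_PGSIZE_2M);
}
#endif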
1176 malloc_heap_create(struct malloc_heap *heap, const char *heap_name)
1178 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1179 uint32_t next_socket_id = mcfg->next_socket_id;
1181 /* prevent overflow. did you really create 2 billion heaps??? */
1182 if (next_socket_id > INT32_MAX) {
1183 RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
1188 /* initialize empty heap */
1189 heap->alloc_count = 0;
1192 LIST_INIT(heap->free_head);
1193 rte_spinlock_init(&heap->lock);
1194 heap->total_size = 0;
1195 heap->socket_id = next_socket_id;
1197 /* we hold a global mem hotplug writelock, so it's safe to increment */
1198 mcfg->next_socket_id++;
1201 strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
1206 malloc_heap_destroy(struct malloc_heap *heap)
1208 if (heap->alloc_count != 0) {
1209 RTE_LOG(ERR, EAL, "Heap is still in use\n");
1213 if (heap->first != NULL || heap->last != NULL) {
1214 RTE_LOG(ERR, EAL, "Heap still contains memory segments\n");
1218 if (heap->total_size != 0)
1219 RTE_LOG(ERR, EAL, "Total size not zero, heap is likely corrupt\n");
1221 /* after this, the lock will be dropped */
1222 memset(heap, 0, sizeof(*heap));
1228 rte_eal_malloc_heap_init(void)
1230 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1233 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1234 /* assign min socket ID to external heaps */
1235 mcfg->next_socket_id = EXTERNAL_HEAP_MIN_SOCKET_ID;
1237 /* assign names to default DPDK heaps */
1238 for (i = 0; i < rte_socket_count(); i++) {
1239 struct malloc_heap *heap = &mcfg->malloc_heaps[i];
1240 char heap_name[RTE_HEAP_NAME_MAX_LEN];
1241 int socket_id = rte_socket_id_by_idx(i);
1243 snprintf(heap_name, sizeof(heap_name) - 1,
1244 "socket_%i", socket_id);
1245 strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
1246 heap->socket_id = socket_id;
1251 if (register_mp_requests()) {
1252 RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
1253 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
1257 /* unlock mem hotplug here. it's safe for primary as no requests can
1258 * even come before primary itself is fully initialized, and secondaries
1259 * do not need to initialize the heap.
1261 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
1263 /* secondary process does not need to initialize anything */
1264 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1267 /* add all IOVA-contiguous areas to the heap */
1268 return rte_memseg_contig_walk(malloc_add_seg, NULL);