/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memzone.h>
#include <rte_fbarray.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_private.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "malloc_mp.h"
/* start external socket IDs at a very high number */
#define CONST_MAX(a, b) (a > b ? a : b) /* RTE_MAX is not a constant */
#define EXTERNAL_HEAP_MIN_SOCKET_ID (CONST_MAX((1 << 8), RTE_MAX_NUMA_NODES))
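/*
 * For illustration (exact values are build-configuration dependent): with
 * the common default RTE_MAX_NUMA_NODES of 32, CONST_MAX((1 << 8), 32)
 * evaluates to 256, so the first external heap created gets socket ID 256,
 * the next 257, and so on - safely above any physical NUMA node ID.
 */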
static unsigned
check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
{
	unsigned check_flag = 0;

	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
		return 1;

	switch (hugepage_sz) {
	case RTE_PGSIZE_256K:
		check_flag = RTE_MEMZONE_256KB;
		break;
	case RTE_PGSIZE_2M:
		check_flag = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_16M:
		check_flag = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_256M:
		check_flag = RTE_MEMZONE_256MB;
		break;
	case RTE_PGSIZE_512M:
		check_flag = RTE_MEMZONE_512MB;
		break;
	case RTE_PGSIZE_1G:
		check_flag = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_4G:
		check_flag = RTE_MEMZONE_4GB;
		break;
	case RTE_PGSIZE_16G:
		check_flag = RTE_MEMZONE_16GB;
	}

	return check_flag & flags;
}
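/*
 * Behaviour sketch (illustrative): with no page-size flags set (flags == 0
 * or only RTE_MEMZONE_SIZE_HINT_ONLY), any hugepage size is acceptable and
 * the function returns 1. Otherwise the result is nonzero only when the
 * page size matches one of the requested flags, e.g.
 * check_hugepage_sz(RTE_MEMZONE_2MB, RTE_PGSIZE_2M) is nonzero, while
 * check_hugepage_sz(RTE_MEMZONE_2MB, RTE_PGSIZE_1G) is 0.
 */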
int
malloc_socket_to_heap_id(unsigned int socket_id)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i;

	for (i = 0; i < RTE_MAX_HEAPS; i++) {
		struct malloc_heap *heap = &mcfg->malloc_heaps[i];

		if (heap->socket_id == socket_id)
			return i;
	}
	return -1;
}
/*
 * Expand the heap with a memory area.
 */
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
		void *start, size_t len, bool dirty)
{
	struct malloc_elem *elem = start;

	malloc_elem_init(elem, heap, msl, len, elem, len, dirty);

	malloc_elem_insert(elem);

	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	return elem;
}
static int
malloc_add_seg(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct malloc_heap *heap;
	int msl_idx, heap_idx;

	if (msl->external)
		return 0;

	heap_idx = malloc_socket_to_heap_id(msl->socket_id);
	if (heap_idx < 0) {
		RTE_LOG(ERR, EAL, "Memseg list has invalid socket id\n");
		return -1;
	}
	heap = &mcfg->malloc_heaps[heap_idx];

	/* msl is const, so find it */
	msl_idx = msl - mcfg->memsegs;

	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	found_msl = &mcfg->memsegs[msl_idx];

	malloc_heap_add_memory(heap, found_msl, ms->addr, len,
			ms->flags & RTE_MEMSEG_FLAG_DIRTY);

	heap->total_size += len;

	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
			msl->socket_id);
	return 0;
}
/*
 * Iterates through the freelist for a heap to find a free element
 * which can store data of the required size and with the requested alignment.
 * If size is 0, find the biggest available elem.
 * Returns null on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	size_t idx;
	struct malloc_elem *elem, *alt_elem = NULL;

	for (idx = malloc_elem_free_list_index(size);
			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			if (malloc_elem_can_hold(elem, size, align, bound,
					contig)) {
				if (check_hugepage_sz(flags,
						elem->msl->page_sz))
					return elem;
				if (alt_elem == NULL)
					alt_elem = elem;
			}
		}
	}

	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
		return alt_elem;

	return NULL;
}
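/*
 * Fallback sketch: with flags == (RTE_MEMZONE_2MB |
 * RTE_MEMZONE_SIZE_HINT_ONLY), an element backed by 2 MiB pages is
 * returned as soon as one fits; if only elements on other page sizes can
 * hold the request, the first such element is remembered in alt_elem and
 * returned instead. Without the hint flag, a page-size mismatch is a hard
 * failure and NULL is returned.
 */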
/*
 * Iterates through the freelist for a heap to find a free element with the
 * biggest size and requested alignment. Will also set size to whatever element
 * size that was found.
 * Returns null on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_biggest_element(struct malloc_heap *heap, size_t *size,
		unsigned int flags, size_t align, bool contig)
{
	struct malloc_elem *elem, *max_elem = NULL;
	size_t idx, max_size = 0;

	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			size_t cur_size;
			if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) == 0 &&
					!check_hugepage_sz(flags,
						elem->msl->page_sz))
				continue;
			if (contig) {
				cur_size =
					malloc_elem_find_max_iova_contig(elem,
							align);
			} else {
				void *data_start = RTE_PTR_ADD(elem,
						MALLOC_ELEM_HEADER_LEN);
				void *data_end = RTE_PTR_ADD(elem, elem->size -
						MALLOC_ELEM_TRAILER_LEN);
				void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
						align);
				/* check if aligned data start is beyond end */
				if (aligned >= data_end)
					continue;
				cur_size = RTE_PTR_DIFF(data_end, aligned);
			}
			if (cur_size > max_size) {
				max_size = cur_size;
				max_elem = elem;
			}
		}
	}

	*size = max_size;
	return max_elem;
}
/*
 * Main function to allocate a block of memory from the heap.
 * It locks the free list, scans it, and adds a new memseg if the
 * scan fails. Once the new memseg is added, it re-scans and should return
 * the new element after releasing the lock.
 */
static void *
heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct malloc_elem *elem;
	size_t user_size = size;

	size = RTE_CACHE_LINE_ROUNDUP(size);
	align = RTE_CACHE_LINE_ROUNDUP(align);

	/* roundup might cause an overflow */
	if (size == 0)
		return NULL;

	elem = find_suitable_element(heap, size, flags, align, bound, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, bound, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;

		asan_set_redzone(elem, user_size);
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}
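/*
 * Layout note (for illustration): the usable area returned to the caller
 * starts at &elem[1], i.e. right after the malloc_elem header. A 64-byte
 * request therefore consumes MALLOC_ELEM_HEADER_LEN bytes of header plus
 * any trailer/padding on top of the cache-line-rounded user size.
 */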
static void *
heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
		unsigned int flags, size_t align, bool contig)
{
	struct malloc_elem *elem;
	size_t size;

	align = RTE_CACHE_LINE_ROUNDUP(align);

	elem = find_biggest_element(heap, &size, flags, align, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, 0, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;

		asan_set_redzone(elem, size);
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}
/* this function is exposed in malloc_mp.h */
void
rollback_expand_heap(struct rte_memseg **ms, int n_segs,
		struct malloc_elem *elem, void *map_addr, size_t map_len)
{
	if (elem != NULL) {
		malloc_elem_free_list_remove(elem);
		malloc_elem_hide_region(elem, map_addr, map_len);
	}

	eal_memalloc_free_seg_bulk(ms, n_segs);
}
/* this function is exposed in malloc_mp.h */
struct malloc_elem *
alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig, struct rte_memseg **ms, int n_segs)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct malloc_elem *elem = NULL;
	size_t alloc_sz;
	int allocd_pages, i;
	bool dirty = false;
	void *ret, *map_addr;

	alloc_sz = (size_t)pg_sz * n_segs;

	/* first, check if we're allowed to allocate this memory */
	if (eal_memalloc_mem_alloc_validate(socket,
			heap->total_size + alloc_sz) < 0) {
		RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
		return NULL;
	}

	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
			socket, true);

	/* make sure we've allocated our pages... */
	if (allocd_pages < 0)
		return NULL;

	map_addr = ms[0]->addr;
	msl = rte_mem_virt2memseg_list(map_addr);

	/* check if we wanted contiguous memory but didn't get it */
	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
				__func__);
		goto fail;
	}

	/*
	 * Once we have all the memseg lists configured, if there is a dma mask
	 * set, check iova addresses are not out of range. Otherwise the device
	 * setting the dma mask could have problems with the mapped memory.
	 *
	 * There are two situations when this can happen:
	 *	1) memory initialization
	 *	2) dynamic memory allocation
	 *
	 * For 1), an error when checking dma mask implies the app cannot be
	 * executed. For 2), it implies the new memory cannot be added.
	 */
	if (mcfg->dma_maskbits &&
	    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
		/*
		 * Currently this can only happen if IOMMU is enabled
		 * and the address width supported by the IOMMU hw is
		 * not enough for using the memory mapped IOVAs.
		 *
		 * If IOVA is VA, advise trying '--iova-mode pa',
		 * which could solve some situations when IOVA VA is not
		 * feasible.
		 */
		RTE_LOG(ERR, EAL,
			"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask\n",
			__func__);

		/*
		 * If IOVA is VA and it is possible to run with IOVA PA
		 * because the user is root, give advice for solving the
		 * issue.
		 */
		if ((rte_eal_iova_mode() == RTE_IOVA_VA) &&
				rte_eal_using_phys_addrs())
			RTE_LOG(ERR, EAL,
				"%s(): Please try initializing EAL with --iova-mode=pa parameter\n",
				__func__);
		goto fail;
	}

	/* Element is dirty if it contains at least one dirty page. */
	for (i = 0; i < allocd_pages; i++)
		dirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;

	/* add newly minted memsegs to malloc heap */
	elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);

	/* try once more, as now we have allocated new memory */
	ret = find_suitable_element(heap, elt_size, flags, align, bound,
			contig);

	if (ret == NULL)
		goto fail;

	return elem;

fail:
	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
	return NULL;
}
static int
try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct malloc_elem *elem;
	struct rte_memseg **ms;
	void *map_addr;
	size_t alloc_sz;
	int n_segs;
	bool callback_triggered = false;

	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
			MALLOC_ELEM_TRAILER_LEN, pg_sz);
	n_segs = alloc_sz / pg_sz;

	/* we can't know in advance how many pages we'll need, so we malloc */
	ms = malloc(sizeof(*ms) * n_segs);
	if (ms == NULL)
		return -1;
	memset(ms, 0, sizeof(*ms) * n_segs);

	elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
			bound, contig, ms, n_segs);

	if (elem == NULL)
		goto free_ms;

	map_addr = ms[0]->addr;

	/* notify user about changes in memory map */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);

	/* notify other processes that this has happened */
	if (request_sync()) {
		/* we couldn't ensure all processes have mapped memory,
		 * so free it back and notify everyone that it's been
		 * freed back.
		 *
		 * technically, we could've avoided adding memory addresses to
		 * the map, but that would've led to inconsistent behavior
		 * between primary and secondary processes, as those get
		 * callbacks during sync. therefore, force primary process to
		 * do alloc-and-rollback syncs as well.
		 */
		callback_triggered = true;
		goto free_elem;
	}
	heap->total_size += alloc_sz;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
		socket, alloc_sz >> 20ULL);

	free(ms);

	return 0;

free_elem:
	if (callback_triggered)
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				map_addr, alloc_sz);

	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);

	request_sync();
free_ms:
	free(ms);

	return -1;
}
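/*
 * Worked sizing example for alloc_sz above (illustrative numbers):
 * requesting elt_size = 3 MiB with align = 64 on 2 MiB pages gives
 * RTE_ALIGN_CEIL(64 + 3 MiB + MALLOC_ELEM_TRAILER_LEN, 2 MiB) = 4 MiB,
 * i.e. n_segs = 2 pages; worst-case padding means expansion may map one
 * more page than the element strictly needs.
 */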
static int
try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_mp_req req;
	int req_result;

	memset(&req, 0, sizeof(req));

	req.t = REQ_TYPE_ALLOC;
	req.alloc_req.align = align;
	req.alloc_req.bound = bound;
	req.alloc_req.contig = contig;
	req.alloc_req.flags = flags;
	req.alloc_req.elt_size = elt_size;
	req.alloc_req.page_sz = pg_sz;
	req.alloc_req.socket = socket;
	req.alloc_req.malloc_heap_idx = heap - mcfg->malloc_heaps;

	req_result = request_to_primary(&req);

	if (req_result != 0)
		return -1;

	if (req.result != REQ_RESULT_SUCCESS)
		return -1;

	return 0;
}
static int
try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig)
{
	int ret;

	rte_mcfg_mem_write_lock();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	} else {
		ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	}

	rte_mcfg_mem_write_unlock();
	return ret;
}
static int
compare_pagesz(const void *a, const void *b)
{
	const struct rte_memseg_list * const *mpa = a;
	const struct rte_memseg_list * const *mpb = b;
	const struct rte_memseg_list *msla = *mpa;
	const struct rte_memseg_list *mslb = *mpb;
	uint64_t pg_sz_a = msla->page_sz;
	uint64_t pg_sz_b = mslb->page_sz;

	if (pg_sz_a < pg_sz_b)
		return -1;
	if (pg_sz_a > pg_sz_b)
		return 1;
	return 0;
}
static int
alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t prev_pg_sz;
	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	memset(requested_msls, 0, sizeof(requested_msls));
	memset(other_msls, 0, sizeof(other_msls));
	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
	memset(other_pg_sz, 0, sizeof(other_pg_sz));

	/*
	 * go through memseg list and take note of all the page sizes available,
	 * and if any of them were specifically requested by the user.
	 */
	n_requested_msls = 0;
	n_other_msls = 0;
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->socket_id != socket)
			continue;

		if (msl->base_va == NULL)
			continue;

		/* if pages of specific size were requested */
		if (size_flags != 0 && check_hugepage_sz(size_flags,
				msl->page_sz))
			requested_msls[n_requested_msls++] = msl;
		else if (size_flags == 0 || size_hint)
			other_msls[n_other_msls++] = msl;
	}

	/* sort the lists, smallest first */
	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
			compare_pagesz);
	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
			compare_pagesz);

	/* now, extract page sizes we are supposed to try */
	prev_pg_sz = 0;
	n_requested_pg_sz = 0;
	for (i = 0; i < n_requested_msls; i++) {
		uint64_t pg_sz = requested_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}
	prev_pg_sz = 0;
	n_other_pg_sz = 0;
	for (i = 0; i < n_other_msls; i++) {
		uint64_t pg_sz = other_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			other_pg_sz[n_other_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}

	/* finally, try allocating memory of specified page sizes, starting from
	 * the smallest sizes
	 */
	for (i = 0; i < n_requested_pg_sz; i++) {
		uint64_t pg_sz = requested_pg_sz[i];

		/*
		 * do not pass the size hint here, as user expects other page
		 * sizes first, before resorting to best effort allocation.
		 */
		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
				align, bound, contig))
			return 0;
	}
	if (n_other_pg_sz == 0)
		return -1;

	/* now, check if we can reserve anything with size hint */
	ret = find_suitable_element(heap, size, flags, align, bound, contig);
	if (ret != NULL)
		return 0;

	/*
	 * we still couldn't reserve memory, so try expanding heap with other
	 * page sizes, if there are any
	 */
	for (i = 0; i < n_other_pg_sz; i++) {
		uint64_t pg_sz = other_pg_sz[i];

		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
				align, bound, contig))
			return 0;
	}
	return -1;
}
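/*
 * Ordering illustration (hypothetical system with 2 MiB and 1 GiB pages):
 * a request carrying RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY first
 * tries to expand the heap with 1 GiB pages; only if that fails does it
 * re-scan the free lists with the hint honored and then fall back to
 * expanding with 2 MiB pages.
 */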
/* this will try lower page sizes first */
static void *
malloc_heap_alloc_on_heap_id(const char *type, size_t size,
		unsigned int heap_id, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	int socket_id;
	void *ret;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	/* for legacy mode, try once and with all flags */
	if (internal_conf->legacy_mem) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
		goto alloc_unlock;
	}

	/*
	 * we do not pass the size hint here, because even if allocation fails,
	 * we may still be able to allocate memory from appropriate page sizes,
	 * we just need to request more memory first.
	 */

	socket_id = rte_socket_id_by_idx(heap_id);
	/*
	 * if socket ID is negative, we cannot find a socket ID for this heap -
	 * which means it's an external heap. those can have unexpected page
	 * sizes, so if the user asked to allocate from there - assume user
	 * knows what they're doing, and allow allocating from there with any
	 * page size flags.
	 */
	if (socket_id < 0)
		size_flags |= RTE_MEMZONE_SIZE_HINT_ONLY;

	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
	if (ret != NULL)
		goto alloc_unlock;

	/* if socket ID is invalid, this is an external heap */
	if (socket_id < 0)
		goto alloc_unlock;

	if (!alloc_more_mem_on_socket(heap, size, socket_id, flags, align,
			bound, contig)) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);

		/* this should have succeeded */
		if (ret == NULL)
			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
	}
alloc_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
static unsigned int
malloc_get_numa_socket(void)
{
	const struct internal_config *conf = eal_get_internal_configuration();
	unsigned int socket_id = rte_socket_id();
	unsigned int idx;

	if (socket_id != (unsigned int)SOCKET_ID_ANY)
		return socket_id;

	/* for control threads, return first socket where memory is available */
	for (idx = 0; idx < rte_socket_count(); idx++) {
		socket_id = rte_socket_id_by_idx(idx);
		if (conf->socket_mem[socket_id] != 0)
			return socket_id;
	}

	return rte_socket_id_by_idx(0);
}
void *
malloc_heap_alloc(const char *type, size_t size, int socket_arg,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	int socket, heap_id, i;
	void *ret;

	/* return NULL if size is 0 or alignment is not power-of-2 */
	if (size == 0 || (align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages() && socket_arg < RTE_MAX_NUMA_NODES)
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* turn socket ID into heap ID */
	heap_id = malloc_socket_to_heap_id(socket);
	/* if heap id is negative, socket ID was invalid */
	if (heap_id < 0)
		return NULL;

	ret = malloc_heap_alloc_on_heap_id(type, size, heap_id, flags, align,
			bound, contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps. we are only iterating through native DPDK sockets,
	 * so external heaps won't be included.
	 */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		if (i == heap_id)
			continue;
		ret = malloc_heap_alloc_on_heap_id(type, size, i, flags, align,
				bound, contig);
		if (ret != NULL)
			return ret;
	}
	return NULL;
}
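/*
 * A hypothetical call path for orientation: the public
 * rte_malloc_socket("ring", 4096, 64, 0) ends up here roughly as
 * malloc_heap_alloc("ring", 4096, 0, 0, 64, 0, false) - no memzone flags,
 * no boundary constraint and no IOVA-contiguity requirement.
 */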
static void *
heap_alloc_biggest_on_heap_id(const char *type, unsigned int heap_id,
		unsigned int flags, size_t align, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
	void *ret;

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	ret = heap_alloc_biggest(heap, type, flags, align, contig);

	rte_spinlock_unlock(&(heap->lock));

	return ret;
}
void *
malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
		size_t align, bool contig)
{
	int socket, i, cur_socket, heap_id;
	void *ret;

	/* return NULL if align is not power-of-2 */
	if ((align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages())
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* turn socket ID into heap ID */
	heap_id = malloc_socket_to_heap_id(socket);
	/* if heap id is negative, socket ID was invalid */
	if (heap_id < 0)
		return NULL;

	ret = heap_alloc_biggest_on_heap_id(type, heap_id, flags, align,
			contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		cur_socket = rte_socket_id_by_idx(i);
		if (cur_socket == socket)
			continue;
		ret = heap_alloc_biggest_on_heap_id(type, i, flags, align,
				contig);
		if (ret != NULL)
			return ret;
	}
	return NULL;
}
/* this function is exposed in malloc_mp.h */
int
malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
{
	int n_segs, seg_idx, max_seg_idx;
	struct rte_memseg_list *msl;
	size_t page_sz;

	msl = rte_mem_virt2memseg_list(aligned_start);
	if (msl == NULL)
		return -1;

	page_sz = (size_t)msl->page_sz;
	n_segs = aligned_len / page_sz;
	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
	max_seg_idx = seg_idx + n_segs;

	for (; seg_idx < max_seg_idx; seg_idx++) {
		struct rte_memseg *ms;

		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
		eal_memalloc_free_seg(ms);
	}
	return 0;
}
int
malloc_heap_free(struct malloc_elem *elem)
{
	struct malloc_heap *heap;
	void *start, *aligned_start, *end, *aligned_end;
	size_t len, aligned_len, page_sz;
	struct rte_memseg_list *msl;
	unsigned int i, n_segs, before_space, after_space;
	int ret;
	bool unmapped = false;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	asan_clear_redzone(elem);

	/* elem may be merged with previous element, so keep heap address */
	heap = elem->heap;
	msl = elem->msl;
	page_sz = (size_t)msl->page_sz;

	rte_spinlock_lock(&(heap->lock));

	void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
	size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;

	/* mark element as free */
	elem->state = ELEM_FREE;

	elem = malloc_elem_free(elem);
	/* anything after this is a bonus */
	ret = 0;

	/* ...of which we can't avail if we are in legacy mode, or if this is an
	 * externally allocated segment.
	 */
	if (internal_conf->legacy_mem || (msl->external > 0))
		goto free_unlock;

	/* check if we can free any memory back to the system */
	if (elem->size < page_sz)
		goto free_unlock;

	/* if user requested to match allocations, the sizes must match - if not,
	 * we will defer freeing these hugepages until the entire original allocation
	 * is freed
	 */
	if (internal_conf->match_allocations && elem->size != elem->orig_size)
		goto free_unlock;

	/* probably, but let's make sure, as we may not be using up full page */
	start = elem;
	len = elem->size;
	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
	end = RTE_PTR_ADD(elem, len);
	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);

	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);

	/* can't free anything */
	if (aligned_len < page_sz)
		goto free_unlock;

	/* we can free something. however, some of these pages may be marked as
	 * unfreeable, so also check that as well
	 */
	n_segs = aligned_len / page_sz;
	for (i = 0; i < n_segs; i++) {
		const struct rte_memseg *tmp =
				rte_mem_virt2memseg(aligned_start, msl);

		if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			/* this is an unfreeable segment, so move start */
			aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
		}
	}

	/* recalculate length and number of segments */
	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
	n_segs = aligned_len / page_sz;
	/* check if we can still free some pages */
	if (n_segs == 0)
		goto free_unlock;

	/* We're not done yet. We also have to check if by freeing space we will
	 * be leaving free elements that are too small to store new elements.
	 * Check if we have enough space in the beginning and at the end, or if
	 * start/end are exactly page aligned.
	 */
	before_space = RTE_PTR_DIFF(aligned_start, elem);
	after_space = RTE_PTR_DIFF(end, aligned_end);
	if (before_space != 0 &&
			before_space < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* There is not enough space before start, but we may be able to
		 * move the start forward by one page.
		 */
		if (n_segs == 1)
			goto free_unlock;

		/* move start */
		aligned_start = RTE_PTR_ADD(aligned_start, page_sz);
		aligned_len -= page_sz;
		n_segs--;
	}
	if (after_space != 0 && after_space <
			MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* There is not enough space after end, but we may be able to
		 * move the end backwards by one page.
		 */
		if (n_segs == 1)
			goto free_unlock;

		/* move end */
		aligned_end = RTE_PTR_SUB(aligned_end, page_sz);
		aligned_len -= page_sz;
		n_segs--;
	}
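	/*
	 * Worked example with made-up numbers: on 2 MiB pages, an element
	 * spanning [0x1fffc0, 0x601000) aligns inward to
	 * [0x200000, 0x600000), i.e. two whole pages. The 64 bytes of
	 * before_space are smaller than MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE,
	 * so the start moves forward one page and only [0x400000, 0x600000)
	 * is actually handed back to the system.
	 */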
	/* now we can finally free us some pages */

	rte_mcfg_mem_write_lock();

	/*
	 * we allow secondary processes to clear the heap of this allocated
	 * memory because it is safe to do so, as even if notifications about
	 * unmapped pages don't make it to other processes, heap is shared
	 * across all processes, and will become empty of this memory anyway,
	 * and nothing can allocate it back unless primary process will be able
	 * to deliver allocation message to every single running process.
	 */

	malloc_elem_free_list_remove(elem);

	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);

	heap->total_size -= aligned_len;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* notify user about changes in memory map */
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				aligned_start, aligned_len);

		/* don't care if any of this fails */
		malloc_heap_free_pages(aligned_start, aligned_len);

		request_sync();
	} else {
		struct malloc_mp_req req;

		memset(&req, 0, sizeof(req));

		req.t = REQ_TYPE_FREE;
		req.free_req.addr = aligned_start;
		req.free_req.len = aligned_len;

		/*
		 * we request primary to deallocate pages, but we don't do it
		 * in this thread. instead, we notify primary that we would like
		 * to deallocate pages, and this process will receive another
		 * request (in parallel) that will do it for us on another
		 * thread.
		 *
		 * we also don't really care if this succeeds - the data is
		 * already removed from the heap, so it is, for all intents and
		 * purposes, hidden from the rest of DPDK even if some other
		 * process (including this one) may have these pages mapped.
		 *
		 * notifications about deallocated memory happen during sync.
		 */
		request_to_primary(&req);
	}

	/* we didn't exit early, meaning we have unmapped some pages */
	unmapped = true;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
		msl->socket_id, aligned_len >> 20ULL);

	rte_mcfg_mem_write_unlock();
free_unlock:
	asan_set_freezone(asan_ptr, asan_data_len);
	/* if we unmapped some memory, we need to do additional work for ASan */
	if (unmapped) {
		void *asan_end = RTE_PTR_ADD(asan_ptr, asan_data_len);
		void *aligned_end = RTE_PTR_ADD(aligned_start, aligned_len);
		void *aligned_trailer = RTE_PTR_SUB(aligned_start,
				MALLOC_ELEM_TRAILER_LEN);

		/*
		 * There was a memory area that was unmapped. This memory area
		 * will have to be marked as available for ASan, because we will
		 * want to use it next time it gets mapped again. The OS memory
		 * protection should trigger a fault on access to these areas
		 * anyway, so we are not giving up any protection.
		 */
		asan_set_zone(aligned_start, aligned_len, 0x00);

		/*
		 * ...however, when we unmap pages, we create new free elements
		 * which might have been marked as "freed" with an earlier
		 * `asan_set_freezone` call. So, if there is an area past the
		 * unmapped space that was marked as freezone for ASan, we need
		 * to mark the malloc header as available.
		 */
		if (asan_end > aligned_end)
			asan_set_zone(aligned_end, MALLOC_ELEM_HEADER_LEN, 0x00);

		/* if there's space before unmapped memory, mark as available */
		if (asan_ptr < aligned_start)
			asan_set_zone(aligned_trailer, MALLOC_ELEM_TRAILER_LEN, 0x00);
	}

	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
int
malloc_heap_resize(struct malloc_elem *elem, size_t size)
{
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));

	ret = malloc_elem_resize(elem, size);

	rte_spinlock_unlock(&(elem->heap->lock));

	return ret;
}
/*
 * Function to retrieve data for a given heap
 */
int
malloc_heap_get_stats(struct malloc_heap *heap,
		struct rte_malloc_socket_stats *socket_stats)
{
	size_t idx;
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	/* Initialise variables for heap */
	socket_stats->free_count = 0;
	socket_stats->heap_freesz_bytes = 0;
	socket_stats->greatest_free_size = 0;

	/* Iterate through free lists */
	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			socket_stats->free_count++;
			socket_stats->heap_freesz_bytes += elem->size;
			if (elem->size > socket_stats->greatest_free_size)
				socket_stats->greatest_free_size = elem->size;
		}
	}

	/* Get stats on overall heap and allocated memory on this heap */
	socket_stats->heap_totalsz_bytes = heap->total_size;
	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
			socket_stats->heap_freesz_bytes);
	socket_stats->alloc_count = heap->alloc_count;

	rte_spinlock_unlock(&heap->lock);
	return 0;
}
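/*
 * Minimal usage sketch via the public API (assuming an initialized EAL;
 * error handling omitted):
 *
 *	struct rte_malloc_socket_stats stats;
 *
 *	if (rte_malloc_get_socket_stats(0, &stats) == 0)
 *		printf("socket 0: %zu bytes free in %u elements\n",
 *				stats.heap_freesz_bytes, stats.free_count);
 *
 * rte_malloc_get_socket_stats() looks up the heap for the socket and calls
 * malloc_heap_get_stats() above.
 */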
/*
 * Function to dump the contents of a given heap
 */
void
malloc_heap_dump(struct malloc_heap *heap, FILE *f)
{
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);

	elem = heap->first;
	while (elem) {
		malloc_elem_dump(elem, f);
		elem = elem->next;
	}

	rte_spinlock_unlock(&heap->lock);
}
static int
destroy_elem(struct malloc_elem *elem, size_t len)
{
	struct malloc_heap *heap = elem->heap;

	/* notify all subscribers that a memory area is going to be removed */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len);

	/* this element can be removed */
	malloc_elem_free_list_remove(elem);
	malloc_elem_hide_region(elem, elem, len);

	heap->total_size -= len;

	memset(elem, 0, sizeof(*elem));

	return 0;
}
struct rte_memseg_list *
malloc_heap_create_external_seg(void *va_addr, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz, const char *seg_name,
		unsigned int socket_id)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	char fbarray_name[RTE_FBARRAY_NAME_LEN];
	struct rte_memseg_list *msl = NULL;
	struct rte_fbarray *arr;
	size_t seg_len = n_pages * page_sz;
	unsigned int i;

	/* first, find a free memseg list */
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *tmp = &mcfg->memsegs[i];
		if (tmp->base_va == NULL) {
			msl = tmp;
			break;
		}
	}
	if (msl == NULL) {
		RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n");
		rte_errno = ENOSPC;
		return NULL;
	}

	snprintf(fbarray_name, sizeof(fbarray_name), "%s_%p",
			seg_name, va_addr);

	/* create the backing fbarray */
	if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages,
			sizeof(struct rte_memseg)) < 0) {
		RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n");
		return NULL;
	}
	arr = &msl->memseg_arr;

	/* fbarray created, fill it up */
	for (i = 0; i < n_pages; i++) {
		struct rte_memseg *ms;

		rte_fbarray_set_used(arr, i);
		ms = rte_fbarray_get(arr, i);
		ms->addr = RTE_PTR_ADD(va_addr, i * page_sz);
		ms->iova = iova_addrs == NULL ? RTE_BAD_IOVA : iova_addrs[i];
		ms->hugepage_sz = page_sz;
		ms->len = page_sz;
		ms->nchannel = rte_memory_get_nchannel();
		ms->nrank = rte_memory_get_nrank();
		ms->socket_id = socket_id;
	}

	/* set up the memseg list */
	msl->base_va = va_addr;
	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->len = seg_len;
	msl->version = 0;
	msl->external = 1;

	return msl;
}
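/*
 * Hypothetical end-to-end sketch of the external-memory path via the
 * public rte_malloc_heap_* API, which lands in the helpers of this file
 * (error handling omitted, pg_sz/len assumed valid):
 *
 *	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 *	rte_malloc_heap_create("my_heap");
 *	rte_malloc_heap_memory_add("my_heap", addr, len, NULL, 0, pg_sz);
 *
 *	int socket = rte_malloc_heap_get_socket("my_heap");
 *	void *obj = rte_malloc_socket(NULL, 4096, 0, socket);
 */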
struct extseg_walk_arg {
	void *va_addr;
	size_t len;
	struct rte_memseg_list *msl;
};

static int
extseg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct extseg_walk_arg *wa = arg;

	if (msl->base_va == wa->va_addr && msl->len == wa->len) {
		unsigned int found_idx;

		/* msl is const, so find it */
		found_idx = msl - mcfg->memsegs;
		wa->msl = &mcfg->memsegs[found_idx];
		return 1;
	}
	return 0;
}
struct rte_memseg_list *
malloc_heap_find_external_seg(void *va_addr, size_t len)
{
	struct extseg_walk_arg wa;
	int res;

	wa.va_addr = va_addr;
	wa.len = len;

	res = rte_memseg_list_walk_thread_unsafe(extseg_walk, &wa);

	if (res != 1) {
		/* 0 means nothing was found, -1 shouldn't happen */
		if (res == 0)
			rte_errno = ENOENT;
		return NULL;
	}
	return wa.msl;
}
int
malloc_heap_destroy_external_seg(struct rte_memseg_list *msl)
{
	/* destroy the fbarray backing this memory */
	if (rte_fbarray_destroy(&msl->memseg_arr) < 0)
		return -1;

	/* reset the memseg list */
	memset(msl, 0, sizeof(*msl));

	return 0;
}
int
malloc_heap_add_external_memory(struct malloc_heap *heap,
		struct rte_memseg_list *msl)
{
	/* erase contents of new memory */
	memset(msl->base_va, 0, msl->len);

	/* now, add newly minted memory to the malloc heap */
	malloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);

	heap->total_size += msl->len;

	/* all done! */
	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
			heap->name, msl->base_va);

	/* notify all subscribers that a new memory area has been added */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
			msl->base_va, msl->len);

	return 0;
}
int
malloc_heap_remove_external_memory(struct malloc_heap *heap, void *va_addr,
		size_t len)
{
	struct malloc_elem *elem = heap->first;

	/* find element with specified va address */
	while (elem != NULL && elem != va_addr) {
		elem = elem->next;
		/* stop if we've blown past our VA */
		if (elem > (struct malloc_elem *)va_addr) {
			rte_errno = ENOENT;
			return -1;
		}
	}
	/* check if element was found */
	if (elem == NULL || elem->msl->len != len) {
		rte_errno = ENOENT;
		return -1;
	}
	/* if element's size is not equal to segment len, segment is busy */
	if (elem->state == ELEM_BUSY || elem->size != len) {
		rte_errno = EBUSY;
		return -1;
	}
	return destroy_elem(elem, len);
}
int
malloc_heap_create(struct malloc_heap *heap, const char *heap_name)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint32_t next_socket_id = mcfg->next_socket_id;

	/* prevent overflow. did you really create 2 billion heaps??? */
	if (next_socket_id > INT32_MAX) {
		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
		rte_errno = ENOSPC;
		return -1;
	}

	/* initialize empty heap */
	heap->alloc_count = 0;
	heap->first = NULL;
	heap->last = NULL;
	LIST_INIT(heap->free_head);
	rte_spinlock_init(&heap->lock);
	heap->total_size = 0;
	heap->socket_id = next_socket_id;

	/* we hold a global mem hotplug writelock, so it's safe to increment */
	mcfg->next_socket_id++;

	/* set up name */
	strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
	return 0;
}
int
malloc_heap_destroy(struct malloc_heap *heap)
{
	if (heap->alloc_count != 0) {
		RTE_LOG(ERR, EAL, "Heap is still in use\n");
		rte_errno = EBUSY;
		return -1;
	}
	if (heap->first != NULL || heap->last != NULL) {
		RTE_LOG(ERR, EAL, "Heap still contains memory segments\n");
		rte_errno = EBUSY;
		return -1;
	}
	if (heap->total_size != 0)
		RTE_LOG(ERR, EAL, "Total size not zero, heap is likely corrupt\n");

	/* after this, the lock will be dropped */
	memset(heap, 0, sizeof(*heap));

	return 0;
}
int
rte_eal_malloc_heap_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int i;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (internal_conf->match_allocations)
		RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* assign min socket ID to external heaps */
		mcfg->next_socket_id = EXTERNAL_HEAP_MIN_SOCKET_ID;

		/* assign names to default DPDK heaps */
		for (i = 0; i < rte_socket_count(); i++) {
			struct malloc_heap *heap = &mcfg->malloc_heaps[i];
			char heap_name[RTE_HEAP_NAME_MAX_LEN];
			int socket_id = rte_socket_id_by_idx(i);

			snprintf(heap_name, sizeof(heap_name),
					"socket_%i", socket_id);
			strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
			heap->socket_id = socket_id;
		}
	}

	if (register_mp_requests()) {
		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
		rte_mcfg_mem_read_unlock();
		return -1;
	}

	/* unlock mem hotplug here. it's safe for primary as no requests can
	 * even come before primary itself is fully initialized, and secondaries
	 * do not need to initialize the heap.
	 */
	rte_mcfg_mem_read_unlock();

	/* secondary process does not need to initialize anything */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* add all IOVA-contiguous areas to the heap */
	return rte_memseg_contig_walk(malloc_add_seg, NULL);
}
void
rte_eal_malloc_heap_cleanup(void)
{
	unregister_mp_requests();
}