1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2019 Intel Corporation
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <rte_malloc.h>
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_private.h"
32 /* Free the memory space back to heap */
33 void rte_free(void *addr)
35 if (addr == NULL) return;
36 if (malloc_heap_free(malloc_elem_from_data(addr)) < 0)
37 RTE_LOG(ERR, EAL, "Error: Invalid memory\n");
41 * Allocate memory on specified heap.
44 rte_malloc_socket(const char *type, size_t size, unsigned int align,
47 /* return NULL if size is 0 or alignment is not power-of-2 */
48 if (size == 0 || (align && !rte_is_power_of_2(align)))
51 /* if there are no hugepages and if we are not allocating from an
52 * external heap, use memory from any socket available. checking for
53 * socket being external may return -1 in case of invalid socket, but
54 * that's OK - if there are no hugepages, it doesn't matter.
56 if (rte_malloc_heap_socket_is_external(socket_arg) != 1 &&
57 !rte_eal_has_hugepages())
58 socket_arg = SOCKET_ID_ANY;
60 return malloc_heap_alloc(type, size, socket_arg, 0,
61 align == 0 ? 1 : align, 0, false);
65 * Allocate memory on default heap.
68 rte_malloc(const char *type, size_t size, unsigned align)
70 return rte_malloc_socket(type, size, align, SOCKET_ID_ANY);
/*
 * Allocate zero'd memory on specified heap.
 */
void *
rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)
{
	void *ptr = rte_malloc_socket(type, size, align, socket);

#ifdef RTE_MALLOC_DEBUG
	/*
	 * If DEBUG is enabled, then freed memory is marked with poison
	 * value and set to zero on allocation.
	 * If DEBUG is not enabled then memory is already zeroed.
	 */
	if (ptr != NULL)
		memset(ptr, 0, size);
#endif
	return ptr;
}
94 * Allocate zero'd memory on default heap.
97 rte_zmalloc(const char *type, size_t size, unsigned align)
99 return rte_zmalloc_socket(type, size, align, SOCKET_ID_ANY);
/*
 * Allocate zero'd memory on specified heap.
 */
void *
rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket)
{
	/* guard against num * size wrapping around size_t: a wrapped product
	 * would silently allocate a too-small buffer (CERT MEM07-C).
	 */
	if (size != 0 && num > SIZE_MAX / size)
		return NULL;
	return rte_zmalloc_socket(type, num * size, align, socket);
}
/*
 * Allocate zero'd memory on default heap.
 */
void *
rte_calloc(const char *type, size_t num, size_t size, unsigned align)
{
	/* reject num * size products that would overflow size_t; an
	 * undetected wrap-around would return an undersized allocation.
	 */
	if (size != 0 && num > SIZE_MAX / size)
		return NULL;
	return rte_zmalloc(type, num * size, align);
}
121 * Resize allocated memory on specified heap.
124 rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
127 return rte_malloc_socket(NULL, size, align, socket);
129 struct malloc_elem *elem = malloc_elem_from_data(ptr);
131 RTE_LOG(ERR, EAL, "Error: memory corruption detected\n");
135 size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
137 /* check requested socket id and alignment matches first, and if ok,
138 * see if we can resize block
140 if ((socket == SOCKET_ID_ANY ||
141 (unsigned int)socket == elem->heap->socket_id) &&
142 RTE_PTR_ALIGN(ptr, align) == ptr &&
143 malloc_heap_resize(elem, size) == 0)
146 /* either requested socket id doesn't match, alignment is off
147 * or we have no room to expand,
150 void *new_ptr = rte_malloc_socket(NULL, size, align, socket);
153 /* elem: |pad|data_elem|data|trailer| */
154 const size_t old_size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
155 rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
162 * Resize allocated memory.
165 rte_realloc(void *ptr, size_t size, unsigned int align)
167 return rte_realloc_socket(ptr, size, align, SOCKET_ID_ANY);
171 rte_malloc_validate(const void *ptr, size_t *size)
173 const struct malloc_elem *elem = malloc_elem_from_data(ptr);
174 if (!malloc_elem_cookies_ok(elem))
177 *size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
182 * Function to retrieve data for heap on given socket
185 rte_malloc_get_socket_stats(int socket,
186 struct rte_malloc_socket_stats *socket_stats)
188 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
191 heap_idx = malloc_socket_to_heap_id(socket);
195 return malloc_heap_get_stats(&mcfg->malloc_heaps[heap_idx],
200 * Function to dump contents of all heaps
203 rte_malloc_dump_heaps(FILE *f)
205 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
208 for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
209 fprintf(f, "Heap id: %u\n", idx);
210 malloc_heap_dump(&mcfg->malloc_heaps[idx], f);
215 rte_malloc_heap_get_socket(const char *name)
217 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
218 struct malloc_heap *heap = NULL;
223 strnlen(name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
224 strnlen(name, RTE_HEAP_NAME_MAX_LEN) ==
225 RTE_HEAP_NAME_MAX_LEN) {
229 rte_mcfg_mem_read_lock();
230 for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
231 struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
233 if (!strncmp(name, tmp->name, RTE_HEAP_NAME_MAX_LEN)) {
240 ret = heap->socket_id;
245 rte_mcfg_mem_read_unlock();
251 rte_malloc_heap_socket_is_external(int socket_id)
253 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
257 if (socket_id == SOCKET_ID_ANY)
260 rte_mcfg_mem_read_lock();
261 for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
262 struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
264 if ((int)tmp->socket_id == socket_id) {
265 /* external memory always has large socket ID's */
266 ret = tmp->socket_id >= RTE_MAX_NUMA_NODES;
270 rte_mcfg_mem_read_unlock();
276 * Print stats on memory type. If type is NULL, info on all types is printed
279 rte_malloc_dump_stats(FILE *f, __rte_unused const char *type)
281 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
282 unsigned int heap_id;
283 struct rte_malloc_socket_stats sock_stats;
285 /* Iterate through all initialised heaps */
286 for (heap_id = 0; heap_id < RTE_MAX_HEAPS; heap_id++) {
287 struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
289 malloc_heap_get_stats(heap, &sock_stats);
291 fprintf(f, "Heap id:%u\n", heap_id);
292 fprintf(f, "\tHeap name:%s\n", heap->name);
293 fprintf(f, "\tHeap_size:%zu,\n", sock_stats.heap_totalsz_bytes);
294 fprintf(f, "\tFree_size:%zu,\n", sock_stats.heap_freesz_bytes);
295 fprintf(f, "\tAlloc_size:%zu,\n", sock_stats.heap_allocsz_bytes);
296 fprintf(f, "\tGreatest_free_size:%zu,\n",
297 sock_stats.greatest_free_size);
298 fprintf(f, "\tAlloc_count:%u,\n",sock_stats.alloc_count);
299 fprintf(f, "\tFree_count:%u,\n", sock_stats.free_count);
305 * TODO: Set limit to memory that can be allocated to memory type
308 rte_malloc_set_limit(__rte_unused const char *type,
309 __rte_unused size_t max)
315 * Return the IO address of a virtual address obtained through rte_malloc
318 rte_malloc_virt2iova(const void *addr)
320 const struct rte_memseg *ms;
321 struct malloc_elem *elem = malloc_elem_from_data(addr);
326 if (!elem->msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
327 return (uintptr_t) addr;
329 ms = rte_mem_virt2memseg(addr, elem->msl);
333 if (ms->iova == RTE_BAD_IOVA)
336 return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
339 static struct malloc_heap *
340 find_named_heap(const char *name)
342 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
345 for (i = 0; i < RTE_MAX_HEAPS; i++) {
346 struct malloc_heap *heap = &mcfg->malloc_heaps[i];
348 if (!strncmp(name, heap->name, RTE_HEAP_NAME_MAX_LEN))
355 rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
356 rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
358 struct malloc_heap *heap = NULL;
359 struct rte_memseg_list *msl;
363 if (heap_name == NULL || va_addr == NULL ||
364 page_sz == 0 || !rte_is_power_of_2(page_sz) ||
365 RTE_ALIGN(len, page_sz) != len ||
366 !rte_is_aligned(va_addr, page_sz) ||
367 ((len / page_sz) != n_pages && iova_addrs != NULL) ||
368 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
369 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
370 RTE_HEAP_NAME_MAX_LEN) {
374 rte_mcfg_mem_write_lock();
377 heap = find_named_heap(heap_name);
383 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
384 /* cannot add memory to internal heaps */
391 msl = malloc_heap_create_external_seg(va_addr, iova_addrs, n, page_sz,
392 heap_name, heap->socket_id);
398 rte_spinlock_lock(&heap->lock);
399 ret = malloc_heap_add_external_memory(heap, msl);
400 msl->heap = 1; /* mark it as heap segment */
401 rte_spinlock_unlock(&heap->lock);
404 rte_mcfg_mem_write_unlock();
410 rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
412 struct malloc_heap *heap = NULL;
413 struct rte_memseg_list *msl;
416 if (heap_name == NULL || va_addr == NULL || len == 0 ||
417 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
418 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
419 RTE_HEAP_NAME_MAX_LEN) {
423 rte_mcfg_mem_write_lock();
425 heap = find_named_heap(heap_name);
431 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
432 /* cannot remove memory from internal heaps */
438 msl = malloc_heap_find_external_seg(va_addr, len);
444 rte_spinlock_lock(&heap->lock);
445 ret = malloc_heap_remove_external_memory(heap, va_addr, len);
446 rte_spinlock_unlock(&heap->lock);
450 ret = malloc_heap_destroy_external_seg(msl);
453 rte_mcfg_mem_write_unlock();
459 sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
461 struct malloc_heap *heap = NULL;
462 struct rte_memseg_list *msl;
465 if (heap_name == NULL || va_addr == NULL || len == 0 ||
466 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
467 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
468 RTE_HEAP_NAME_MAX_LEN) {
472 rte_mcfg_mem_read_lock();
475 heap = find_named_heap(heap_name);
481 /* we shouldn't be able to sync to internal heaps */
482 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
488 /* find corresponding memseg list to sync to */
489 msl = malloc_heap_find_external_seg(va_addr, len);
496 ret = rte_fbarray_attach(&msl->memseg_arr);
498 /* notify all subscribers that a new memory area was
501 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
508 /* notify all subscribers that a memory area is about to
511 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
512 msl->base_va, msl->len);
513 ret = rte_fbarray_detach(&msl->memseg_arr);
520 rte_mcfg_mem_read_unlock();
/* Public attach entry point: sync_memory() with attach == true */
int
rte_malloc_heap_memory_attach(const char *heap_name, void *va_addr, size_t len)
{
	return sync_memory(heap_name, va_addr, len, true);
}
/* Public detach entry point: sync_memory() with attach == false */
int
rte_malloc_heap_memory_detach(const char *heap_name, void *va_addr, size_t len)
{
	return sync_memory(heap_name, va_addr, len, false);
}
537 rte_malloc_heap_create(const char *heap_name)
539 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
540 struct malloc_heap *heap = NULL;
543 if (heap_name == NULL ||
544 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
545 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
546 RTE_HEAP_NAME_MAX_LEN) {
550 /* check if there is space in the heap list, or if heap with this name
553 rte_mcfg_mem_write_lock();
555 for (i = 0; i < RTE_MAX_HEAPS; i++) {
556 struct malloc_heap *tmp = &mcfg->malloc_heaps[i];
558 if (strncmp(heap_name, tmp->name,
559 RTE_HEAP_NAME_MAX_LEN) == 0) {
560 RTE_LOG(ERR, EAL, "Heap %s already exists\n",
567 if (strnlen(tmp->name, RTE_HEAP_NAME_MAX_LEN) == 0) {
573 RTE_LOG(ERR, EAL, "Cannot create new heap: no space\n");
579 /* we're sure that we can create a new heap, so do it */
580 ret = malloc_heap_create(heap, heap_name);
582 rte_mcfg_mem_write_unlock();
588 rte_malloc_heap_destroy(const char *heap_name)
590 struct malloc_heap *heap = NULL;
593 if (heap_name == NULL ||
594 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
595 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
596 RTE_HEAP_NAME_MAX_LEN) {
600 rte_mcfg_mem_write_lock();
602 /* start from non-socket heaps */
603 heap = find_named_heap(heap_name);
605 RTE_LOG(ERR, EAL, "Heap %s not found\n", heap_name);
610 /* we shouldn't be able to destroy internal heaps */
611 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
616 /* sanity checks done, now we can destroy the heap */
617 rte_spinlock_lock(&heap->lock);
618 ret = malloc_heap_destroy(heap);
620 /* if we failed, lock is still active */
622 rte_spinlock_unlock(&heap->lock);
624 rte_mcfg_mem_write_unlock();