1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#include <rte_errno.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <rte_malloc.h>
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "eal_memalloc.h"
30 /* Free the memory space back to heap */
31 void rte_free(void *addr)
33 if (addr == NULL) return;
34 if (malloc_heap_free(malloc_elem_from_data(addr)) < 0)
35 RTE_LOG(ERR, EAL, "Error: Invalid memory\n");
39 * Allocate memory on specified heap.
42 rte_malloc_socket(const char *type, size_t size, unsigned int align,
45 /* return NULL if size is 0 or alignment is not power-of-2 */
46 if (size == 0 || (align && !rte_is_power_of_2(align)))
49 if (!rte_eal_has_hugepages())
50 socket_arg = SOCKET_ID_ANY;
52 return malloc_heap_alloc(type, size, socket_arg, 0,
53 align == 0 ? 1 : align, 0, false);
57 * Allocate memory on default heap.
60 rte_malloc(const char *type, size_t size, unsigned align)
62 return rte_malloc_socket(type, size, align, SOCKET_ID_ANY);
66 * Allocate zero'd memory on specified heap.
69 rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)
71 return rte_malloc_socket(type, size, align, socket);
75 * Allocate zero'd memory on default heap.
78 rte_zmalloc(const char *type, size_t size, unsigned align)
80 return rte_zmalloc_socket(type, size, align, SOCKET_ID_ANY);
84 * Allocate zero'd memory on specified heap.
87 rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket)
89 return rte_zmalloc_socket(type, num * size, align, socket);
93 * Allocate zero'd memory on default heap.
96 rte_calloc(const char *type, size_t num, size_t size, unsigned align)
98 return rte_zmalloc(type, num * size, align);
102 * Resize allocated memory.
105 rte_realloc(void *ptr, size_t size, unsigned align)
108 return rte_malloc(NULL, size, align);
110 struct malloc_elem *elem = malloc_elem_from_data(ptr);
112 RTE_LOG(ERR, EAL, "Error: memory corruption detected\n");
116 size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
117 /* check alignment matches first, and if ok, see if we can resize block */
118 if (RTE_PTR_ALIGN(ptr,align) == ptr &&
119 malloc_heap_resize(elem, size) == 0)
122 /* either alignment is off, or we have no room to expand,
124 void *new_ptr = rte_malloc(NULL, size, align);
127 const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
128 rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
135 rte_malloc_validate(const void *ptr, size_t *size)
137 const struct malloc_elem *elem = malloc_elem_from_data(ptr);
138 if (!malloc_elem_cookies_ok(elem))
141 *size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
146 * Function to retrieve data for heap on given socket
149 rte_malloc_get_socket_stats(int socket,
150 struct rte_malloc_socket_stats *socket_stats)
152 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
153 int heap_idx, ret = -1;
155 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
157 heap_idx = malloc_socket_to_heap_id(socket);
161 ret = malloc_heap_get_stats(&mcfg->malloc_heaps[heap_idx],
164 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
170 * Function to dump contents of all heaps
172 void __rte_experimental
173 rte_malloc_dump_heaps(FILE *f)
175 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
178 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
180 for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
181 fprintf(f, "Heap id: %u\n", idx);
182 malloc_heap_dump(&mcfg->malloc_heaps[idx], f);
185 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
189 rte_malloc_heap_get_socket(const char *name)
191 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
192 struct malloc_heap *heap = NULL;
197 strnlen(name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
198 strnlen(name, RTE_HEAP_NAME_MAX_LEN) ==
199 RTE_HEAP_NAME_MAX_LEN) {
203 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
204 for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
205 struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
207 if (!strncmp(name, tmp->name, RTE_HEAP_NAME_MAX_LEN)) {
214 ret = heap->socket_id;
219 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
225 rte_malloc_heap_socket_is_external(int socket_id)
227 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
231 if (socket_id == SOCKET_ID_ANY)
234 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
235 for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
236 struct malloc_heap *tmp = &mcfg->malloc_heaps[idx];
238 if ((int)tmp->socket_id == socket_id) {
239 /* external memory always has large socket ID's */
240 ret = tmp->socket_id >= RTE_MAX_NUMA_NODES;
244 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
250 * Print stats on memory type. If type is NULL, info on all types is printed
253 rte_malloc_dump_stats(FILE *f, __rte_unused const char *type)
255 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
256 unsigned int heap_id;
257 struct rte_malloc_socket_stats sock_stats;
259 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
261 /* Iterate through all initialised heaps */
262 for (heap_id = 0; heap_id < RTE_MAX_HEAPS; heap_id++) {
263 struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
265 malloc_heap_get_stats(heap, &sock_stats);
267 fprintf(f, "Heap id:%u\n", heap_id);
268 fprintf(f, "\tHeap name:%s\n", heap->name);
269 fprintf(f, "\tHeap_size:%zu,\n", sock_stats.heap_totalsz_bytes);
270 fprintf(f, "\tFree_size:%zu,\n", sock_stats.heap_freesz_bytes);
271 fprintf(f, "\tAlloc_size:%zu,\n", sock_stats.heap_allocsz_bytes);
272 fprintf(f, "\tGreatest_free_size:%zu,\n",
273 sock_stats.greatest_free_size);
274 fprintf(f, "\tAlloc_count:%u,\n",sock_stats.alloc_count);
275 fprintf(f, "\tFree_count:%u,\n", sock_stats.free_count);
277 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
282 * TODO: Set limit to memory that can be allocated to memory type
285 rte_malloc_set_limit(__rte_unused const char *type,
286 __rte_unused size_t max)
292 * Return the IO address of a virtual address obtained through rte_malloc
295 rte_malloc_virt2iova(const void *addr)
297 const struct rte_memseg *ms;
298 struct malloc_elem *elem = malloc_elem_from_data(addr);
303 if (!elem->msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
304 return (uintptr_t) addr;
306 ms = rte_mem_virt2memseg(addr, elem->msl);
310 if (ms->iova == RTE_BAD_IOVA)
313 return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
316 static struct malloc_heap *
317 find_named_heap(const char *name)
319 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
322 for (i = 0; i < RTE_MAX_HEAPS; i++) {
323 struct malloc_heap *heap = &mcfg->malloc_heaps[i];
325 if (!strncmp(name, heap->name, RTE_HEAP_NAME_MAX_LEN))
332 rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
333 rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
335 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
336 struct malloc_heap *heap = NULL;
340 if (heap_name == NULL || va_addr == NULL ||
341 page_sz == 0 || !rte_is_power_of_2(page_sz) ||
342 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
343 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
344 RTE_HEAP_NAME_MAX_LEN) {
349 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
352 heap = find_named_heap(heap_name);
358 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
359 /* cannot add memory to internal heaps */
365 if (n != n_pages && iova_addrs != NULL) {
371 rte_spinlock_lock(&heap->lock);
372 ret = malloc_heap_add_external_memory(heap, va_addr, iova_addrs, n,
374 rte_spinlock_unlock(&heap->lock);
377 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
383 rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
385 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
386 struct malloc_heap *heap = NULL;
389 if (heap_name == NULL || va_addr == NULL || len == 0 ||
390 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
391 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
392 RTE_HEAP_NAME_MAX_LEN) {
396 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
398 heap = find_named_heap(heap_name);
404 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
405 /* cannot remove memory from internal heaps */
411 rte_spinlock_lock(&heap->lock);
412 ret = malloc_heap_remove_external_memory(heap, va_addr, len);
413 rte_spinlock_unlock(&heap->lock);
416 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
/* Parameters/result for sync_mem_walk(): identifies the VA range to
 * attach/detach and carries the walk outcome back to sync_memory().
 */
struct sync_mem_walk_arg {
	void *va_addr;	/* base VA of the memory area to match */
	size_t len;	/* length of the memory area */
	int result;	/* 0 on success, negative errno on failure */
	bool attach;	/* true = attach, false = detach */
};
429 sync_mem_walk(const struct rte_memseg_list *msl, void *arg)
431 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
432 struct sync_mem_walk_arg *wa = arg;
433 size_t len = msl->page_sz * msl->memseg_arr.len;
435 if (msl->base_va == wa->va_addr &&
437 struct rte_memseg_list *found_msl;
441 msl_idx = msl - mcfg->memsegs;
442 found_msl = &mcfg->memsegs[msl_idx];
445 ret = rte_fbarray_attach(&found_msl->memseg_arr);
447 /* notify all subscribers that a memory area is about to
450 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
451 msl->base_va, msl->len);
452 ret = rte_fbarray_detach(&found_msl->memseg_arr);
456 wa->result = -rte_errno;
458 /* notify all subscribers that a new memory area was
462 eal_memalloc_mem_event_notify(
464 msl->base_va, msl->len);
473 sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
475 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
476 struct malloc_heap *heap = NULL;
477 struct sync_mem_walk_arg wa;
480 if (heap_name == NULL || va_addr == NULL || len == 0 ||
481 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
482 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
483 RTE_HEAP_NAME_MAX_LEN) {
487 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
490 heap = find_named_heap(heap_name);
496 /* we shouldn't be able to sync to internal heaps */
497 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
503 /* find corresponding memseg list to sync to */
504 wa.va_addr = va_addr;
506 wa.result = -ENOENT; /* fail unless explicitly told to succeed */
509 /* we're already holding a read lock */
510 rte_memseg_list_walk_thread_unsafe(sync_mem_walk, &wa);
513 rte_errno = -wa.result;
516 /* notify all subscribers that a new memory area was added */
518 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
523 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
528 rte_malloc_heap_memory_attach(const char *heap_name, void *va_addr, size_t len)
530 return sync_memory(heap_name, va_addr, len, true);
534 rte_malloc_heap_memory_detach(const char *heap_name, void *va_addr, size_t len)
536 return sync_memory(heap_name, va_addr, len, false);
540 rte_malloc_heap_create(const char *heap_name)
542 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
543 struct malloc_heap *heap = NULL;
546 if (heap_name == NULL ||
547 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
548 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
549 RTE_HEAP_NAME_MAX_LEN) {
553 /* check if there is space in the heap list, or if heap with this name
556 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
558 for (i = 0; i < RTE_MAX_HEAPS; i++) {
559 struct malloc_heap *tmp = &mcfg->malloc_heaps[i];
561 if (strncmp(heap_name, tmp->name,
562 RTE_HEAP_NAME_MAX_LEN) == 0) {
563 RTE_LOG(ERR, EAL, "Heap %s already exists\n",
570 if (strnlen(tmp->name, RTE_HEAP_NAME_MAX_LEN) == 0) {
576 RTE_LOG(ERR, EAL, "Cannot create new heap: no space\n");
582 /* we're sure that we can create a new heap, so do it */
583 ret = malloc_heap_create(heap, heap_name);
585 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
591 rte_malloc_heap_destroy(const char *heap_name)
593 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
594 struct malloc_heap *heap = NULL;
597 if (heap_name == NULL ||
598 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
599 strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
600 RTE_HEAP_NAME_MAX_LEN) {
604 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
606 /* start from non-socket heaps */
607 heap = find_named_heap(heap_name);
609 RTE_LOG(ERR, EAL, "Heap %s not found\n", heap_name);
614 /* we shouldn't be able to destroy internal heaps */
615 if (heap->socket_id < RTE_MAX_NUMA_NODES) {
620 /* sanity checks done, now we can destroy the heap */
621 rte_spinlock_lock(&heap->lock);
622 ret = malloc_heap_destroy(heap);
624 /* if we failed, lock is still active */
626 rte_spinlock_unlock(&heap->lock);
628 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);