X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Fmalloc_heap.c;h=5a09247a65377a0e418e30b91eef32d797e60c9d;hb=5b183ff611fc7aa4414abb99a31609a379323722;hp=c5d254d8a42415c9f64d585fafeb3174166c92e8;hpb=39ff94e71cd5469a7796d4a52340afaa54bee16e;p=dpdk.git

diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index c5d254d8a4..5a09247a65 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -20,11 +20,14 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
 #include "eal_internal_cfg.h"
 #include "eal_memalloc.h"
+#include "eal_memcfg.h"
+#include "eal_private.h"
 #include "malloc_elem.h"
 #include "malloc_heap.h"
 #include "malloc_mp.h"
@@ -238,6 +241,9 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
 	size = RTE_CACHE_LINE_ROUNDUP(size);
 	align = RTE_CACHE_LINE_ROUNDUP(align);
 
+	/* roundup might cause an overflow */
+	if (size == 0)
+		return NULL;
 	elem = find_suitable_element(heap, size, flags, align, bound, contig);
 	if (elem != NULL) {
 		elem = malloc_elem_alloc(elem, size, align, bound, contig);
@@ -485,10 +491,9 @@ try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 		int socket, unsigned int flags, size_t align, size_t bound,
 		bool contig)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret;
 
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_lock();
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
@@ -498,7 +503,7 @@ try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 				flags, align, bound, contig);
 	}
 
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_unlock();
 
 	return ret;
 }
@@ -637,13 +642,15 @@ malloc_heap_alloc_on_heap_id(const char *type, size_t size,
 	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
 	int socket_id;
 	void *ret;
+	const struct internal_config *internal_conf =
+		eal_get_internal_configuration();
 
 	rte_spinlock_lock(&(heap->lock));
 
 	align = align == 0 ? 1 : align;
 
 	/* for legacy mode, try once and with all flags */
-	if (internal_config.legacy_mem) {
+	if (internal_conf->legacy_mem) {
 		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
 		goto alloc_unlock;
 	}
@@ -821,13 +828,14 @@ malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
 int
 malloc_heap_free(struct malloc_elem *elem)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	struct malloc_heap *heap;
 	void *start, *aligned_start, *end, *aligned_end;
 	size_t len, aligned_len, page_sz;
 	struct rte_memseg_list *msl;
 	unsigned int i, n_segs, before_space, after_space;
 	int ret;
+	const struct internal_config *internal_conf =
+		eal_get_internal_configuration();
 
 	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
 		return -1;
@@ -850,7 +858,7 @@ malloc_heap_free(struct malloc_elem *elem)
 	/* ...of which we can't avail if we are in legacy mode, or if this is an
 	 * externally allocated segment.
 	 */
-	if (internal_config.legacy_mem || (msl->external > 0))
+	if (internal_conf->legacy_mem || (msl->external > 0))
 		goto free_unlock;
 
 	/* check if we can free any memory back to the system */
@@ -861,7 +869,7 @@ malloc_heap_free(struct malloc_elem *elem)
 	 * we will defer freeing these hugepages until the entire original allocation
 	 * can be freed
 	 */
-	if (internal_config.match_allocations && elem->size != elem->orig_size)
+	if (internal_conf->match_allocations && elem->size != elem->orig_size)
 		goto free_unlock;
 
 	/* probably, but let's make sure, as we may not be using up full page */
@@ -935,7 +943,7 @@ malloc_heap_free(struct malloc_elem *elem)
 
 	/* now we can finally free us some pages */
 
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_lock();
 
 	/*
 	 * we allow secondary processes to clear the heap of this allocated
@@ -990,7 +998,7 @@ malloc_heap_free(struct malloc_elem *elem)
 	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
 		msl->socket_id, aligned_len >> 20ULL);
 
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_unlock();
 free_unlock:
 	rte_spinlock_unlock(&(heap->lock));
 	return ret;
@@ -1118,7 +1126,7 @@ malloc_heap_create_external_seg(void *va_addr, rte_iova_t iova_addrs[],
 		return NULL;
 	}
 
-	snprintf(fbarray_name, sizeof(fbarray_name) - 1, "%s_%p",
+	snprintf(fbarray_name, sizeof(fbarray_name), "%s_%p",
 		seg_name, va_addr);
 
 	/* create the backing fbarray */
@@ -1319,10 +1327,11 @@ rte_eal_malloc_heap_init(void)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	unsigned int i;
+	const struct internal_config *internal_conf =
+		eal_get_internal_configuration();
 
-	if (internal_config.match_allocations) {
+	if (internal_conf->match_allocations)
 		RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");
-	}
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		/* assign min socket ID to external heaps */
@@ -1334,7 +1343,7 @@ rte_eal_malloc_heap_init(void)
 			char heap_name[RTE_HEAP_NAME_MAX_LEN];
 			int socket_id = rte_socket_id_by_idx(i);
 
-			snprintf(heap_name, sizeof(heap_name) - 1,
+			snprintf(heap_name, sizeof(heap_name),
 					"socket_%i", socket_id);
 			strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
 			heap->socket_id = socket_id;
@@ -1344,7 +1353,7 @@ rte_eal_malloc_heap_init(void)
 
 	if (register_mp_requests()) {
 		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
-		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+		rte_mcfg_mem_read_unlock();
 		return -1;
 	}
 
@@ -1352,7 +1361,7 @@ rte_eal_malloc_heap_init(void)
 	 * even come before primary itself is fully initialized, and secondaries
 	 * do not need to initialize the heap.
 	 */
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	/* secondary process does not need to initialize anything */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)