X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Fmalloc_heap.c;h=ee400f38ec7fb69639530f93b1c36a69ceec4320;hb=e863fe3a13da89787fdf3b5c590101a3c0f10af6;hp=f9235932eb6c40b7b8e8b4ff42aa171cd7725063;hpb=f4be6a9a2903e68f5f04124eeb59325c17f1e1eb;p=dpdk.git

diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index f9235932eb..ee400f38ec 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -20,11 +20,14 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
 #include "eal_internal_cfg.h"
 #include "eal_memalloc.h"
+#include "eal_memcfg.h"
+#include "eal_private.h"
 #include "malloc_elem.h"
 #include "malloc_heap.h"
 #include "malloc_mp.h"
@@ -238,6 +241,9 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
 	size = RTE_CACHE_LINE_ROUNDUP(size);
 	align = RTE_CACHE_LINE_ROUNDUP(align);
 
+	/* roundup might cause an overflow */
+	if (size == 0)
+		return NULL;
 	elem = find_suitable_element(heap, size, flags, align, bound, contig);
 	if (elem != NULL) {
 		elem = malloc_elem_alloc(elem, size, align, bound, contig);
@@ -454,6 +460,7 @@ try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
 		size_t elt_size, int socket, unsigned int flags, size_t align,
 		size_t bound, bool contig)
 {
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	struct malloc_mp_req req;
 	int req_result;
 
@@ -467,7 +474,7 @@ try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
 	req.alloc_req.elt_size = elt_size;
 	req.alloc_req.page_sz = pg_sz;
 	req.alloc_req.socket = socket;
-	req.alloc_req.heap = heap; /* it's in shared memory */
+	req.alloc_req.malloc_heap_idx = heap - mcfg->malloc_heaps;
 
 	req_result = request_to_primary(&req);
 
@@ -485,10 +492,9 @@ try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 		int socket, unsigned int flags, size_t align, size_t bound,
 		bool contig)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret;
 
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_lock();
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
@@ -498,7 +504,7 @@ try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 				flags, align, bound, contig);
 	}
 
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_unlock();
 	return ret;
 }
 
@@ -637,13 +643,15 @@ malloc_heap_alloc_on_heap_id(const char *type, size_t size,
 	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
 	int socket_id;
 	void *ret;
+	const struct internal_config *internal_conf =
+		eal_get_internal_configuration();
 
 	rte_spinlock_lock(&(heap->lock));
 
 	align = align == 0 ? 1 : align;
 
 	/* for legacy mode, try once and with all flags */
-	if (internal_config.legacy_mem) {
+	if (internal_conf->legacy_mem) {
 		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
 		goto alloc_unlock;
 	}
@@ -821,13 +829,14 @@ malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
 int
 malloc_heap_free(struct malloc_elem *elem)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	struct malloc_heap *heap;
 	void *start, *aligned_start, *end, *aligned_end;
 	size_t len, aligned_len, page_sz;
 	struct rte_memseg_list *msl;
 	unsigned int i, n_segs, before_space, after_space;
 	int ret;
+	const struct internal_config *internal_conf =
+		eal_get_internal_configuration();
 
 	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
 		return -1;
@@ -850,7 +859,7 @@ malloc_heap_free(struct malloc_elem *elem)
 	/* ...of which we can't avail if we are in legacy mode, or if this is an
 	 * externally allocated segment.
 	 */
-	if (internal_config.legacy_mem || (msl->external > 0))
+	if (internal_conf->legacy_mem || (msl->external > 0))
 		goto free_unlock;
 
 	/* check if we can free any memory back to the system */
@@ -861,7 +870,7 @@ malloc_heap_free(struct malloc_elem *elem)
 	 * we will defer freeing these hugepages until the entire original allocation
 	 * can be freed
 	 */
-	if (internal_config.match_allocations && elem->size != elem->orig_size)
+	if (internal_conf->match_allocations && elem->size != elem->orig_size)
 		goto free_unlock;
 
 	/* probably, but let's make sure, as we may not be using up full page */
@@ -935,7 +944,7 @@ malloc_heap_free(struct malloc_elem *elem)
 
 	/* now we can finally free us some pages */
 
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_lock();
 
 	/*
 	 * we allow secondary processes to clear the heap of this allocated
@@ -990,7 +999,7 @@ malloc_heap_free(struct malloc_elem *elem)
 	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
 		msl->socket_id, aligned_len >> 20ULL);
 
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_write_unlock();
 free_unlock:
 	rte_spinlock_unlock(&(heap->lock));
 	return ret;
@@ -1319,10 +1328,11 @@ rte_eal_malloc_heap_init(void)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	unsigned int i;
+	const struct internal_config *internal_conf =
+		eal_get_internal_configuration();
 
-	if (internal_config.match_allocations) {
+	if (internal_conf->match_allocations)
 		RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");
-	}
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		/* assign min socket ID to external heaps */
@@ -1344,7 +1354,7 @@ rte_eal_malloc_heap_init(void)
 
 	if (register_mp_requests()) {
 		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
-		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+		rte_mcfg_mem_read_unlock();
 		return -1;
 	}
 
@@ -1352,7 +1362,7 @@ rte_eal_malloc_heap_init(void)
 	 * even come before primary itself is fully initialized, and secondaries
 	 * do not need to initialize the heap.
 	 */
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	/* secondary process does not need to initialize anything */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
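
The `if (size == 0)` check added to heap_alloc() rejects requests whose size wraps around to zero when rounded up to a cache line: per the diff's own comment, RTE_CACHE_LINE_ROUNDUP() of a value close to SIZE_MAX can overflow back to 0, which would otherwise be treated as a valid tiny allocation. A minimal standalone sketch of that wraparound, assuming a hardcoded 64-byte line size and a local ROUNDUP macro in place of the DPDK ones:

/*
 * Standalone illustration (not DPDK code): rounding a near-SIZE_MAX request
 * up to a 64-byte boundary wraps size_t arithmetic around to 0, which is why
 * heap_alloc() now bails out when the rounded size is 0.
 */
#include <stdint.h>
#include <stdio.h>

#define LINE 64
#define ROUNDUP(sz) ((((size_t)(sz)) + LINE - 1) & ~((size_t)LINE - 1))

int main(void)
{
	size_t req = SIZE_MAX - 10;	/* pathological request size */
	size_t sz = ROUNDUP(req);	/* wraps around to 0 */

	if (sz == 0)			/* same condition as the new guard */
		printf("rounded size overflowed; allocation rejected\n");
	else
		printf("rounded size: %zu\n", sz);
	return 0;
}

The remaining hunks are the API migrations visible above: direct use of mcfg->memory_hotplug_lock is replaced by the rte_mcfg_mem_read/write_lock()/unlock() wrappers, the internal_config global by eal_get_internal_configuration(), and the multiprocess allocation request now carries a malloc_heap_idx computed as heap - mcfg->malloc_heaps instead of a raw heap pointer.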