X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_malloc%2Fmalloc_heap.c;h=defb903749e8ea496eaaabc49111bd25c314178b;hb=ff708facfcbf42f3dcb3c62d82ecd93e7b8c2506;hp=bba34dc9d089cf64bcdd601dffd53170654aa127;hpb=2a5c356e177dfd86a63cefb034f03e6d6f944875;p=dpdk.git

diff --git a/lib/librte_malloc/malloc_heap.c b/lib/librte_malloc/malloc_heap.c
index bba34dc9d0..defb903749 100644
--- a/lib/librte_malloc/malloc_heap.c
+++ b/lib/librte_malloc/malloc_heap.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
  */
 
 #include 
 #include 
@@ -41,7 +40,6 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -79,17 +77,30 @@ static int
 malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
 {
 	const unsigned mz_flags = 0;
-	const size_t min_size = get_malloc_memzone_size();
+	const size_t block_size = get_malloc_memzone_size();
 
 	/* ensure the data we want to allocate will fit in the memzone */
-	size_t mz_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
-	if (mz_size < min_size)
-		mz_size = min_size;
+	const size_t min_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
+	const struct rte_memzone *mz = NULL;
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	unsigned numa_socket = heap - mcfg->malloc_heaps;
+
+	size_t mz_size = min_size;
+	if (mz_size < block_size)
+		mz_size = block_size;
 
 	char mz_name[RTE_MEMZONE_NAMESIZE];
-	rte_snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
-			heap->numa_socket, heap->mz_count++);
-	const struct rte_memzone *mz = rte_memzone_reserve(mz_name, mz_size,
-			heap->numa_socket, mz_flags);
+	snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
+			numa_socket, heap->mz_count++);
+
+	/* try getting a block. if we fail and we don't need as big a block
+	 * as given in the config, we can shrink our request and try again
+	 */
+	do {
+		mz = rte_memzone_reserve(mz_name, mz_size, numa_socket,
+				mz_flags);
+		if (mz == NULL)
+			mz_size /= 2;
+	} while (mz == NULL && mz_size > min_size);
 	if (mz == NULL)
 		return -1;
 
@@ -97,71 +108,40 @@ malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
 	struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
 	struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
 			mz_size - MALLOC_ELEM_OVERHEAD);
-	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, CACHE_LINE_SIZE);
+	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);
 
 	const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
-	malloc_elem_init(start_elem, heap, elem_size);
+	malloc_elem_init(start_elem, heap, mz, elem_size);
 	malloc_elem_mkend(end_elem, start_elem);
+	malloc_elem_free_list_insert(start_elem);
 
-	start_elem->next_free = heap->free_head;
-	heap->free_head = start_elem;
 	/* increase heap total size by size of new memzone */
 	heap->total_size+=mz_size - MALLOC_ELEM_OVERHEAD;
 	return 0;
 }
 
-/*
- * initialise a malloc heap object. The heap is locked with a private
- * lock while being initialised. This function should only be called the
- * first time a thread calls malloc - if even then, as heaps are per-socket
- * not per-thread.
- */
-static void
-malloc_heap_init(struct malloc_heap *heap)
-{
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-
-	rte_eal_mcfg_wait_complete(mcfg);
-	while (heap->initialised != INITIALISED) {
-		if (rte_atomic32_cmpset(
-				(volatile uint32_t*)&heap->initialised,
-				NOT_INITIALISED, INITIALISING)) {
-
-			heap->free_head = NULL;
-			heap->mz_count = 0;
-			heap->alloc_count = 0;
-			heap->total_size = 0;
-			/*
-			 * Find NUMA socket of heap that is being initialised, so that
-			 * malloc_heaps[n].numa_socket == n
-			 */
-			heap->numa_socket = heap - mcfg->malloc_heaps;
-			rte_spinlock_init(&heap->lock);
-			heap->initialised = INITIALISED;
-		}
-	}
-}
-
 /*
  * Iterates through the freelist for a heap to find a free element
  * which can store data of the required size and with the requested alignment.
- * Returns null on failure, or pointer to element on success, with the pointer
- * to the previous element in the list, if any, being returned in a parameter
- * (to make removing the element from the free list faster).
+ * Returns null on failure, or pointer to element on success.
  */
 static struct malloc_elem *
-find_suitable_element(struct malloc_heap *heap, size_t size,
-		unsigned align, struct malloc_elem **prev)
+find_suitable_element(struct malloc_heap *heap, size_t size, unsigned align)
 {
-	struct malloc_elem *elem = heap->free_head;
-	*prev = NULL;
-	while(elem){
-		if (malloc_elem_can_hold(elem, size, align))
-			break;
-		*prev = elem;
-		elem = elem->next_free;
+	size_t idx;
+	struct malloc_elem *elem;
+
+	for (idx = malloc_elem_free_list_index(size);
+		idx < RTE_HEAP_NUM_FREELISTS; idx++)
+	{
+		for (elem = LIST_FIRST(&heap->free_head[idx]);
+			!!elem; elem = LIST_NEXT(elem, free_list))
+		{
+			if (malloc_elem_can_hold(elem, size, align))
+				return elem;
+		}
 	}
-	return elem;
+	return NULL;
 }
 
 /*
@@ -174,21 +154,17 @@ void *
 malloc_heap_alloc(struct malloc_heap *heap,
 		const char *type __attribute__((unused)), size_t size, unsigned align)
 {
-	if (!heap->initialised)
-		malloc_heap_init(heap);
-
-	size = CACHE_LINE_ROUNDUP(size);
-	align = CACHE_LINE_ROUNDUP(align);
+	size = RTE_CACHE_LINE_ROUNDUP(size);
+	align = RTE_CACHE_LINE_ROUNDUP(align);
 
 	rte_spinlock_lock(&heap->lock);
-	struct malloc_elem *prev, *elem = find_suitable_element(heap,
-			size, align, &prev);
+	struct malloc_elem *elem = find_suitable_element(heap, size, align);
 	if (elem == NULL){
 		if ((malloc_heap_add_memzone(heap, size, align)) == 0)
-			elem = find_suitable_element(heap, size, align, &prev);
+			elem = find_suitable_element(heap, size, align);
 	}
 	if (elem != NULL){
-		elem = malloc_elem_alloc(elem, size, align, prev);
+		elem = malloc_elem_alloc(elem, size, align);
 		/* increase heap's count of allocated elements */
 		heap->alloc_count++;
 	}
@@ -201,13 +177,11 @@ malloc_heap_alloc(struct malloc_heap *heap,
  * Function to retrieve data for heap on given socket
  */
 int
-malloc_heap_get_stats(struct malloc_heap *heap,
+malloc_heap_get_stats(const struct malloc_heap *heap,
 		struct rte_malloc_socket_stats *socket_stats)
 {
-	if (!heap->initialised)
-		return -1;
-
-	struct malloc_elem *elem = heap->free_head;
+	size_t idx;
+	struct malloc_elem *elem;
 
 	/* Initialise variables for heap */
 	socket_stats->free_count = 0;
@@ -215,13 +189,15 @@
 	socket_stats->greatest_free_size = 0;
 
 	/* Iterate through free list */
-	while(elem) {
-		socket_stats->free_count++;
-		socket_stats->heap_freesz_bytes += elem->size;
-		if (elem->size > socket_stats->greatest_free_size)
-			socket_stats->greatest_free_size = elem->size;
-
-		elem = elem->next_free;
+	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
+		for (elem = LIST_FIRST(&heap->free_head[idx]);
+			!!elem; elem = LIST_NEXT(elem, free_list))
+		{
+			socket_stats->free_count++;
+			socket_stats->heap_freesz_bytes += elem->size;
+			if (elem->size > socket_stats->greatest_free_size)
+				socket_stats->greatest_free_size = elem->size;
+		}
 	}
 	/* Get stats on overall heap and allocated memory on this heap */
 	socket_stats->heap_totalsz_bytes = heap->total_size;
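
A note on the new reservation logic in malloc_heap_add_memzone(): if
rte_memzone_reserve() cannot supply the preferred block size from the
config, the request is halved until it either succeeds or falls below the
minimum this allocation needs. The standalone sketch below models just
that strategy; try_reserve() and AVAILABLE are hypothetical stand-ins for
rte_memzone_reserve() and the remaining hugepage memory, not DPDK APIs.

#include <stdio.h>
#include <stdlib.h>

#define AVAILABLE (4u << 20)	/* pretend only 4MB remains reservable */

static void *
try_reserve(size_t len)
{
	/* fail, as rte_memzone_reserve() would, when the request is too big */
	return (len <= AVAILABLE) ? malloc(len) : NULL;
}

int
main(void)
{
	const size_t block_size = 16u << 20;	/* preferred size from config */
	const size_t min_size = 64 * 1024;	/* smallest block still useful */
	size_t mz_size = block_size > min_size ? block_size : min_size;
	void *mz = NULL;

	/* halve the request until it fits or drops to the minimum */
	do {
		mz = try_reserve(mz_size);
		if (mz == NULL)
			mz_size /= 2;
	} while (mz == NULL && mz_size > min_size);

	if (mz == NULL)
		return 1;
	printf("reserved %zu bytes\n", mz_size);	/* 4194304 here */
	free(mz);
	return 0;
}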
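
A side note on "heap - mcfg->malloc_heaps" in the same function: because
the heaps live in a per-socket array inside the shared mem_config, a
heap's NUMA socket can be recovered from its address, which is why the
patch can delete the stored numa_socket field along with
malloc_heap_init(). A minimal illustration of that pointer arithmetic,
using hypothetical names:

#include <stdio.h>

#define NUM_SOCKETS 4

struct heap { int dummy; };
static struct heap heaps[NUM_SOCKETS];	/* stand-in for malloc_heaps[] */

int
main(void)
{
	struct heap *h = &heaps[2];
	/* element address minus array base yields the array index */
	unsigned socket = (unsigned)(h - heaps);
	printf("socket %u\n", socket);	/* prints: socket 2 */
	return 0;
}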
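
The larger change is the move from one linearly scanned free_head list to
an array of free lists indexed by size (malloc_elem_free_list_index() and
RTE_HEAP_NUM_FREELISTS), so a lookup can skip entire buckets of elements
that are too small. The sketch below reproduces that search pattern with
<sys/queue.h>; the power-of-two bucketing in list_index() is only an
assumption for illustration, since the real malloc_elem_free_list_index()
lives in malloc_elem.c and is not part of this diff.

#include <stddef.h>
#include <stdio.h>
#include <sys/queue.h>

#define NUM_FREELISTS 13	/* stand-in for RTE_HEAP_NUM_FREELISTS */

struct elem {
	size_t size;
	LIST_ENTRY(elem) free_list;
};

LIST_HEAD(elem_list, elem);
static struct elem_list free_head[NUM_FREELISTS];

/* first bucket that may hold an element of at least `size` bytes */
static size_t
list_index(size_t size)
{
	size_t idx = 0;

	while (idx < NUM_FREELISTS - 1 && (256u << idx) < size)
		idx++;
	return idx;
}

/*
 * Start at the bucket matching the request and work upwards. Within a
 * bucket, elements may still be smaller than the request (they only
 * share a size range), hence the per-element check, which mirrors the
 * malloc_elem_can_hold() test in find_suitable_element().
 */
static struct elem *
find_free(size_t size)
{
	struct elem *e;
	size_t idx;

	for (idx = list_index(size); idx < NUM_FREELISTS; idx++)
		LIST_FOREACH(e, &free_head[idx], free_list)
			if (e->size >= size)
				return e;
	return NULL;
}

int
main(void)
{
	static struct elem small = { .size = 512 };
	static struct elem big = { .size = 8192 };
	size_t i;

	for (i = 0; i < NUM_FREELISTS; i++)
		LIST_INIT(&free_head[i]);
	LIST_INSERT_HEAD(&free_head[list_index(small.size)], &small, free_list);
	LIST_INSERT_HEAD(&free_head[list_index(big.size)], &big, free_list);

	/* a 1KB request never visits the 512-byte element's bucket */
	struct elem *e = find_free(1024);
	printf("found element of %zu bytes\n", e ? e->size : 0);	/* 8192 */
	return 0;
}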