X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Feal_common_memzone.c;h=64f4e0ade4f75b041c6952871e9518290f484e56;hb=c3cec1d8070860b3f68f3418b876a511fb99e981;hp=bcdd95e203b06cea8ddee156cec7ee92f6839bae;hpb=916e4f4f4e45a1d3cdd473cf9ef71c7212b83d40;p=dpdk.git

diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index bcdd95e203..64f4e0ade4 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
  */
 
 #include 
@@ -44,7 +43,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -52,15 +50,15 @@
 #include 
 #include 
 
+#include "malloc_heap.h"
+#include "malloc_elem.h"
 #include "eal_private.h"
 
-/* internal copy of free memory segments */
-static struct rte_memseg *free_memseg = NULL;
-
 static inline const struct rte_memzone *
 memzone_lookup_thread_unsafe(const char *name)
 {
        const struct rte_mem_config *mcfg;
+       const struct rte_memzone *mz;
        unsigned i = 0;
 
        /* get pointer to global configuration */
@@ -70,48 +68,89 @@ memzone_lookup_thread_unsafe(const char *name)
         * the algorithm is not optimal (linear), but there are few
         * zones and this function should be called at init only
         */
-       for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
-               if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
+       for (i = 0; i < RTE_MAX_MEMZONE; i++) {
+               mz = &mcfg->memzone[i];
+               if (mz->addr != NULL && !strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
                        return &mcfg->memzone[i];
        }
 
        return NULL;
 }
 
-/*
- * Return a pointer to a correctly filled memzone descriptor. If the
- * allocation cannot be done, return NULL.
- */
-const struct rte_memzone *
-rte_memzone_reserve(const char *name, uint64_t len, int socket_id,
-               unsigned flags)
+static inline struct rte_memzone *
+get_next_free_memzone(void)
 {
-       return rte_memzone_reserve_aligned(name,
-                       len, socket_id, flags, CACHE_LINE_SIZE);
+       struct rte_mem_config *mcfg;
+       unsigned i = 0;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       for (i = 0; i < RTE_MAX_MEMZONE; i++) {
+               if (mcfg->memzone[i].addr == NULL)
+                       return &mcfg->memzone[i];
+       }
+
+       return NULL;
+}
+
+/* This function will return the greatest free block if a heap has been
+ * specified. If no heap has been specified, it will return the heap and
+ * length of the greatest free block available in all heaps */
+static size_t
+find_heap_max_free_elem(int *s, unsigned align)
+{
+       struct rte_mem_config *mcfg;
+       struct rte_malloc_socket_stats stats;
+       int i, socket = *s;
+       size_t len = 0;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+               if ((socket != SOCKET_ID_ANY) && (socket != i))
+                       continue;
+
+               malloc_heap_get_stats(&mcfg->malloc_heaps[i], &stats);
+               if (stats.greatest_free_size > len) {
+                       len = stats.greatest_free_size;
+                       *s = i;
+               }
+       }
+
+       if (len < MALLOC_ELEM_OVERHEAD + align)
+               return 0;
+
+       return len - MALLOC_ELEM_OVERHEAD - align;
 }
 
 static const struct rte_memzone *
-memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
-               int socket_id, unsigned flags, unsigned align)
+memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
+               int socket_id, unsigned flags, unsigned align, unsigned bound)
 {
+       struct rte_memzone *mz;
        struct rte_mem_config *mcfg;
-       unsigned i = 0;
-       int memseg_idx = -1;
-       uint64_t addr_offset, requested_len;
-       uint64_t memseg_len = 0;
-       phys_addr_t memseg_physaddr;
-       void *memseg_addr;
+       size_t requested_len;
+       int socket, i;
 
        /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;
 
        /* no more room in config */
-       if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
+       if (mcfg->memzone_cnt >= RTE_MAX_MEMZONE) {
                RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
                rte_errno = ENOSPC;
                return NULL;
        }
 
+       if (strlen(name) > sizeof(mz->name) - 1) {
+               RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n",
+                       __func__, name);
+               rte_errno = ENAMETOOLONG;
+               return NULL;
+       }
+
        /* zone already exist */
        if ((memzone_lookup_thread_unsafe(name)) != NULL) {
                RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
@@ -120,163 +159,200 @@ memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
                return NULL;
        }
 
-       /* align length on cache boundary */
-       len += CACHE_LINE_MASK;
-       len &= ~((uint64_t) CACHE_LINE_MASK);
-
-       /* save original length */
-       requested_len = len;
-
-       /* reserve extra space for future alignment */
-       if (len)
-               len += align;
-
-       /* save requested length */
-       requested_len = len;
+       /* if alignment is not a power of two */
+       if (align && !rte_is_power_of_2(align)) {
+               RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
+                               align);
+               rte_errno = EINVAL;
+               return NULL;
+       }
 
-       /* reserve extra space for future alignment */
-       if (len)
-               len += align;
+       /* alignment less than cache size is not allowed */
+       if (align < RTE_CACHE_LINE_SIZE)
+               align = RTE_CACHE_LINE_SIZE;
 
-       /* find the smallest segment matching requirements */
-       for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-               /* last segment */
-               if (free_memseg[i].addr == NULL)
-                       break;
+       /* align length on cache boundary. Check for overflow before doing so */
+       if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
+               rte_errno = EINVAL; /* requested size too big */
+               return NULL;
+       }
 
-               /* empty segment, skip it */
-               if (free_memseg[i].len == 0)
-                       continue;
+       len += RTE_CACHE_LINE_MASK;
+       len &= ~((size_t) RTE_CACHE_LINE_MASK);
 
-               /* bad socket ID */
-               if (socket_id != SOCKET_ID_ANY &&
-                   socket_id != free_memseg[i].socket_id)
-                       continue;
+       /* save minimal requested length */
+       requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
 
-               /* check len */
-               if (len != 0 && len > free_memseg[i].len)
-                       continue;
+       /* check that boundary condition is valid */
+       if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
 
-               /* check flags for hugepage sizes */
-               if ((flags & RTE_MEMZONE_2MB) &&
-                               free_memseg[i].hugepage_sz == RTE_PGSIZE_1G )
-                       continue;
-               if ((flags & RTE_MEMZONE_1GB) &&
-                               free_memseg[i].hugepage_sz == RTE_PGSIZE_2M )
-                       continue;
+       if ((socket_id != SOCKET_ID_ANY) && (socket_id >= RTE_MAX_NUMA_NODES)) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
 
-               /* this segment is the best until now */
-               if (memseg_idx == -1) {
-                       memseg_idx = i;
-                       memseg_len = free_memseg[i].len;
-               }
-               /* find the biggest contiguous zone */
-               else if (len == 0) {
-                       if (free_memseg[i].len > memseg_len) {
-                               memseg_idx = i;
-                               memseg_len = free_memseg[i].len;
+       if (!rte_eal_has_hugepages())
+               socket_id = SOCKET_ID_ANY;
+
+       if (len == 0) {
+               if (bound != 0)
+                       requested_len = bound;
+               else {
+                       requested_len = find_heap_max_free_elem(&socket_id, align);
+                       if (requested_len == 0) {
+                               rte_errno = ENOMEM;
+                               return NULL;
                        }
                }
-               /*
-                * find the smallest (we already checked that current
-                * zone length is > len
-                */
-               else if (free_memseg[i].len < memseg_len) {
-                       memseg_idx = i;
-                       memseg_len = free_memseg[i].len;
+       }
+
+       if (socket_id == SOCKET_ID_ANY)
+               socket = malloc_get_numa_socket();
+       else
+               socket = socket_id;
+
+       /* allocate memory on heap */
+       void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
+                       requested_len, flags, align, bound);
+
+       if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
+               /* try other heaps */
+               for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+                       if (socket == i)
+                               continue;
+
+                       mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
+                                       NULL, requested_len, flags, align, bound);
+                       if (mz_addr != NULL)
+                               break;
                }
        }
 
-       /* no segment found */
-       if (memseg_idx == -1) {
-               /*
-                * If RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
-                * try allocating again without the size parameter otherwise -fail.
-                */
-               if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
-                   ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)))
-                       return memzone_reserve_aligned_thread_unsafe(name, len - align,
-                                       socket_id, 0, align);
-
-               RTE_LOG(ERR, EAL, "%s(): No appropriate segment found\n", __func__);
+       if (mz_addr == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }
 
-       /* get offset needed to adjust alignment */
-       addr_offset = RTE_ALIGN_CEIL(free_memseg[memseg_idx].phys_addr, align) -
-                       free_memseg[memseg_idx].phys_addr;
+       const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
 
-       /* save aligned physical and virtual addresses */
-       memseg_physaddr = free_memseg[memseg_idx].phys_addr + addr_offset;
-       memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr, (uintptr_t) addr_offset);
+       /* fill the zone in config */
+       mz = get_next_free_memzone();
 
-       /* if we are looking for a biggest memzone */
-       if (requested_len == 0)
-               requested_len = memseg_len - addr_offset;
+       if (mz == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
+                               "in config!\n", __func__);
+               rte_errno = ENOSPC;
+               return NULL;
+       }
 
-       /* set length to correct value */
-       len = addr_offset + requested_len;
+       mcfg->memzone_cnt++;
+       snprintf(mz->name, sizeof(mz->name), "%s", name);
+       mz->phys_addr = rte_malloc_virt2phy(mz_addr);
+       mz->addr = mz_addr;
+       mz->len = (requested_len == 0 ? elem->size : requested_len);
+       mz->hugepage_sz = elem->ms->hugepage_sz;
+       mz->socket_id = elem->ms->socket_id;
+       mz->flags = 0;
+       mz->memseg_id = elem->ms - rte_eal_get_configuration()->mem_config->memseg;
 
-       /* update our internal state */
-       free_memseg[memseg_idx].len -= len;
-       free_memseg[memseg_idx].phys_addr += len;
-       free_memseg[memseg_idx].addr =
-               (char *)free_memseg[memseg_idx].addr + len;
+       return mz;
+}
 
-       /* fill the zone in config */
-       struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
-       rte_snprintf(mz->name, sizeof(mz->name), "%s", name);
-       mz->phys_addr = memseg_physaddr;
-       mz->addr = memseg_addr;
-       mz->len = requested_len;
-       mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
-       mz->socket_id = free_memseg[memseg_idx].socket_id;
-       mz->flags = 0;
+static const struct rte_memzone *
+rte_memzone_reserve_thread_safe(const char *name, size_t len,
+               int socket_id, unsigned flags, unsigned align,
+               unsigned bound)
+{
+       struct rte_mem_config *mcfg;
+       const struct rte_memzone *mz = NULL;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       rte_rwlock_write_lock(&mcfg->mlock);
+
+       mz = memzone_reserve_aligned_thread_unsafe(
+               name, len, socket_id, flags, align, bound);
+
+       rte_rwlock_write_unlock(&mcfg->mlock);
 
        return mz;
 }
 
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment and boundary). If the allocation cannot be done,
+ * return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
+                       unsigned flags, unsigned align, unsigned bound)
+{
+       return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
+                                              align, bound);
+}
+
 /*
  * Return a pointer to a correctly filled memzone descriptor (with a
  * specified alignment). If the allocation cannot be done, return NULL.
  */
 const struct rte_memzone *
-rte_memzone_reserve_aligned(const char *name, uint64_t len,
-               int socket_id, unsigned flags, unsigned align)
+rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
+               unsigned flags, unsigned align)
 {
-       struct rte_mem_config *mcfg;
-       const struct rte_memzone *mz = NULL;
+       return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
+                       align, 0);
+}
 
-       /* both sizes cannot be explicitly called for */
-       if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
-               rte_errno = EINVAL;
-               return NULL;
-       }
+/*
+ * Return a pointer to a correctly filled memzone descriptor. If the
+ * allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve(const char *name, size_t len, int socket_id,
+                   unsigned flags)
+{
+       return rte_memzone_reserve_thread_safe(name, len, socket_id,
+                       flags, RTE_CACHE_LINE_SIZE, 0);
+}
 
-       /* if alignment is not a power of two */
-       if (!rte_is_power_of_2(align)) {
-               RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
-                               align);
-               rte_errno = EINVAL;
-               return NULL;
-       }
+int
+rte_memzone_free(const struct rte_memzone *mz)
+{
+       struct rte_mem_config *mcfg;
+       int ret = 0;
+       void *addr;
+       unsigned idx;
 
-       /* alignment less than cache size is not allowed */
-       if (align < CACHE_LINE_SIZE)
-               align = CACHE_LINE_SIZE;
+       if (mz == NULL)
+               return -EINVAL;
 
-       /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;
 
        rte_rwlock_write_lock(&mcfg->mlock);
 
-       mz = memzone_reserve_aligned_thread_unsafe(
-               name, len, socket_id, flags, align);
+       idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
+       idx = idx / sizeof(struct rte_memzone);
+
+       addr = mcfg->memzone[idx].addr;
+       if (addr == NULL)
+               ret = -EINVAL;
+       else if (mcfg->memzone_cnt == 0) {
+               rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
+                               __func__);
+       } else {
+               memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
+               mcfg->memzone_cnt--;
+       }
 
        rte_rwlock_write_unlock(&mcfg->mlock);
 
-       return mz;
+       rte_free(addr);
+
+       return ret;
 }
 
 /*
@@ -289,7 +365,7 @@ rte_memzone_lookup(const char *name)
        const struct rte_memzone *memzone = NULL;
 
        mcfg = rte_eal_get_configuration()->mem_config;
-       
+
        rte_rwlock_read_lock(&mcfg->mlock);
 
        memzone = memzone_lookup_thread_unsafe(name);
@@ -301,7 +377,7 @@ rte_memzone_lookup(const char *name)
 
 /* Dump all reserved memory zones on console */
 void
-rte_memzone_dump(void)
+rte_memzone_dump(FILE *f)
 {
        struct rte_mem_config *mcfg;
        unsigned i = 0;
@@ -314,7 +390,7 @@ rte_memzone_dump(void)
        for (i=0; i<RTE_MAX_MEMZONE; i++) {
                if (mcfg->memzone[i].addr == NULL)
                        break;
-               printf("Zone %o: name:<%s>, phys:0x%"PRIx64", len:0x%"PRIx64""
+               fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
                       ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
                       mcfg->memzone[i].name,
                       mcfg->memzone[i].phys_addr,
@@ -326,45 +402,6 @@ rte_memzone_dump(void)
        rte_rwlock_read_unlock(&mcfg->mlock);
 }
 
-/*
- * called by init: modify the free memseg list to have cache-aligned
- * addresses and cache-aligned lengths
- */
-static int
-memseg_sanitize(struct rte_memseg *memseg)
-{
-       unsigned phys_align;
-       unsigned virt_align;
-       unsigned off;
-
-       phys_align = memseg->phys_addr & CACHE_LINE_MASK;
-       virt_align = (unsigned long)memseg->addr & CACHE_LINE_MASK;
-
-       /*
-        * sanity check: phys_addr and addr must have the same
-        * alignment
-        */
-       if (phys_align != virt_align)
-               return -1;
-
-       /* memseg is really too small, don't bother with it */
-       if (memseg->len < (2 * CACHE_LINE_SIZE)) {
-               memseg->len = 0;
-               return 0;
-       }
-
-       /* align start address */
-       off = (CACHE_LINE_SIZE - phys_align) & CACHE_LINE_MASK;
-       memseg->phys_addr += off;
-       memseg->addr = (char *)memseg->addr + off;
-       memseg->len -= off;
-
-       /* align end address */
-       memseg->len &= ~((uint64_t)CACHE_LINE_MASK);
-
-       return 0;
-}
-
 /*
  * Init the memzone subsystem
  */
@@ -373,14 +410,10 @@ rte_eal_memzone_init(void)
 {
        struct rte_mem_config *mcfg;
        const struct rte_memseg *memseg;
-       unsigned i = 0;
 
        /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;
 
-       /* mirror the runtime memsegs from config */
-       free_memseg = mcfg->free_memseg;
-
        /* secondary processes don't need to initialise anything */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return 0;
@@ -393,25 +426,28 @@ rte_eal_memzone_init(void)
 
        rte_rwlock_write_lock(&mcfg->mlock);
 
-       /* duplicate the memsegs from config */
-       memcpy(free_memseg, memseg, sizeof(struct rte_memseg) * RTE_MAX_MEMSEG);
-
-       /* make all zones cache-aligned */
-       for (i=0; i<RTE_MAX_MEMSEG; i++) {
-               if (memseg_sanitize(&free_memseg[i]) < 0) {
-                       RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
-                       rte_rwlock_write_unlock(&mcfg->mlock);
-                       return -1;
-               }
-       }
-
        /* delete all zones */
-       mcfg->memzone_idx = 0;
+       mcfg->memzone_cnt = 0;
        memset(mcfg->memzone, 0, sizeof(mcfg->memzone));
 
        rte_rwlock_write_unlock(&mcfg->mlock);
 
-       return 0;
+       return rte_eal_malloc_heap_init();
+}
+
+/* Walk all reserved memory zones */
+void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
+                     void *arg)
+{
+       struct rte_mem_config *mcfg;
+       unsigned i;
+
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       rte_rwlock_read_lock(&mcfg->mlock);
+       for (i=0; i<RTE_MAX_MEMZONE; i++) {
+               if (mcfg->memzone[i].addr != NULL)
+                       (*func)(&mcfg->memzone[i], arg);
+       }
+       rte_rwlock_read_unlock(&mcfg->mlock);
 }