diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 3971773618..4a9cc1f19a 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -24,6 +24,8 @@
 #include "eal_memalloc.h"
 #include "eal_private.h"
 #include "eal_internal_cfg.h"
+#include "eal_memcfg.h"
+#include "malloc_heap.h"
 
 /*
  * Try to mmap *size bytes in /dev/zero. If it is successful, return the
@@ -38,6 +40,7 @@
 static void *next_baseaddr;
 static uint64_t system_page_sz;
 
+#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
 void *
 eal_get_virtual_area(void *requested_addr, size_t *size,
 		size_t page_sz, int flags, int mmap_flags)
@@ -45,6 +48,7 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 	bool addr_is_hint, allow_shrink, unmap, no_align;
 	uint64_t map_sz;
 	void *mapped_addr, *aligned_addr;
+	uint8_t try = 0;
 
 	if (system_page_sz == 0)
 		system_page_sz = sysconf(_SC_PAGESIZE);
@@ -61,6 +65,11 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 	    rte_eal_process_type() == RTE_PROC_PRIMARY)
 		next_baseaddr = (void *) internal_config.base_virtaddr;
 
+#ifdef RTE_ARCH_64
+	if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
+			rte_eal_process_type() == RTE_PROC_PRIMARY)
+		next_baseaddr = (void *) eal_get_baseaddr();
+#endif
 	if (requested_addr == NULL && next_baseaddr != NULL) {
 		requested_addr = next_baseaddr;
 		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
@@ -92,7 +101,20 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 				mmap_flags, -1, 0);
 		if (mapped_addr == MAP_FAILED && allow_shrink)
 			*size -= page_sz;
-	} while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);
+
+		if (mapped_addr != MAP_FAILED && addr_is_hint &&
+				mapped_addr != requested_addr) {
+			try++;
+			next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
+			if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
+				/* hint was not used. Try with another offset */
+				munmap(mapped_addr, map_sz);
+				mapped_addr = MAP_FAILED;
+				requested_addr = next_baseaddr;
+			}
+		}
+	} while ((allow_shrink || addr_is_hint) &&
+		mapped_addr == MAP_FAILED && *size > 0);
 
 	/* align resulting address - if map failed, we will ignore the value
 	 * anyway, so no need to add additional checks.
@@ -205,7 +227,7 @@ virt2memseg_list(const void *addr)
 	return msl;
 }
 
-__rte_experimental struct rte_memseg_list *
+struct rte_memseg_list *
 rte_mem_virt2memseg_list(const void *addr)
 {
 	return virt2memseg_list(addr);
@@ -242,7 +264,7 @@ find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
 	return 0;
 }
 
-__rte_experimental void *
+void *
 rte_mem_iova2virt(rte_iova_t iova)
 {
 	struct virtiova vi;
@@ -261,7 +283,7 @@ rte_mem_iova2virt(rte_iova_t iova)
 	return vi.virt;
 }
 
-__rte_experimental struct rte_memseg *
+struct rte_memseg *
 rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
 {
 	return virt2memseg(addr, msl != NULL ? msl :
@@ -330,7 +352,7 @@ dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
  * Defining here because declared in rte_memory.h, but the actual implementation
  * is in eal_common_memalloc.c, like all other memalloc internals.
 */
-int __rte_experimental
+int
 rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
 		void *arg)
 {
@@ -343,7 +365,7 @@ rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
 	return eal_memalloc_mem_event_callback_register(name, clb, arg);
 }
 
-int __rte_experimental
+int
 rte_mem_event_callback_unregister(const char *name, void *arg)
 {
 	/* FreeBSD boots with legacy mem enabled by default */
@@ -355,7 +377,7 @@ rte_mem_event_callback_unregister(const char *name, void *arg)
 	return eal_memalloc_mem_event_callback_unregister(name, arg);
 }
 
-int __rte_experimental
+int
 rte_mem_alloc_validator_register(const char *name,
 		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
 {
@@ -369,7 +391,7 @@ rte_mem_alloc_validator_register(const char *name,
 			limit);
 }
 
-int __rte_experimental
+int
 rte_mem_alloc_validator_unregister(const char *name, int socket_id)
 {
 	/* FreeBSD boots with legacy mem enabled by default */
@@ -388,6 +410,97 @@ rte_dump_physmem_layout(FILE *f)
 	rte_memseg_walk(dump_memseg, f);
 }
 
+static int
+check_iova(const struct rte_memseg_list *msl __rte_unused,
+		const struct rte_memseg *ms, void *arg)
+{
+	uint64_t *mask = arg;
+	rte_iova_t iova;
+
+	/* higher address within segment */
+	iova = (ms->iova + ms->len) - 1;
+	if (!(iova & *mask))
+		return 0;
+
+	RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
+			ms->iova, ms->len);
+
+	RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);
+	return 1;
+}
+
+#define MAX_DMA_MASK_BITS 63
+
+/* check memseg iovas are within the required range based on dma mask */
+static int
+check_dma_mask(uint8_t maskbits, bool thread_unsafe)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	uint64_t mask;
+	int ret;
+
+	/* Sanity check: we only support mask widths that fit in a 64-bit
+	 * variable. Any higher value is almost certainly wrong. */
+	if (maskbits > MAX_DMA_MASK_BITS) {
+		RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
+				maskbits, MAX_DMA_MASK_BITS);
+		return -1;
+	}
+
+	/* create dma mask */
+	mask = ~((1ULL << maskbits) - 1);
+
+	if (thread_unsafe)
+		ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
+	else
+		ret = rte_memseg_walk(check_iova, &mask);
+
+	if (ret)
+		/*
+		 * The DMA mask precludes hugepage usage.
+		 * This device cannot be used and we do not need to keep
+		 * the dma mask.
+		 */
+		return 1;
+
+	/*
+	 * Keep the most restrictive maskbits value, for checking
+	 * potential dynamic memory allocations in the future.
+	 */
+	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
+			RTE_MIN(mcfg->dma_maskbits, maskbits);
+
+	return 0;
+}
+
+int
+rte_mem_check_dma_mask(uint8_t maskbits)
+{
+	return check_dma_mask(maskbits, false);
+}
+
+int
+rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
+{
+	return check_dma_mask(maskbits, true);
+}
+
+/*
+ * Set dma mask to use once memory initialization is done.
+ *
+ * This function should ONLY be used by code executed before the memory
+ * initialization. PMDs should use rte_mem_check_dma_mask if the device
+ * has addressing limitations.
+ */
+void
+rte_mem_set_dma_mask(uint8_t maskbits)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
+			RTE_MIN(mcfg->dma_maskbits, maskbits);
+}
+
 /* return the number of memory channels */
 unsigned rte_memory_get_nchannel(void)
 {
@@ -425,7 +538,7 @@ rte_mem_lock_page(const void *virt)
 	return mlock((void *)aligned, page_size);
 }
 
-int __rte_experimental
+int
 rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
@@ -464,21 +577,20 @@ rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
 	return 0;
 }
 
-int __rte_experimental
+int
 rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret = 0;
 
 	/* do not allow allocations/frees/init while we iterate */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return ret;
 }
 
-int __rte_experimental
+int
 rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
@@ -506,21 +618,20 @@ rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
 	return 0;
 }
 
-int __rte_experimental
+int
 rte_memseg_walk(rte_memseg_walk_t func, void *arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret = 0;
 
 	/* do not allow allocations/frees/init while we iterate */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	ret = rte_memseg_walk_thread_unsafe(func, arg);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return ret;
 }
 
-int __rte_experimental
+int
 rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
@@ -539,21 +650,20 @@ rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
 	return 0;
 }
 
-int __rte_experimental
+int
 rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret = 0;
 
 	/* do not allow allocations/frees/init while we iterate */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return ret;
 }
 
-int __rte_experimental
+int
 rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
@@ -581,6 +691,12 @@ rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
 		return -1;
 	}
 
+	/* segment fd API is not supported for external segments */
+	if (msl->external) {
+		rte_errno = ENOTSUP;
+		return -1;
+	}
+
 	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
 	if (ret < 0) {
 		rte_errno = -ret;
@@ -589,20 +705,19 @@ rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
 	return ret;
 }
 
-int __rte_experimental
+int
 rte_memseg_get_fd(const struct rte_memseg *ms)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret;
 
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	ret = rte_memseg_get_fd_thread_unsafe(ms);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return ret;
}
 
-int __rte_experimental
+int
 rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
 		size_t *offset)
 {
@@ -631,6 +746,12 @@ rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
 		return -1;
 	}
 
+	/* segment fd API is not supported for external segments */
+	if (msl->external) {
+		rte_errno = ENOTSUP;
+		return -1;
+	}
+
 	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
 	if (ret < 0) {
 		rte_errno = -ret;
@@ -639,19 +760,134 @@ rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
 	return ret;
 }
 
-int __rte_experimental
+int
 rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret;
 
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
+
+	return ret;
+}
+
+int
+rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
+		unsigned int n_pages, size_t page_sz)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	unsigned int socket_id, n;
+	int ret = 0;
+
+	if (va_addr == NULL || page_sz == 0 || len == 0 ||
+			!rte_is_power_of_2(page_sz) ||
+			RTE_ALIGN(len, page_sz) != len ||
+			((len / page_sz) != n_pages && iova_addrs != NULL) ||
+			!rte_is_aligned(va_addr, page_sz)) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	rte_mcfg_mem_write_lock();
+
+	/* make sure the segment doesn't already exist */
+	if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
+		rte_errno = EEXIST;
+		ret = -1;
+		goto unlock;
+	}
+
+	/* get next available socket ID */
+	socket_id = mcfg->next_socket_id;
+	if (socket_id > INT32_MAX) {
+		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
+		rte_errno = ENOSPC;
+		ret = -1;
+		goto unlock;
+	}
+
+	/* we can create a new memseg */
+	n = len / page_sz;
+	if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
+			page_sz, "extmem", socket_id) == NULL) {
+		ret = -1;
+		goto unlock;
+	}
+
+	/* memseg list successfully created - increment next socket ID */
+	mcfg->next_socket_id++;
+unlock:
+	rte_mcfg_mem_write_unlock();
+	return ret;
+}
+
+int
+rte_extmem_unregister(void *va_addr, size_t len)
+{
+	struct rte_memseg_list *msl;
+	int ret = 0;
+
+	if (va_addr == NULL || len == 0) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	rte_mcfg_mem_write_lock();
+
+	/* find our segment */
+	msl = malloc_heap_find_external_seg(va_addr, len);
+	if (msl == NULL) {
+		rte_errno = ENOENT;
+		ret = -1;
+		goto unlock;
+	}
+
+	ret = malloc_heap_destroy_external_seg(msl);
+unlock:
+	rte_mcfg_mem_write_unlock();
 
 	return ret;
 }
 
+static int
+sync_memory(void *va_addr, size_t len, bool attach)
+{
+	struct rte_memseg_list *msl;
+	int ret = 0;
+
+	if (va_addr == NULL || len == 0) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	rte_mcfg_mem_write_lock();
+
+	/* find our segment */
+	msl = malloc_heap_find_external_seg(va_addr, len);
+	if (msl == NULL) {
+		rte_errno = ENOENT;
+		ret = -1;
+		goto unlock;
+	}
+	if (attach)
+		ret = rte_fbarray_attach(&msl->memseg_arr);
+	else
+		ret = rte_fbarray_detach(&msl->memseg_arr);
+
+unlock:
+	rte_mcfg_mem_write_unlock();
+	return ret;
+}
+
+int
+rte_extmem_attach(void *va_addr, size_t len)
+{
+	return sync_memory(va_addr, len, true);
+}
+
+int
+rte_extmem_detach(void *va_addr, size_t len)
+{
+	return sync_memory(va_addr, len, false);
+}
+
 /* init memory subsystem */
 int
 rte_eal_memory_init(void)
@@ -664,7 +900,7 @@ rte_eal_memory_init(void)
 		return -1;
 
 	/* lock mem hotplug here, to prevent races while we init */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 
 	if (rte_eal_memseg_init() < 0)
 		goto fail;
@@ -683,6 +919,6 @@ rte_eal_memory_init(void)
 
 	return 0;
 fail:
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	return -1;
 }
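
Below, for context, is a minimal usage sketch of the external-memory API this diff adds. It is not part of the patch: the extmem_example() wrapper, the anonymous mmap() backing, and the region size are illustrative assumptions. It assumes EAL has already been initialized via rte_eal_init(), and it passes iova_addrs == NULL, which the validation in rte_extmem_register() above permits.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

#include <rte_memory.h>
#include <rte_errno.h>

/* Illustrative helper: register an anonymous mapping as external memory,
 * then unregister it again. Assumes rte_eal_init() has already run. */
static int
extmem_example(void)
{
	size_t page_sz = (size_t)sysconf(_SC_PAGESIZE); /* a power of two */
	size_t len = 1024 * page_sz;                    /* multiple of page_sz */
	void *va;

	/* mmap() returns page-aligned memory, which satisfies the
	 * alignment checks performed by rte_extmem_register() */
	va = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (va == MAP_FAILED)
		return -1;

	/* iova_addrs == NULL registers the segment without IOVA addresses */
	if (rte_extmem_register(va, len, NULL,
			(unsigned int)(len / page_sz), page_sz) != 0) {
		printf("rte_extmem_register failed, rte_errno %d\n", rte_errno);
		munmap(va, len);
		return -1;
	}

	/* a secondary process wanting to use the same region would call
	 * rte_extmem_attach(va, len) before use and rte_extmem_detach(va, len)
	 * when done */

	rte_extmem_unregister(va, len);
	munmap(va, len);
	return 0;
}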