From 68c360386712df2d729858d2dde16ddb2fd90aac Mon Sep 17 00:00:00 2001
From: Anatoly Burakov
Date: Mon, 30 Apr 2018 12:21:43 +0100
Subject: [PATCH] mem: unmap unneeded space

When we ask to reserve virtual areas, we usually include alignment in
the mapping size, and that memory ends up being wasted. Wasting a
gigabyte of VA space while trying to reserve one gigabyte is pretty
expensive on 32-bit, so after we're done mapping, unmap unneeded space.

Signed-off-by: Anatoly Burakov
Acked-by: Bruce Richardson
---
 lib/librte_eal/common/eal_common_memory.c | 26 ++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 5611a5176d..4f13b58534 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -121,8 +121,32 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
 		aligned_addr, *size);
 
-	if (unmap)
+	if (unmap) {
 		munmap(mapped_addr, map_sz);
+	} else if (!no_align) {
+		void *map_end, *aligned_end;
+		size_t before_len, after_len;
+
+		/* when we reserve space with alignment, we add alignment to
+		 * mapping size. On 32-bit, if 1GB alignment was requested,
+		 * this would waste 1GB of address space, which is a luxury we
+		 * cannot afford. so, if alignment was performed, check if any
+		 * unneeded address space can be unmapped back.
+		 */
+
+		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
+		aligned_end = RTE_PTR_ADD(aligned_addr, *size);
+
+		/* unmap space before aligned mmap address */
+		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
+		if (before_len > 0)
+			munmap(mapped_addr, before_len);
+
+		/* unmap space after aligned end mmap address */
+		after_len = RTE_PTR_DIFF(map_end, aligned_end);
+		if (after_len > 0)
+			munmap(aligned_end, after_len);
+	}
 
 	baseaddr_offset += *size;
 
-- 
2.20.1
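
For readers outside the DPDK tree, below is a minimal standalone sketch of
the same over-map-then-trim technique the patch applies. It is not part of
the patch: plain pointer arithmetic stands in for DPDK's RTE_PTR_ADD()/
RTE_PTR_DIFF() helpers, the reserve_aligned() name is hypothetical, and it
assumes a power-of-two alignment on Linux, where munmap() may legally trim
a mapping from either end.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Hypothetical helper: reserve `size` bytes of VA space aligned to
 * `align` (a power of two) by over-mapping size + align, then unmapping
 * the slack before and after the aligned region rather than leaving it
 * reserved.
 */
static void *
reserve_aligned(size_t size, size_t align)
{
	size_t map_sz = size + align;
	uint8_t *mapped_addr, *aligned_addr, *map_end, *aligned_end;
	size_t before_len, after_len;

	mapped_addr = mmap(NULL, map_sz, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mapped_addr == MAP_FAILED)
		return NULL;

	/* round the mapped address up to the requested alignment */
	aligned_addr = (uint8_t *)(((uintptr_t)mapped_addr + (align - 1))
			& ~((uintptr_t)align - 1));

	map_end = mapped_addr + map_sz;
	aligned_end = aligned_addr + size;

	/* unmap slack before the aligned start */
	before_len = (size_t)(aligned_addr - mapped_addr);
	if (before_len > 0)
		munmap(mapped_addr, before_len);

	/* unmap slack after the aligned end */
	after_len = (size_t)(map_end - aligned_end);
	if (after_len > 0)
		munmap(aligned_end, after_len);

	return aligned_addr;
}

int
main(void)
{
	size_t sz = 1 << 20;    /* reserve 1 MiB ... */
	size_t align = 1 << 21; /* ... aligned to 2 MiB */
	void *va = reserve_aligned(sz, align);

	if (va == NULL) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	printf("reserved %zu bytes at %p (aligned to 0x%zx)\n",
			sz, va, align);
	munmap(va, sz);
	return EXIT_SUCCESS;
}

Over-mapping size + align guarantees that an aligned address exists
somewhere inside the mapping; trimming the head and tail afterwards
returns the slack to the kernel. That is exactly the cost the patch is
addressing: on 32-bit, leaving the slack mapped can waste as much VA
space as the reservation itself.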