X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_memzone.c;h=381f643bb459e0a3dc9a5e49a4504d31da11012e;hb=942405f9e2f2c22aa817be374ccfe939a72df2ce;hp=17e558880448b9a35d7a8045a1cf2545ade66f22;hpb=dada9ef6edc59015b6674b5a95258787c71401b0;p=dpdk.git

diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c
index 17e5588804..381f643bb4 100644
--- a/app/test/test_memzone.c
+++ b/app/test/test_memzone.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
  */
 
 #include
@@ -37,13 +36,15 @@
 #include
 #include
-#include
-
+#include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
+#include
 
 #include "test.h"
@@ -115,7 +116,7 @@ test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
 		return -1;
 	}
 
-	mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", 0x1900000000ULL,
+	mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
 			SOCKET_ID_ANY, 0);
 	if (mz != NULL) {
 		printf("It is impossible to reserve such big a memzone\n");
@@ -132,7 +133,7 @@ test_memzone_reserve_flags(void)
 	const struct rte_memseg *ms;
 	int hugepage_2MB_avail = 0;
 	int hugepage_1GB_avail = 0;
-	const int size = 100;
+	const size_t size = 100;
 	int i = 0;
 	ms = rte_eal_get_physmem_layout();
 	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
@@ -265,9 +266,9 @@ test_memzone_reserve_max(void)
 	const struct rte_memseg *ms;
 	int memseg_idx = 0;
 	int memzone_idx = 0;
-	uint64_t len = 0;
+	size_t len = 0;
 	void* last_addr;
-	uint64_t maxlen = 0;
+	size_t maxlen = 0;
 
 	/* get pointer to global configuration */
 	config = rte_eal_get_configuration();
@@ -279,8 +280,10 @@
 		if (ms[memseg_idx].len < maxlen)
 			continue;
 
-		len = ms[memseg_idx].len;
-		last_addr = ms[memseg_idx].addr;
+		/* align everything */
+		last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
+		len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
+		len &= ~((size_t) CACHE_LINE_MASK);
 
 		/* cycle through all memzones */
 		for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
@@ -291,23 +294,21 @@
 
 			/* check if the memzone is in our memseg and subtract length */
 			if ((config->mem_config->memzone[memzone_idx].addr >=
-					ms[memseg_idx].addr) &&
-					(config->mem_config->memzone[memzone_idx].addr <=
-					(RTE_PTR_ADD(ms[memseg_idx].addr,
-					(size_t)ms[memseg_idx].len)))) {
+					ms[memseg_idx].addr) &&
+					(config->mem_config->memzone[memzone_idx].addr <
+					(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
 				/* since the zones can now be aligned and occasionally skip
 				 * some space, we should calculate the length based on
 				 * reported length and start addresses difference. Addresses
 				 * are allocated sequentially so we don't need to worry about
 				 * them being in the right order.
 				 */
-				len -= (uintptr_t) RTE_PTR_SUB(
-						config->mem_config->memzone[memzone_idx].addr,
-						(uintptr_t) last_addr);
+				len -= RTE_PTR_DIFF(
+						config->mem_config->memzone[memzone_idx].addr,
+						last_addr);
 				len -= config->mem_config->memzone[memzone_idx].len;
-				last_addr =
-						RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
-						(size_t) config->mem_config->memzone[memzone_idx].len);
+				last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
+						(size_t) config->mem_config->memzone[memzone_idx].len);
 			}
 		}
@@ -317,20 +318,25 @@
 			maxlen = len;
 	}
 
+	if (maxlen == 0) {
+		printf("There is no space left!\n");
+		return 0;
+	}
+
 	mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
 	if (mz == NULL){
 		printf("Failed to reserve a big chunk of memory\n");
-		rte_dump_physmem_layout();
-		rte_memzone_dump();
+		rte_dump_physmem_layout(stdout);
+		rte_memzone_dump(stdout);
 		return -1;
 	}
 
 	if (mz->len != maxlen) {
 		printf("Memzone reserve with 0 size did not return bigest block\n");
-		printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
-				maxlen, mz->len);
-		rte_dump_physmem_layout();
-		rte_memzone_dump();
+		printf("Expected size = %zu, actual size = %zu\n",
+				maxlen, mz->len);
+		rte_dump_physmem_layout(stdout);
+		rte_memzone_dump(stdout);
 		return -1;
 	}
@@ -345,9 +351,14 @@ test_memzone_reserve_max_aligned(void)
 	const struct rte_memseg *ms;
 	int memseg_idx = 0;
 	int memzone_idx = 0;
-	uint64_t addr_offset, len = 0;
+	uintptr_t addr_offset;
+	size_t len = 0;
 	void* last_addr;
-	uint64_t maxlen = 0;
+	size_t maxlen = 0;
+
+	/* random alignment */
+	rte_srand((unsigned)rte_rdtsc());
+	const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
 
 	/* get pointer to global configuration */
 	config = rte_eal_get_configuration();
@@ -362,8 +373,10 @@
 		if (ms[memseg_idx].len < maxlen)
 			continue;
 
-		len = ms[memseg_idx].len;
-		last_addr = ms[memseg_idx].addr;
+		/* align everything */
+		last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
+		len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
+		len &= ~((size_t) CACHE_LINE_MASK);
 
 		/* cycle through all memzones */
 		for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
@@ -375,9 +388,8 @@
 			/* check if the memzone is in our memseg and subtract length */
 			if ((config->mem_config->memzone[memzone_idx].addr >=
 					ms[memseg_idx].addr) &&
-					(config->mem_config->memzone[memzone_idx].addr <=
-					(RTE_PTR_ADD(ms[memseg_idx].addr,
-					(size_t) ms[memseg_idx].len)))) {
+					(config->mem_config->memzone[memzone_idx].addr <
+					(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
 				/* since the zones can now be aligned and occasionally skip
 				 * some space, we should calculate the length based on
 				 * reported length and start addresses difference.
@@ -394,29 +406,34 @@
 
 		/* make sure we get the alignment offset */
 		if (len > maxlen) {
-			addr_offset = RTE_ALIGN_CEIL((uintptr_t) last_addr, 512) - (uintptr_t) last_addr;
+			addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) - (uintptr_t) last_addr;
 			maxlen = len;
 		}
 	}
 
+	if (maxlen == 0 || maxlen == addr_offset) {
+		printf("There is no space left for biggest %u-aligned memzone!\n", align);
+		return 0;
+	}
+
 	maxlen -= addr_offset;
 
 	mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
-			SOCKET_ID_ANY, 0, 512);
+			SOCKET_ID_ANY, 0, align);
 	if (mz == NULL){
 		printf("Failed to reserve a big chunk of memory\n");
-		rte_dump_physmem_layout();
-		rte_memzone_dump();
+		rte_dump_physmem_layout(stdout);
+		rte_memzone_dump(stdout);
 		return -1;
 	}
 
 	if (mz->len != maxlen) {
-		printf("Memzone reserve with 0 size and alignment 512 did not return"
-				" bigest block\n");
-		printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
+		printf("Memzone reserve with 0 size and alignment %u did not return"
+				" bigest block\n", align);
+		printf("Expected size = %zu, actual size = %zu\n",
 				maxlen, mz->len);
-		rte_dump_physmem_layout();
-		rte_memzone_dump();
+		rte_dump_physmem_layout(stdout);
+		rte_memzone_dump(stdout);
 		return -1;
 	}
@@ -433,60 +450,74 @@ test_memzone_aligned(void)
 	const struct rte_memzone *memzone_aligned_1024;
 
 	/* memzone that should automatically be adjusted to align on 64 bytes */
-	memzone_aligned_32 = rte_memzone_lookup("aligned_32");
-	if (memzone_aligned_32 == NULL)
-		memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
+	memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
 				SOCKET_ID_ANY, 0, 32);
 
 	/* memzone that is supposed to be aligned on a 128 byte boundary */
-	memzone_aligned_128 = rte_memzone_lookup("aligned_128");
-	if (memzone_aligned_128 == NULL)
-		memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
+	memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
 				SOCKET_ID_ANY, 0, 128);
 
 	/* memzone that is supposed to be aligned on a 256 byte boundary */
-	memzone_aligned_256 = rte_memzone_lookup("aligned_256");
-	if (memzone_aligned_256 == NULL)
-		memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
+	memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
 				SOCKET_ID_ANY, 0, 256);
 
 	/* memzone that is supposed to be aligned on a 512 byte boundary */
-	memzone_aligned_512 = rte_memzone_lookup("aligned_512");
-	if (memzone_aligned_512 == NULL)
-		memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
+	memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
 				SOCKET_ID_ANY, 0, 512);
 
 	/* memzone that is supposed to be aligned on a 1024 byte boundary */
-	memzone_aligned_1024 = rte_memzone_lookup("aligned_1024");
-	if (memzone_aligned_1024 == NULL)
-		memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
+	memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
 				SOCKET_ID_ANY, 0, 1024);
 
 	printf("check alignments and lengths\n");
+	if (memzone_aligned_32 == NULL) {
+		printf("Unable to reserve 64-byte aligned memzone!\n");
+		return -1;
+	}
 	if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
 		return -1;
 	if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
 		return -1;
 	if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
 		return -1;
+
+	if (memzone_aligned_128 == NULL) {
+		printf("Unable to reserve 128-byte aligned memzone!\n");
+		return -1;
+	}
 	if ((memzone_aligned_128->phys_addr & 127) != 0)
 		return -1;
 	if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
 		return -1;
 	if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
 		return -1;
+
+	if (memzone_aligned_256 == NULL) {
+		printf("Unable to reserve 256-byte aligned memzone!\n");
+		return -1;
+	}
 	if ((memzone_aligned_256->phys_addr & 255) != 0)
 		return -1;
 	if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
 		return -1;
 	if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
 		return -1;
+
+	if (memzone_aligned_512 == NULL) {
+		printf("Unable to reserve 512-byte aligned memzone!\n");
+		return -1;
+	}
 	if ((memzone_aligned_512->phys_addr & 511) != 0)
 		return -1;
 	if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
 		return -1;
 	if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
 		return -1;
+
+	if (memzone_aligned_1024 == NULL) {
+		printf("Unable to reserve 1024-byte aligned memzone!\n");
+		return -1;
+	}
 	if ((memzone_aligned_1024->phys_addr & 1023) != 0)
 		return -1;
 	if (((uintptr_t) memzone_aligned_1024->addr & 1023) !=
@@ -494,7 +525,6 @@ test_memzone_aligned(void)
 	if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
 		return -1;
 
-
 	/* check that zones don't overlap */
 	printf("check overlapping\n");
 	if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
@@ -530,34 +560,394 @@
 	return 0;
 }
 
-int
+static int
+check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
+	uint32_t bound)
+{
+	const struct rte_memzone *mz;
+	phys_addr_t bmask;
+
+	bmask = ~((phys_addr_t)bound - 1);
+
+	if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
+			align, bound)) == NULL) {
+		printf("%s(%s): memzone creation failed\n",
+			__func__, name);
+		return (-1);
+	}
+
+	if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
+		printf("%s(%s): invalid phys addr alignment\n",
+			__func__, mz->name);
+		return (-1);
+	}
+
+	if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
+		printf("%s(%s): invalid virtual addr alignment\n",
+			__func__, mz->name);
+		return (-1);
+	}
+
+	if ((mz->len & CACHE_LINE_MASK) != 0 || mz->len < len ||
+			mz->len < CACHE_LINE_SIZE) {
+		printf("%s(%s): invalid length\n",
+			__func__, mz->name);
+		return (-1);
+	}
+
+	if ((mz->phys_addr & bmask) !=
+			((mz->phys_addr + mz->len - 1) & bmask)) {
+		printf("%s(%s): invalid memzone boundary %u crossed\n",
+			__func__, mz->name, bound);
+		return (-1);
+	}
+
+	return (0);
+}
+
+static int
+test_memzone_bounded(void)
+{
+	const struct rte_memzone *memzone_err;
+	const char *name;
+	int rc;
+
+	/* should fail as boundary is not power of two */
+	name = "bounded_error_31";
+	if ((memzone_err = rte_memzone_reserve_bounded(name,
+			100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
+		printf("%s(%s)created a memzone with invalid boundary "
+			"conditions\n", __func__, memzone_err->name);
+		return (-1);
+	}
+
+	/* should fail as len is greater then boundary */
+	name = "bounded_error_32";
+	if ((memzone_err = rte_memzone_reserve_bounded(name,
+			100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
+		printf("%s(%s)created a memzone with invalid boundary "
+			"conditions\n", __func__, memzone_err->name);
+		return (-1);
+	}
+
+	if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
+		return (rc);
+
+	if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
+		return (rc);
+
+	if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
+		return (rc);
+
+	if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
+		return (rc);
+
+	return (0);
+}
+
+static int
+test_memzone_reserve_memory_in_smallest_segment(void)
+{
+	const struct rte_memzone *mz;
+	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
+	size_t min_len, prev_min_len;
+	const struct rte_config *config;
+	int i;
+
+	config = rte_eal_get_configuration();
+
+	min_ms = NULL;  /*< smallest segment */
+	prev_min_ms = NULL; /*< second smallest segment */
+
+	/* find two smallest segments */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		ms = &config->mem_config->free_memseg[i];
+
+		if (ms->addr == NULL)
+			break;
+		if (ms->len == 0)
+			continue;
+
+		if (min_ms == NULL)
+			min_ms = ms;
+		else if (min_ms->len > ms->len) {
+			/* set last smallest to second last */
+			prev_min_ms = min_ms;
+
+			/* set new smallest */
+			min_ms = ms;
+		}
+		else if (prev_min_ms == NULL) {
+			prev_min_ms = ms;
+		}
+	}
+
+	if (min_ms == NULL || prev_min_ms == NULL) {
+		printf("Smallest segments not found!\n");
+		return -1;
+	}
+
+	min_len = min_ms->len;
+	prev_min_len = prev_min_ms->len;
+
+	/* try reserving a memzone in the smallest memseg */
+	mz = rte_memzone_reserve("smallest_mz", CACHE_LINE_SIZE,
+			SOCKET_ID_ANY, 0);
+	if (mz == NULL) {
+		printf("Failed to reserve memory from smallest memseg!\n");
+		return -1;
+	}
+	if (prev_min_ms->len != prev_min_len &&
+			min_ms->len != min_len - CACHE_LINE_SIZE) {
+		printf("Reserved memory from wrong memseg!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* this test is a bit  tricky, and thus warrants explanation.
+ *
+ * first, we find two smallest memsegs to conduct our experiments on.
+ *
+ * then, we bring them within alignment from each other: if second segment is
+ * twice+ as big as the first, reserve memory from that segment; if second
+ * segment is comparable in length to the first, then cut the first segment
+ * down until it becomes less than half of second segment, and then cut down
+ * the second segment to be within alignment of the first.
+ *
+ * then, we have to pass the following test: if segments are within alignment
+ * of each other (that is, the difference is less than 256 bytes, which is what
+ * our alignment will be), segment with smallest offset should be picked.
+ *
+ * we know that min_ms will be our smallest segment, so we need to make sure
+ * that we adjust the alignments so that the bigger segment has smallest
+ * alignment (in our case, smallest segment will have 64-byte alignment, while
+ * bigger segment will have 128-byte alignment).
+ */
+static int
+test_memzone_reserve_memory_with_smallest_offset(void)
+{
+	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
+	size_t len, min_len, prev_min_len;
+	const struct rte_config *config;
+	int i, align;
+
+	config = rte_eal_get_configuration();
+
+	min_ms = NULL;  /*< smallest segment */
+	prev_min_ms = NULL; /*< second smallest segment */
+	align = CACHE_LINE_SIZE * 4;
+
+	/* find two smallest segments */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		ms = &config->mem_config->free_memseg[i];
+
+		if (ms->addr == NULL)
+			break;
+		if (ms->len == 0)
+			continue;
+
+		if (min_ms == NULL)
+			min_ms = ms;
+		else if (min_ms->len > ms->len) {
+			/* set last smallest to second last */
+			prev_min_ms = min_ms;
+
+			/* set new smallest */
+			min_ms = ms;
+		}
+		else if (prev_min_ms == NULL) {
+			prev_min_ms = ms;
+		}
+	}
+
+	if (min_ms == NULL || prev_min_ms == NULL) {
+		printf("Smallest segments not found!\n");
+		return -1;
+	}
+
+	prev_min_len = prev_min_ms->len;
+	min_len = min_ms->len;
+
+	/* if smallest segment is bigger than half of bigger segment */
+	if (prev_min_ms->len - min_ms->len <= min_ms->len) {
+
+		len = (min_ms->len * 2) - prev_min_ms->len;
+
+		/* make sure final length is *not* aligned */
+		while (((min_ms->addr_64 + len) & (align-1)) == 0)
+			len += CACHE_LINE_SIZE;
+
+		if (rte_memzone_reserve("dummy_mz1", len, SOCKET_ID_ANY, 0) == NULL) {
+			printf("Cannot reserve memory!\n");
+			return -1;
+		}
+
+		/* check if we got memory from correct segment */
+		if (min_ms->len != min_len - len) {
+			printf("Reserved memory from wrong segment!\n");
+			return -1;
+		}
+	}
+	/* if we don't need to touch smallest segment but it's aligned */
+	else if ((min_ms->addr_64 & (align-1)) == 0) {
+		if (rte_memzone_reserve("align_mz1", CACHE_LINE_SIZE,
+				SOCKET_ID_ANY, 0) == NULL) {
+			printf("Cannot reserve memory!\n");
+			return -1;
+		}
+		if (min_ms->len != min_len - CACHE_LINE_SIZE) {
+			printf("Reserved memory from wrong segment!\n");
+			return -1;
+		}
+	}
+
+	/* if smallest segment is less than half of bigger segment */
+	if (prev_min_ms->len - min_ms->len > min_ms->len) {
+		len = prev_min_ms->len - min_ms->len - align;
+
+		/* make sure final length is aligned */
+		while (((prev_min_ms->addr_64 + len) & (align-1)) != 0)
+			len += CACHE_LINE_SIZE;
+
+		if (rte_memzone_reserve("dummy_mz2", len, SOCKET_ID_ANY, 0) == NULL) {
+			printf("Cannot reserve memory!\n");
+			return -1;
+		}
+
+		/* check if we got memory from correct segment */
+		if (prev_min_ms->len != prev_min_len - len) {
+			printf("Reserved memory from wrong segment!\n");
+			return -1;
+		}
+	}
+	len = CACHE_LINE_SIZE;
+
+
+
+	prev_min_len = prev_min_ms->len;
+	min_len = min_ms->len;
+
+	if (min_len >= prev_min_len || prev_min_len - min_len > (unsigned) align) {
+		printf("Segments are of wrong lengths!\n");
+		return -1;
+	}
+
+	/* try reserving from a bigger segment */
+	if (rte_memzone_reserve_aligned("smallest_offset", len, SOCKET_ID_ANY, 0, align) ==
+			NULL) {
+		printf("Cannot reserve memory!\n");
+		return -1;
+	}
+
+	/* check if we got memory from correct segment */
+	if (min_ms->len != min_len && prev_min_ms->len != (prev_min_len - len)) {
+		printf("Reserved memory from segment with smaller offset!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+test_memzone_reserve_remainder(void)
+{
+	const struct rte_memzone *mz1, *mz2;
+	const struct rte_memseg *ms, *min_ms = NULL;
+	size_t min_len;
+	const struct rte_config *config;
+	int i, align;
+
+	min_len = 0;
+	align = CACHE_LINE_SIZE;
+
+	config = rte_eal_get_configuration();
+
+	/* find minimum free contiguous length */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		ms = &config->mem_config->free_memseg[i];
+
+		if (ms->addr == NULL)
+			break;
+		if (ms->len == 0)
+			continue;
+
+		if (min_len == 0 || ms->len < min_len) {
+			min_len = ms->len;
+			min_ms = ms;
+
+			/* find maximum alignment this segment is able to hold */
+			align = CACHE_LINE_SIZE;
+			while ((ms->addr_64 & (align-1)) == 0) {
+				align <<= 1;
+			}
+		}
+	}
+
+	if (min_ms == NULL) {
+		printf("Minimal sized segment not found!\n");
+		return -1;
+	}
+
+	/* try reserving min_len bytes with alignment - this should not affect our
+	 * memseg, the memory will be taken from a different one.
+	 */
+	mz1 = rte_memzone_reserve_aligned("reserve_remainder_1", min_len,
+			SOCKET_ID_ANY, 0, align);
+	if (mz1 == NULL) {
+		printf("Failed to reserve %zu bytes aligned on %i bytes\n", min_len,
+				align);
+		return -1;
+	}
+	if (min_ms->len != min_len) {
+		printf("Memseg memory should not have been reserved!\n");
+		return -1;
+	}
+
+	/* try reserving min_len bytes with less alignment - this should fill up
+	 * the segment.
+	 */
+	mz2 = rte_memzone_reserve("reserve_remainder_2", min_len,
+			SOCKET_ID_ANY, 0);
+	if (mz2 == NULL) {
+		printf("Failed to reserve %zu bytes\n", min_len);
+		return -1;
+	}
+	if (min_ms->len != 0) {
+		printf("Memseg memory should have been reserved!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
 test_memzone(void)
 {
 	const struct rte_memzone *memzone1;
 	const struct rte_memzone *memzone2;
 	const struct rte_memzone *memzone3;
+	const struct rte_memzone *memzone4;
 	const struct rte_memzone *mz;
 
-	memzone1 = rte_memzone_lookup("testzone1");
-	if (memzone1 == NULL)
-		memzone1 = rte_memzone_reserve("testzone1", 100,
+	memzone1 = rte_memzone_reserve("testzone1", 100,
 				SOCKET_ID_ANY, 0);
 
-	memzone2 = rte_memzone_lookup("testzone2");
-	if (memzone2 == NULL)
-		memzone2 = rte_memzone_reserve("testzone2", 1000,
+	memzone2 = rte_memzone_reserve("testzone2", 1000,
 				0, 0);
 
-	memzone3 = rte_memzone_lookup("testzone3");
-	if (memzone3 == NULL)
-		memzone3 = rte_memzone_reserve("testzone3", 1000,
+	memzone3 = rte_memzone_reserve("testzone3", 1000,
 				1, 0);
 
+	memzone4 = rte_memzone_reserve("testzone4", 1024,
+			SOCKET_ID_ANY, 0);
+
 	/* memzone3 may be NULL if we don't have NUMA */
-	if (memzone1 == NULL || memzone2 == NULL)
+	if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
 		return -1;
 
-	rte_memzone_dump();
+	rte_memzone_dump(stdout);
 
 	/* check cache-line alignments */
 	printf("check alignments and lengths\n");
@@ -575,6 +965,8 @@ test_memzone(void)
 	if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
 			memzone3->len == 0))
 		return -1;
+	if (memzone4->len != 1024)
+		return -1;
 
 	/* check that zones don't overlap */
 	printf("check overlapping\n");
@@ -614,8 +1006,12 @@ test_memzone(void)
 	if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
 		return -1;
 
-	printf("test reserving the largest size memzone possible\n");
-	if (test_memzone_reserve_max() < 0)
+	printf("test reserving memory in smallest segments\n");
+	if (test_memzone_reserve_memory_in_smallest_segment() < 0)
+		return -1;
+
+	printf("test reserving memory in segments with smallest offsets\n");
+	if (test_memzone_reserve_memory_with_smallest_offset() < 0)
 		return -1;
 
 	printf("test memzone_reserve flags\n");
@@ -626,13 +1022,31 @@ test_memzone(void)
 	if (test_memzone_aligned() < 0)
 		return -1;
 
+	printf("test boundary alignment for memzone_reserve\n");
+	if (test_memzone_bounded() < 0)
+		return -1;
+
 	printf("test invalid alignment for memzone_reserve\n");
 	if (test_memzone_invalid_alignment() < 0)
 		return -1;
 
+	printf("test reserving amounts of memory equal to segment's length\n");
+	if (test_memzone_reserve_remainder() < 0)
+		return -1;
+
+	printf("test reserving the largest size memzone possible\n");
+	if (test_memzone_reserve_max() < 0)
+		return -1;
+
 	printf("test reserving the largest size aligned memzone possible\n");
 	if (test_memzone_reserve_max_aligned() < 0)
 		return -1;
 
 	return 0;
 }
+
+static struct test_command memzone_cmd = {
+	.command = "memzone_autotest",
+	.callback = test_memzone,
+};
+REGISTER_TEST_COMMAND(memzone_cmd);