/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "test.h"
/*
 * Memzone
 * =======
 *
 * - Search for three reserved zones or reserve them if they do not exist:
 *
 *     - One is on any socket id.
 *     - The second is on socket 0.
 *     - The last one is on socket 1 (if socket 1 exists).
 *
 * - Check that the zones exist.
 *
 * - Check that the zones are cache-aligned.
 *
 * - Check that zones do not overlap.
 *
 * - Check that the zones are on the correct socket id.
 *
 * - Check that a lookup of the first zone returns the same pointer.
 *
 * - Check that it is not possible to create another zone with the
 *   same name as an existing zone.
 *
 * - Check flags for specific huge page size reservation
 */
76 /* Test if memory overlaps: return 1 if true, or 0 if false. */
78 is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
80 if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
82 else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2)
88 test_memzone_invalid_alignment(void)
90 const struct rte_memzone * mz;
92 mz = rte_memzone_lookup("invalid_alignment");
94 printf("Zone with invalid alignment has been reserved\n");
98 mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
99 SOCKET_ID_ANY, 0, 100);
101 printf("Zone with invalid alignment has been reserved\n");
108 test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
110 const struct rte_memzone * mz;
112 mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
114 printf("zone_size_bigger_than_the_maximum has been reserved\n");
118 mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
121 printf("It is impossible to reserve such big a memzone\n");
129 test_memzone_reserve_flags(void)
131 const struct rte_memzone *mz;
132 const struct rte_memseg *ms;
133 int hugepage_2MB_avail = 0;
134 int hugepage_1GB_avail = 0;
135 int hugepage_16MB_avail = 0;
136 int hugepage_16GB_avail = 0;
137 const size_t size = 100;
139 ms = rte_eal_get_physmem_layout();
140 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
141 if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
142 hugepage_2MB_avail = 1;
143 if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
144 hugepage_1GB_avail = 1;
145 if (ms[i].hugepage_sz == RTE_PGSIZE_16M)
146 hugepage_16MB_avail = 1;
147 if (ms[i].hugepage_sz == RTE_PGSIZE_16G)
148 hugepage_16GB_avail = 1;
150 /* Display the availability of 2MB ,1GB, 16MB, 16GB pages */
151 if (hugepage_2MB_avail)
152 printf("2MB Huge pages available\n");
153 if (hugepage_1GB_avail)
154 printf("1GB Huge pages available\n");
155 if (hugepage_16MB_avail)
156 printf("16MB Huge pages available\n");
157 if (hugepage_16GB_avail)
158 printf("16GB Huge pages available\n");
160 * If 2MB pages available, check that a small memzone is correctly
161 * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag.
162 * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an
163 * available page size (i.e 1GB ) when 2MB pages are unavailable.
165 if (hugepage_2MB_avail) {
166 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
169 printf("MEMZONE FLAG 2MB\n");
172 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
173 printf("hugepage_sz not equal 2M\n");
177 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
178 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
180 printf("MEMZONE FLAG 2MB\n");
183 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
184 printf("hugepage_sz not equal 2M\n");
188 /* Check if 1GB huge pages are unavailable, that function fails unless
189 * HINT flag is indicated
191 if (!hugepage_1GB_avail) {
192 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
193 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
195 printf("MEMZONE FLAG 1GB & HINT\n");
198 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
199 printf("hugepage_sz not equal 2M\n");
203 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
206 printf("MEMZONE FLAG 1GB\n");
212 /*As with 2MB tests above for 1GB huge page requests*/
213 if (hugepage_1GB_avail) {
214 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
217 printf("MEMZONE FLAG 1GB\n");
220 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
221 printf("hugepage_sz not equal 1G\n");
225 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
226 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
228 printf("MEMZONE FLAG 1GB\n");
231 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
232 printf("hugepage_sz not equal 1G\n");
236 /* Check if 1GB huge pages are unavailable, that function fails unless
237 * HINT flag is indicated
239 if (!hugepage_2MB_avail) {
240 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
241 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
243 printf("MEMZONE FLAG 2MB & HINT\n");
246 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
247 printf("hugepage_sz not equal 1G\n");
250 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
253 printf("MEMZONE FLAG 2MB\n");
258 if (hugepage_2MB_avail && hugepage_1GB_avail) {
259 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
260 RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
262 printf("BOTH SIZES SET\n");
268 * This option is for IBM Power. If 16MB pages available, check
269 * that a small memzone is correctly reserved from 16MB huge pages
270 * when requested by the RTE_MEMZONE_16MB flag. Also check that
271 * RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an available
272 * page size (i.e 16GB ) when 16MB pages are unavailable.
274 if (hugepage_16MB_avail) {
275 mz = rte_memzone_reserve("flag_zone_16M", size, SOCKET_ID_ANY,
278 printf("MEMZONE FLAG 16MB\n");
281 if (mz->hugepage_sz != RTE_PGSIZE_16M) {
282 printf("hugepage_sz not equal 16M\n");
286 mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
287 SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
289 printf("MEMZONE FLAG 2MB\n");
292 if (mz->hugepage_sz != RTE_PGSIZE_16M) {
293 printf("hugepage_sz not equal 16M\n");
297 /* Check if 1GB huge pages are unavailable, that function fails
298 * unless HINT flag is indicated
300 if (!hugepage_16GB_avail) {
301 mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
303 RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
305 printf("MEMZONE FLAG 16GB & HINT\n");
308 if (mz->hugepage_sz != RTE_PGSIZE_16M) {
309 printf("hugepage_sz not equal 16M\n");
313 mz = rte_memzone_reserve("flag_zone_16G", size,
314 SOCKET_ID_ANY, RTE_MEMZONE_16GB);
316 printf("MEMZONE FLAG 16GB\n");
321 /*As with 16MB tests above for 16GB huge page requests*/
322 if (hugepage_16GB_avail) {
323 mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY,
326 printf("MEMZONE FLAG 16GB\n");
329 if (mz->hugepage_sz != RTE_PGSIZE_16G) {
330 printf("hugepage_sz not equal 16G\n");
334 mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
335 SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
337 printf("MEMZONE FLAG 16GB\n");
340 if (mz->hugepage_sz != RTE_PGSIZE_16G) {
341 printf("hugepage_sz not equal 16G\n");
345 /* Check if 1GB huge pages are unavailable, that function fails
346 * unless HINT flag is indicated
348 if (!hugepage_16MB_avail) {
349 mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
351 RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
353 printf("MEMZONE FLAG 16MB & HINT\n");
356 if (mz->hugepage_sz != RTE_PGSIZE_16G) {
357 printf("hugepage_sz not equal 16G\n");
360 mz = rte_memzone_reserve("flag_zone_16M", size,
361 SOCKET_ID_ANY, RTE_MEMZONE_16MB);
363 printf("MEMZONE FLAG 16MB\n");
368 if (hugepage_16MB_avail && hugepage_16GB_avail) {
369 mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
371 RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
373 printf("BOTH SIZES SET\n");
382 test_memzone_reserve_max(void)
384 const struct rte_memzone *mz;
385 const struct rte_config *config;
386 const struct rte_memseg *ms;
393 /* get pointer to global configuration */
394 config = rte_eal_get_configuration();
396 ms = rte_eal_get_physmem_layout();
398 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
399 /* ignore smaller memsegs as they can only get smaller */
400 if (ms[memseg_idx].len < maxlen)
403 /* align everything */
404 last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
405 len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
406 len &= ~((size_t) RTE_CACHE_LINE_MASK);
408 /* cycle through all memzones */
409 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
411 /* stop when reaching last allocated memzone */
412 if (config->mem_config->memzone[memzone_idx].addr == NULL)
415 /* check if the memzone is in our memseg and subtract length */
416 if ((config->mem_config->memzone[memzone_idx].addr >=
417 ms[memseg_idx].addr) &&
418 (config->mem_config->memzone[memzone_idx].addr <
419 (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
420 /* since the zones can now be aligned and occasionally skip
421 * some space, we should calculate the length based on
422 * reported length and start addresses difference. Addresses
423 * are allocated sequentially so we don't need to worry about
424 * them being in the right order.
427 config->mem_config->memzone[memzone_idx].addr,
429 len -= config->mem_config->memzone[memzone_idx].len;
430 last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
431 (size_t) config->mem_config->memzone[memzone_idx].len);
435 /* we don't need to calculate offset here since length
436 * is always cache-aligned */
442 printf("There is no space left!\n");
446 mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
448 printf("Failed to reserve a big chunk of memory\n");
449 rte_dump_physmem_layout(stdout);
450 rte_memzone_dump(stdout);
454 if (mz->len != maxlen) {
455 printf("Memzone reserve with 0 size did not return bigest block\n");
456 printf("Expected size = %zu, actual size = %zu\n",
458 rte_dump_physmem_layout(stdout);
459 rte_memzone_dump(stdout);
467 test_memzone_reserve_max_aligned(void)
469 const struct rte_memzone *mz;
470 const struct rte_config *config;
471 const struct rte_memseg *ms;
474 uintptr_t addr_offset;
479 /* random alignment */
480 rte_srand((unsigned)rte_rdtsc());
481 const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
483 /* get pointer to global configuration */
484 config = rte_eal_get_configuration();
486 ms = rte_eal_get_physmem_layout();
490 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
492 /* ignore smaller memsegs as they can only get smaller */
493 if (ms[memseg_idx].len < maxlen)
496 /* align everything */
497 last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
498 len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
499 len &= ~((size_t) RTE_CACHE_LINE_MASK);
501 /* cycle through all memzones */
502 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
504 /* stop when reaching last allocated memzone */
505 if (config->mem_config->memzone[memzone_idx].addr == NULL)
508 /* check if the memzone is in our memseg and subtract length */
509 if ((config->mem_config->memzone[memzone_idx].addr >=
510 ms[memseg_idx].addr) &&
511 (config->mem_config->memzone[memzone_idx].addr <
512 (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
513 /* since the zones can now be aligned and occasionally skip
514 * some space, we should calculate the length based on
515 * reported length and start addresses difference.
517 len -= (uintptr_t) RTE_PTR_SUB(
518 config->mem_config->memzone[memzone_idx].addr,
519 (uintptr_t) last_addr);
520 len -= config->mem_config->memzone[memzone_idx].len;
522 RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
523 (size_t) config->mem_config->memzone[memzone_idx].len);
527 /* make sure we get the alignment offset */
529 addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) - (uintptr_t) last_addr;
534 if (maxlen == 0 || maxlen == addr_offset) {
535 printf("There is no space left for biggest %u-aligned memzone!\n", align);
539 maxlen -= addr_offset;
541 mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
542 SOCKET_ID_ANY, 0, align);
544 printf("Failed to reserve a big chunk of memory\n");
545 rte_dump_physmem_layout(stdout);
546 rte_memzone_dump(stdout);
550 if (mz->len != maxlen) {
551 printf("Memzone reserve with 0 size and alignment %u did not return"
552 " bigest block\n", align);
553 printf("Expected size = %zu, actual size = %zu\n",
555 rte_dump_physmem_layout(stdout);
556 rte_memzone_dump(stdout);
564 test_memzone_aligned(void)
566 const struct rte_memzone *memzone_aligned_32;
567 const struct rte_memzone *memzone_aligned_128;
568 const struct rte_memzone *memzone_aligned_256;
569 const struct rte_memzone *memzone_aligned_512;
570 const struct rte_memzone *memzone_aligned_1024;
572 /* memzone that should automatically be adjusted to align on 64 bytes */
573 memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
574 SOCKET_ID_ANY, 0, 32);
576 /* memzone that is supposed to be aligned on a 128 byte boundary */
577 memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
578 SOCKET_ID_ANY, 0, 128);
580 /* memzone that is supposed to be aligned on a 256 byte boundary */
581 memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
582 SOCKET_ID_ANY, 0, 256);
584 /* memzone that is supposed to be aligned on a 512 byte boundary */
585 memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
586 SOCKET_ID_ANY, 0, 512);
588 /* memzone that is supposed to be aligned on a 1024 byte boundary */
589 memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
590 SOCKET_ID_ANY, 0, 1024);
592 printf("check alignments and lengths\n");
593 if (memzone_aligned_32 == NULL) {
594 printf("Unable to reserve 64-byte aligned memzone!\n");
597 if ((memzone_aligned_32->phys_addr & RTE_CACHE_LINE_MASK) != 0)
599 if (((uintptr_t) memzone_aligned_32->addr & RTE_CACHE_LINE_MASK) != 0)
601 if ((memzone_aligned_32->len & RTE_CACHE_LINE_MASK) != 0)
604 if (memzone_aligned_128 == NULL) {
605 printf("Unable to reserve 128-byte aligned memzone!\n");
608 if ((memzone_aligned_128->phys_addr & 127) != 0)
610 if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
612 if ((memzone_aligned_128->len & RTE_CACHE_LINE_MASK) != 0)
615 if (memzone_aligned_256 == NULL) {
616 printf("Unable to reserve 256-byte aligned memzone!\n");
619 if ((memzone_aligned_256->phys_addr & 255) != 0)
621 if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
623 if ((memzone_aligned_256->len & RTE_CACHE_LINE_MASK) != 0)
626 if (memzone_aligned_512 == NULL) {
627 printf("Unable to reserve 512-byte aligned memzone!\n");
630 if ((memzone_aligned_512->phys_addr & 511) != 0)
632 if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
634 if ((memzone_aligned_512->len & RTE_CACHE_LINE_MASK) != 0)
637 if (memzone_aligned_1024 == NULL) {
638 printf("Unable to reserve 1024-byte aligned memzone!\n");
641 if ((memzone_aligned_1024->phys_addr & 1023) != 0)
643 if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
645 if ((memzone_aligned_1024->len & RTE_CACHE_LINE_MASK) != 0)
648 /* check that zones don't overlap */
649 printf("check overlapping\n");
650 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
651 memzone_aligned_128->phys_addr, memzone_aligned_128->len))
653 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
654 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
656 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
657 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
659 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
660 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
662 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
663 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
665 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
666 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
668 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
669 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
671 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
672 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
674 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
675 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
677 if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
678 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
684 check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
687 const struct rte_memzone *mz;
690 bmask = ~((phys_addr_t)bound - 1);
692 if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
693 align, bound)) == NULL) {
694 printf("%s(%s): memzone creation failed\n",
699 if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
700 printf("%s(%s): invalid phys addr alignment\n",
705 if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
706 printf("%s(%s): invalid virtual addr alignment\n",
711 if ((mz->len & RTE_CACHE_LINE_MASK) != 0 || mz->len < len ||
712 mz->len < RTE_CACHE_LINE_SIZE) {
713 printf("%s(%s): invalid length\n",
718 if ((mz->phys_addr & bmask) !=
719 ((mz->phys_addr + mz->len - 1) & bmask)) {
720 printf("%s(%s): invalid memzone boundary %u crossed\n",
721 __func__, mz->name, bound);
729 test_memzone_bounded(void)
731 const struct rte_memzone *memzone_err;
735 /* should fail as boundary is not power of two */
736 name = "bounded_error_31";
737 if ((memzone_err = rte_memzone_reserve_bounded(name,
738 100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
739 printf("%s(%s)created a memzone with invalid boundary "
740 "conditions\n", __func__, memzone_err->name);
744 /* should fail as len is greater then boundary */
745 name = "bounded_error_32";
746 if ((memzone_err = rte_memzone_reserve_bounded(name,
747 100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
748 printf("%s(%s)created a memzone with invalid boundary "
749 "conditions\n", __func__, memzone_err->name);
753 if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
756 if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
759 if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
762 if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
769 test_memzone_reserve_memory_in_smallest_segment(void)
771 const struct rte_memzone *mz;
772 const struct rte_memseg *ms, *min_ms, *prev_min_ms;
773 size_t min_len, prev_min_len;
774 const struct rte_config *config;
777 config = rte_eal_get_configuration();
779 min_ms = NULL; /*< smallest segment */
780 prev_min_ms = NULL; /*< second smallest segment */
782 /* find two smallest segments */
783 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
784 ms = &config->mem_config->free_memseg[i];
786 if (ms->addr == NULL)
793 else if (min_ms->len > ms->len) {
794 /* set last smallest to second last */
795 prev_min_ms = min_ms;
797 /* set new smallest */
799 } else if ((prev_min_ms == NULL)
800 || (prev_min_ms->len > ms->len))
804 if (min_ms == NULL || prev_min_ms == NULL) {
805 printf("Smallest segments not found!\n");
809 min_len = min_ms->len;
810 prev_min_len = prev_min_ms->len;
812 /* try reserving a memzone in the smallest memseg */
813 mz = rte_memzone_reserve("smallest_mz", RTE_CACHE_LINE_SIZE,
816 printf("Failed to reserve memory from smallest memseg!\n");
819 if (prev_min_ms->len != prev_min_len &&
820 min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
821 printf("Reserved memory from wrong memseg!\n");
/* this test is a bit tricky, and thus warrants explanation.
 *
 * first, we find two smallest memsegs to conduct our experiments on.
 *
 * then, we bring them within alignment from each other: if second segment is
 * twice+ as big as the first, reserve memory from that segment; if second
 * segment is comparable in length to the first, then cut the first segment
 * down until it becomes less than half of second segment, and then cut down
 * the second segment to be within alignment of the first.
 *
 * then, we have to pass the following test: if segments are within alignment
 * of each other (that is, the difference is less than 256 bytes, which is what
 * our alignment will be), segment with smallest offset should be picked.
 *
 * we know that min_ms will be our smallest segment, so we need to make sure
 * that we adjust the alignments so that the bigger segment has smallest
 * alignment (in our case, smallest segment will have 64-byte alignment, while
 * bigger segment will have 128-byte alignment).
 */
848 test_memzone_reserve_memory_with_smallest_offset(void)
850 const struct rte_memseg *ms, *min_ms, *prev_min_ms;
851 size_t len, min_len, prev_min_len;
852 const struct rte_config *config;
855 config = rte_eal_get_configuration();
857 min_ms = NULL; /*< smallest segment */
858 prev_min_ms = NULL; /*< second smallest segment */
859 align = RTE_CACHE_LINE_SIZE * 4;
861 /* find two smallest segments */
862 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
863 ms = &config->mem_config->free_memseg[i];
865 if (ms->addr == NULL)
872 else if (min_ms->len > ms->len) {
873 /* set last smallest to second last */
874 prev_min_ms = min_ms;
876 /* set new smallest */
878 } else if ((prev_min_ms == NULL)
879 || (prev_min_ms->len > ms->len)) {
884 if (min_ms == NULL || prev_min_ms == NULL) {
885 printf("Smallest segments not found!\n");
889 prev_min_len = prev_min_ms->len;
890 min_len = min_ms->len;
892 /* if smallest segment is bigger than half of bigger segment */
893 if (prev_min_ms->len - min_ms->len <= min_ms->len) {
895 len = (min_ms->len * 2) - prev_min_ms->len;
897 /* make sure final length is *not* aligned */
898 while (((min_ms->addr_64 + len) & (align-1)) == 0)
899 len += RTE_CACHE_LINE_SIZE;
901 if (rte_memzone_reserve("dummy_mz1", len, SOCKET_ID_ANY, 0) == NULL) {
902 printf("Cannot reserve memory!\n");
906 /* check if we got memory from correct segment */
907 if (min_ms->len != min_len - len) {
908 printf("Reserved memory from wrong segment!\n");
912 /* if we don't need to touch smallest segment but it's aligned */
913 else if ((min_ms->addr_64 & (align-1)) == 0) {
914 if (rte_memzone_reserve("align_mz1", RTE_CACHE_LINE_SIZE,
915 SOCKET_ID_ANY, 0) == NULL) {
916 printf("Cannot reserve memory!\n");
919 if (min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
920 printf("Reserved memory from wrong segment!\n");
925 /* if smallest segment is less than half of bigger segment */
926 if (prev_min_ms->len - min_ms->len > min_ms->len) {
927 len = prev_min_ms->len - min_ms->len - align;
929 /* make sure final length is aligned */
930 while (((prev_min_ms->addr_64 + len) & (align-1)) != 0)
931 len += RTE_CACHE_LINE_SIZE;
933 if (rte_memzone_reserve("dummy_mz2", len, SOCKET_ID_ANY, 0) == NULL) {
934 printf("Cannot reserve memory!\n");
938 /* check if we got memory from correct segment */
939 if (prev_min_ms->len != prev_min_len - len) {
940 printf("Reserved memory from wrong segment!\n");
944 len = RTE_CACHE_LINE_SIZE;
948 prev_min_len = prev_min_ms->len;
949 min_len = min_ms->len;
951 if (min_len >= prev_min_len || prev_min_len - min_len > (unsigned) align) {
952 printf("Segments are of wrong lengths!\n");
956 /* try reserving from a bigger segment */
957 if (rte_memzone_reserve_aligned("smallest_offset", len, SOCKET_ID_ANY, 0, align) ==
959 printf("Cannot reserve memory!\n");
963 /* check if we got memory from correct segment */
964 if (min_ms->len != min_len && prev_min_ms->len != (prev_min_len - len)) {
965 printf("Reserved memory from segment with smaller offset!\n");
973 test_memzone_reserve_remainder(void)
975 const struct rte_memzone *mz1, *mz2;
976 const struct rte_memseg *ms, *min_ms = NULL;
978 const struct rte_config *config;
982 align = RTE_CACHE_LINE_SIZE;
984 config = rte_eal_get_configuration();
986 /* find minimum free contiguous length */
987 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
988 ms = &config->mem_config->free_memseg[i];
990 if (ms->addr == NULL)
995 if (min_len == 0 || ms->len < min_len) {
999 /* find maximum alignment this segment is able to hold */
1000 align = RTE_CACHE_LINE_SIZE;
1001 while ((ms->addr_64 & (align-1)) == 0) {
1007 if (min_ms == NULL) {
1008 printf("Minimal sized segment not found!\n");
1012 /* try reserving min_len bytes with alignment - this should not affect our
1013 * memseg, the memory will be taken from a different one.
1015 mz1 = rte_memzone_reserve_aligned("reserve_remainder_1", min_len,
1016 SOCKET_ID_ANY, 0, align);
1018 printf("Failed to reserve %zu bytes aligned on %i bytes\n", min_len,
1022 if (min_ms->len != min_len) {
1023 printf("Memseg memory should not have been reserved!\n");
1027 /* try reserving min_len bytes with less alignment - this should fill up
1030 mz2 = rte_memzone_reserve("reserve_remainder_2", min_len,
1033 printf("Failed to reserve %zu bytes\n", min_len);
1036 if (min_ms->len != 0) {
1037 printf("Memseg memory should have been reserved!\n");
1047 const struct rte_memzone *memzone1;
1048 const struct rte_memzone *memzone2;
1049 const struct rte_memzone *memzone3;
1050 const struct rte_memzone *memzone4;
1051 const struct rte_memzone *mz;
1053 memzone1 = rte_memzone_reserve("testzone1", 100,
1056 memzone2 = rte_memzone_reserve("testzone2", 1000,
1059 memzone3 = rte_memzone_reserve("testzone3", 1000,
1062 memzone4 = rte_memzone_reserve("testzone4", 1024,
1065 /* memzone3 may be NULL if we don't have NUMA */
1066 if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
1069 rte_memzone_dump(stdout);
1071 /* check cache-line alignments */
1072 printf("check alignments and lengths\n");
1074 if ((memzone1->phys_addr & RTE_CACHE_LINE_MASK) != 0)
1076 if ((memzone2->phys_addr & RTE_CACHE_LINE_MASK) != 0)
1078 if (memzone3 != NULL && (memzone3->phys_addr & RTE_CACHE_LINE_MASK) != 0)
1080 if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
1082 if ((memzone2->len & RTE_CACHE_LINE_MASK) != 0 || memzone2->len == 0)
1084 if (memzone3 != NULL && ((memzone3->len & RTE_CACHE_LINE_MASK) != 0 ||
1085 memzone3->len == 0))
1087 if (memzone4->len != 1024)
1090 /* check that zones don't overlap */
1091 printf("check overlapping\n");
1093 if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
1094 memzone2->phys_addr, memzone2->len))
1096 if (memzone3 != NULL &&
1097 is_memory_overlap(memzone1->phys_addr, memzone1->len,
1098 memzone3->phys_addr, memzone3->len))
1100 if (memzone3 != NULL &&
1101 is_memory_overlap(memzone2->phys_addr, memzone2->len,
1102 memzone3->phys_addr, memzone3->len))
1105 printf("check socket ID\n");
1107 /* memzone2 must be on socket id 0 and memzone3 on socket 1 */
1108 if (memzone2->socket_id != 0)
1110 if (memzone3 != NULL && memzone3->socket_id != 1)
1113 printf("test zone lookup\n");
1114 mz = rte_memzone_lookup("testzone1");
1118 printf("test duplcate zone name\n");
1119 mz = rte_memzone_reserve("testzone1", 100,
1124 printf("test reserving memzone with bigger size than the maximum\n");
1125 if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
1128 printf("test reserving memory in smallest segments\n");
1129 if (test_memzone_reserve_memory_in_smallest_segment() < 0)
1132 printf("test reserving memory in segments with smallest offsets\n");
1133 if (test_memzone_reserve_memory_with_smallest_offset() < 0)
1136 printf("test memzone_reserve flags\n");
1137 if (test_memzone_reserve_flags() < 0)
1140 printf("test alignment for memzone_reserve\n");
1141 if (test_memzone_aligned() < 0)
1144 printf("test boundary alignment for memzone_reserve\n");
1145 if (test_memzone_bounded() < 0)
1148 printf("test invalid alignment for memzone_reserve\n");
1149 if (test_memzone_invalid_alignment() < 0)
1152 printf("test reserving amounts of memory equal to segment's length\n");
1153 if (test_memzone_reserve_remainder() < 0)
1156 printf("test reserving the largest size memzone possible\n");
1157 if (test_memzone_reserve_max() < 0)
1160 printf("test reserving the largest size aligned memzone possible\n");
1161 if (test_memzone_reserve_max_aligned() < 0)
1167 static struct test_command memzone_cmd = {
1168 .command = "memzone_autotest",
1169 .callback = test_memzone,
1171 REGISTER_TEST_COMMAND(memzone_cmd);