4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/queue.h>
39 #include <rte_random.h>
40 #include <rte_cycles.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_tailq.h>
45 #include <rte_eal_memconfig.h>
46 #include <rte_common.h>
47 #include <rte_string_fns.h>
55 * - Search for three reserved zones or reserve them if they do not exist:
57 * - One is on any socket id.
58 * - The second is on socket 0.
59 * - The last one is on socket 1 (if socket 1 exists).
61 * - Check that the zones exist.
63 * - Check that the zones are cache-aligned.
65 * - Check that zones do not overlap.
67 * - Check that the zones are on the correct socket id.
69 * - Check that a lookup of the first zone returns the same pointer.
71 * - Check that it is not possible to create another zone with the
72 * same name as an existing zone.
74 * - Check flags for specific huge page size reservation
77 /* Test if memory overlaps: return 1 if true, or 0 if false. */
79 is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
81 if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
83 else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2)
89 test_memzone_invalid_alignment(void)
91 const struct rte_memzone * mz;
93 mz = rte_memzone_lookup("invalid_alignment");
95 printf("Zone with invalid alignment has been reserved\n");
99 mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
100 SOCKET_ID_ANY, 0, 100);
102 printf("Zone with invalid alignment has been reserved\n");
109 test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
111 const struct rte_memzone * mz;
113 mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
115 printf("zone_size_bigger_than_the_maximum has been reserved\n");
119 mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
122 printf("It is impossible to reserve such big a memzone\n");
/*
 * Verify the page-size flags (RTE_MEMZONE_2MB / RTE_MEMZONE_1GB) and the
 * RTE_MEMZONE_SIZE_HINT_ONLY fallback behavior of rte_memzone_reserve():
 * first detect which hugepage sizes exist in the physmem layout, then check
 * that each flag combination succeeds/fails and lands on the right page size.
 *
 * NOTE(review): this is a truncated listing — the embedded original line
 * numbers jump, so braces, `return -1;` lines and some flag arguments are
 * missing from view. Code left byte-identical; comments only added.
 */
130 test_memzone_reserve_flags(void)
132 const struct rte_memzone *mz;
133 const struct rte_memseg *ms;
134 int hugepage_2MB_avail = 0;
135 int hugepage_1GB_avail = 0;
136 const size_t size = 100;
/* scan every memseg to learn which hugepage sizes are present */
138 ms = rte_eal_get_physmem_layout();
139 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
140 if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
141 hugepage_2MB_avail = 1;
142 if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
143 hugepage_1GB_avail = 1;
145 /* Display the availability of 2MB and 1GB pages */
146 if (hugepage_2MB_avail)
147 printf("2MB Huge pages available\n");
148 if (hugepage_1GB_avail)
149 printf("1GB Huge pages available\n");
151 * If 2MB pages available, check that a small memzone is correctly
152 * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag.
153 * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an
154 * available page size (i.e 1GB ) when 2MB pages are unavailable.
156 if (hugepage_2MB_avail) {
157 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
160 printf("MEMZONE FLAG 2MB\n");
163 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
164 printf("hugepage_sz not equal 2M\n");
/* HINT flag with 2MB pages available: must still come from 2MB pages */
168 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
169 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
171 printf("MEMZONE FLAG 2MB\n");
174 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
175 printf("hugepage_sz not equal 2M\n");
179 /* Check if 1GB huge pages are unavailable, that function fails unless
180 * HINT flag is indicated
182 if (!hugepage_1GB_avail) {
/* 1GB requested but absent: with HINT the call falls back to 2MB pages */
183 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
184 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
186 printf("MEMZONE FLAG 1GB & HINT\n");
189 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
190 printf("hugepage_sz not equal 2M\n");
/* 1GB requested, absent, and no HINT: the reservation must fail */
194 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
197 printf("MEMZONE FLAG 1GB\n");
203 /*As with 2MB tests above for 1GB huge page requests*/
204 if (hugepage_1GB_avail) {
205 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
208 printf("MEMZONE FLAG 1GB\n");
211 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
212 printf("hugepage_sz not equal 1G\n");
216 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
217 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
219 printf("MEMZONE FLAG 1GB\n");
222 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
223 printf("hugepage_sz not equal 1G\n");
227 /* Check if 2MB huge pages are unavailable, that function fails unless
228 * HINT flag is indicated
230 if (!hugepage_2MB_avail) {
/* 2MB requested but absent: with HINT the call falls back to 1GB pages */
231 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
232 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
234 printf("MEMZONE FLAG 2MB & HINT\n");
237 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
238 printf("hugepage_sz not equal 1G\n");
/* 2MB requested, absent, no HINT: the reservation must fail */
241 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
244 printf("MEMZONE FLAG 2MB\n");
/* asking for both page sizes at once is invalid and must fail */
249 if (hugepage_2MB_avail && hugepage_1GB_avail) {
250 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
251 RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
253 printf("BOTH SIZES SET\n");
/*
 * Compute (by walking the memseg layout and subtracting already-reserved
 * memzones) the largest contiguous free block, then check that
 * rte_memzone_reserve() with size 0 returns a zone of exactly that length.
 *
 * NOTE(review): truncated listing — declarations of maxlen/len/last_addr,
 * the `continue`/`break` lines and the loop-closing braces are among the
 * lines missing from view. Code left byte-identical; comments only added.
 */
262 test_memzone_reserve_max(void)
264 const struct rte_memzone *mz;
265 const struct rte_config *config;
266 const struct rte_memseg *ms;
273 /* get pointer to global configuration */
274 config = rte_eal_get_configuration();
276 ms = rte_eal_get_physmem_layout();
278 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
279 /* ignore smaller memsegs as they can only get smaller */
280 if (ms[memseg_idx].len < maxlen)
283 /* align everything */
284 last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
285 len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
286 len &= ~((size_t) CACHE_LINE_MASK);
288 /* cycle through all memzones */
289 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
291 /* stop when reaching last allocated memzone */
292 if (config->mem_config->memzone[memzone_idx].addr == NULL)
295 /* check if the memzone is in our memseg and subtract length */
296 if ((config->mem_config->memzone[memzone_idx].addr >=
297 ms[memseg_idx].addr) &&
298 (config->mem_config->memzone[memzone_idx].addr <
299 (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
300 /* since the zones can now be aligned and occasionally skip
301 * some space, we should calculate the length based on
302 * reported length and start addresses difference. Addresses
303 * are allocated sequentially so we don't need to worry about
304 * them being in the right order.
307 config->mem_config->memzone[memzone_idx].addr,
309 len -= config->mem_config->memzone[memzone_idx].len;
/* advance last_addr past the end of this memzone */
310 last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
311 (size_t) config->mem_config->memzone[memzone_idx].len);
315 /* we don't need to calculate offset here since length
316 * is always cache-aligned */
/* no free space left at all is a test failure */
322 printf("There is no space left!\n");
/* size 0 asks the allocator for the biggest available block */
326 mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
328 printf("Failed to reserve a big chunk of memory\n");
329 rte_dump_physmem_layout(stdout);
330 rte_memzone_dump(stdout);
/* the returned zone must match the independently-computed maximum */
334 if (mz->len != maxlen) {
335 printf("Memzone reserve with 0 size did not return bigest block\n");
336 printf("Expected size = %zu, actual size = %zu\n",
338 rte_dump_physmem_layout(stdout);
339 rte_memzone_dump(stdout);
/*
 * Same idea as test_memzone_reserve_max(), but with a random alignment
 * (128..4096 bytes): compute the biggest free block, subtract the offset
 * needed to reach the chosen alignment, and check that a size-0 aligned
 * reservation returns exactly that many bytes.
 *
 * NOTE(review): truncated listing — maxlen/len/last_addr declarations,
 * `continue`/`break` lines and closing braces are missing from view.
 * Code left byte-identical; comments only added.
 */
347 test_memzone_reserve_max_aligned(void)
349 const struct rte_memzone *mz;
350 const struct rte_config *config;
351 const struct rte_memseg *ms;
354 uintptr_t addr_offset;
359 /* random alignment */
360 rte_srand((unsigned)rte_rdtsc());
361 const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
363 /* get pointer to global configuration */
364 config = rte_eal_get_configuration();
366 ms = rte_eal_get_physmem_layout();
370 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
372 /* ignore smaller memsegs as they can only get smaller */
373 if (ms[memseg_idx].len < maxlen)
376 /* align everything */
377 last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
378 len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
379 len &= ~((size_t) CACHE_LINE_MASK);
381 /* cycle through all memzones */
382 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
384 /* stop when reaching last allocated memzone */
385 if (config->mem_config->memzone[memzone_idx].addr == NULL)
388 /* check if the memzone is in our memseg and subtract length */
389 if ((config->mem_config->memzone[memzone_idx].addr >=
390 ms[memseg_idx].addr) &&
391 (config->mem_config->memzone[memzone_idx].addr <
392 (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
393 /* since the zones can now be aligned and occasionally skip
394 * some space, we should calculate the length based on
395 * reported length and start addresses difference.
397 len -= (uintptr_t) RTE_PTR_SUB(
398 config->mem_config->memzone[memzone_idx].addr,
399 (uintptr_t) last_addr);
400 len -= config->mem_config->memzone[memzone_idx].len;
/* advance last_addr past the end of this memzone */
402 RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
403 (size_t) config->mem_config->memzone[memzone_idx].len);
407 /* make sure we get the alignment offset */
409 addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) - (uintptr_t) last_addr;
/* nothing (or only the alignment padding) left means failure */
414 if (maxlen == 0 || maxlen == addr_offset) {
415 printf("There is no space left for biggest %u-aligned memzone!\n", align);
/* the aligned reservation cannot use the padding bytes */
419 maxlen -= addr_offset;
421 mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
422 SOCKET_ID_ANY, 0, align);
424 printf("Failed to reserve a big chunk of memory\n");
425 rte_dump_physmem_layout(stdout);
426 rte_memzone_dump(stdout);
/* the returned zone must match the independently-computed maximum */
430 if (mz->len != maxlen) {
431 printf("Memzone reserve with 0 size and alignment %u did not return"
432 " bigest block\n", align);
433 printf("Expected size = %zu, actual size = %zu\n",
435 rte_dump_physmem_layout(stdout);
436 rte_memzone_dump(stdout);
/*
 * Reserve five memzones with alignments 32..1024 and check that:
 *  - each zone's physical and virtual addresses honor the requested
 *    alignment (32 is silently bumped to the cache-line size),
 *  - every length is a cache-line multiple,
 *  - no two of the zones overlap in physical memory.
 *
 * NOTE(review): truncated listing — each failed check's `return -1;` line
 * is missing from view. Code left byte-identical; comments only added.
 */
444 test_memzone_aligned(void)
446 const struct rte_memzone *memzone_aligned_32;
447 const struct rte_memzone *memzone_aligned_128;
448 const struct rte_memzone *memzone_aligned_256;
449 const struct rte_memzone *memzone_aligned_512;
450 const struct rte_memzone *memzone_aligned_1024;
452 /* memzone that should automatically be adjusted to align on 64 bytes */
453 memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
454 SOCKET_ID_ANY, 0, 32);
456 /* memzone that is supposed to be aligned on a 128 byte boundary */
457 memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
458 SOCKET_ID_ANY, 0, 128);
460 /* memzone that is supposed to be aligned on a 256 byte boundary */
461 memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
462 SOCKET_ID_ANY, 0, 256);
464 /* memzone that is supposed to be aligned on a 512 byte boundary */
465 memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
466 SOCKET_ID_ANY, 0, 512);
468 /* memzone that is supposed to be aligned on a 1024 byte boundary */
469 memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
470 SOCKET_ID_ANY, 0, 1024);
472 printf("check alignments and lengths\n");
473 if (memzone_aligned_32 == NULL) {
474 printf("Unable to reserve 64-byte aligned memzone!\n");
/* align=32 is below the cache line; expect cache-line alignment instead */
477 if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
479 if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
481 if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
484 if (memzone_aligned_128 == NULL) {
485 printf("Unable to reserve 128-byte aligned memzone!\n");
488 if ((memzone_aligned_128->phys_addr & 127) != 0)
490 if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
492 if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
495 if (memzone_aligned_256 == NULL) {
496 printf("Unable to reserve 256-byte aligned memzone!\n");
499 if ((memzone_aligned_256->phys_addr & 255) != 0)
501 if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
503 if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
506 if (memzone_aligned_512 == NULL) {
507 printf("Unable to reserve 512-byte aligned memzone!\n");
510 if ((memzone_aligned_512->phys_addr & 511) != 0)
512 if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
514 if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
517 if (memzone_aligned_1024 == NULL) {
518 printf("Unable to reserve 1024-byte aligned memzone!\n");
521 if ((memzone_aligned_1024->phys_addr & 1023) != 0)
523 if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
525 if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
528 /* check that zones don't overlap */
529 printf("check overlapping\n");
/* pairwise overlap check across all five zones (10 combinations) */
530 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
531 memzone_aligned_128->phys_addr, memzone_aligned_128->len))
533 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
534 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
536 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
537 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
539 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
540 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
542 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
543 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
545 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
546 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
548 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
549 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
551 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
552 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
554 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
555 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
557 if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
558 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
/*
 * Helper: reserve a bounded memzone and verify the allocator's guarantees —
 * physical and virtual alignment, cache-aligned length of at least `len`,
 * and that the zone does not straddle a `bound`-sized physical boundary.
 *
 * NOTE(review): truncated listing — the signature's trailing `uint32_t bound)`
 * parameter, the `bmask` declaration and the return statements are missing
 * from view. Code left byte-identical; comments only added.
 */
564 check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
567 const struct rte_memzone *mz;
/* mask that keeps only the bits identifying the `bound`-sized window */
570 bmask = ~((phys_addr_t)bound - 1);
572 if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
573 align, bound)) == NULL) {
574 printf("%s(%s): memzone creation failed\n",
579 if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
580 printf("%s(%s): invalid phys addr alignment\n",
585 if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
586 printf("%s(%s): invalid virtual addr alignment\n",
591 if ((mz->len & CACHE_LINE_MASK) != 0 || mz->len < len ||
592 mz->len < CACHE_LINE_SIZE) {
593 printf("%s(%s): invalid length\n",
/* first and last byte must fall inside the same bound-sized window */
598 if ((mz->phys_addr & bmask) !=
599 ((mz->phys_addr + mz->len - 1) & bmask)) {
600 printf("%s(%s): invalid memzone boundary %u crossed\n",
601 __func__, mz->name, bound);
/*
 * Driver for the bounded-reservation tests: first two deliberately invalid
 * requests (non-power-of-two boundary; len > boundary) must fail, then four
 * valid combinations are validated via check_memzone_bounded().
 *
 * NOTE(review): truncated listing — the `name`/`rc` declarations, the
 * `return -1;` lines and the final `return 0;` are missing from view.
 * Code left byte-identical; comments only added.
 */
609 test_memzone_bounded(void)
611 const struct rte_memzone *memzone_err;
615 /* should fail as boundary is not power of two */
616 name = "bounded_error_31";
617 if ((memzone_err = rte_memzone_reserve_bounded(name,
618 100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
619 printf("%s(%s)created a memzone with invalid boundary "
620 "conditions\n", __func__, memzone_err->name);
624 /* should fail as len is greater than boundary */
625 name = "bounded_error_32";
626 if ((memzone_err = rte_memzone_reserve_bounded(name,
627 100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
628 printf("%s(%s)created a memzone with invalid boundary "
629 "conditions\n", __func__, memzone_err->name);
/* valid combinations: (name, len, align, bound) */
633 if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
636 if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
639 if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
642 if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
/*
 * Check that a small (one cache line) reservation is satisfied from the
 * smallest free memseg: find the two smallest free segments, reserve, and
 * verify only the smallest segment shrank by CACHE_LINE_SIZE.
 *
 * NOTE(review): truncated listing — the loop body that assigns `min_ms`,
 * several braces and the return statements are missing from view.
 * Code left byte-identical; comments only added.
 */
649 test_memzone_reserve_memory_in_smallest_segment(void)
651 const struct rte_memzone *mz;
652 const struct rte_memseg *ms, *min_ms, *prev_min_ms;
653 size_t min_len, prev_min_len;
654 const struct rte_config *config;
657 config = rte_eal_get_configuration();
659 min_ms = NULL; /*< smallest segment */
660 prev_min_ms = NULL; /*< second smallest segment */
662 /* find two smallest segments */
663 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
664 ms = &config->mem_config->free_memseg[i];
666 if (ms->addr == NULL)
673 else if (min_ms->len > ms->len) {
674 /* set last smallest to second last */
675 prev_min_ms = min_ms;
677 /* set new smallest */
680 else if (prev_min_ms == NULL) {
685 if (min_ms == NULL || prev_min_ms == NULL) {
686 printf("Smallest segments not found!\n");
/* snapshot the lengths before reserving so we can detect which shrank */
690 min_len = min_ms->len;
691 prev_min_len = prev_min_ms->len;
693 /* try reserving a memzone in the smallest memseg */
694 mz = rte_memzone_reserve("smallest_mz", CACHE_LINE_SIZE,
697 printf("Failed to reserve memory from smallest memseg!\n");
/* the second-smallest segment must be untouched, the smallest shrunk */
700 if (prev_min_ms->len != prev_min_len &&
701 min_ms->len != min_len - CACHE_LINE_SIZE) {
702 printf("Reserved memory from wrong memseg!\n");
709 /* this test is a bit tricky, and thus warrants explanation.
711 * first, we find two smallest memsegs to conduct our experiments on.
713 * then, we bring them within alignment from each other: if second segment is
714 * twice+ as big as the first, reserve memory from that segment; if second
715 * segment is comparable in length to the first, then cut the first segment
716 * down until it becomes less than half of second segment, and then cut down
717 * the second segment to be within alignment of the first.
719 * then, we have to pass the following test: if segments are within alignment
720 * of each other (that is, the difference is less than 256 bytes, which is what
721 * our alignment will be), segment with smallest offset should be picked.
723 * we know that min_ms will be our smallest segment, so we need to make sure
724 * that we adjust the alignments so that the bigger segment has smallest
725 * alignment (in our case, smallest segment will have 64-byte alignment, while
726 * bigger segment will have 128-byte alignment).
/*
 * NOTE(review): truncated listing — the loop body assigning `min_ms`,
 * several braces, `return -1;` lines and the final `return 0;` are missing
 * from view. Code left byte-identical; comments only added.
 */
729 test_memzone_reserve_memory_with_smallest_offset(void)
731 const struct rte_memseg *ms, *min_ms, *prev_min_ms;
732 size_t len, min_len, prev_min_len;
733 const struct rte_config *config;
736 config = rte_eal_get_configuration();
738 min_ms = NULL; /*< smallest segment */
739 prev_min_ms = NULL; /*< second smallest segment */
740 align = CACHE_LINE_SIZE * 4;
742 /* find two smallest segments */
743 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
744 ms = &config->mem_config->free_memseg[i];
746 if (ms->addr == NULL)
753 else if (min_ms->len > ms->len) {
754 /* set last smallest to second last */
755 prev_min_ms = min_ms;
757 /* set new smallest */
760 else if (prev_min_ms == NULL) {
765 if (min_ms == NULL || prev_min_ms == NULL) {
766 printf("Smallest segments not found!\n");
/* snapshot lengths so post-reservation checks can detect which shrank */
770 prev_min_len = prev_min_ms->len;
771 min_len = min_ms->len;
773 /* if smallest segment is bigger than half of bigger segment */
774 if (prev_min_ms->len - min_ms->len <= min_ms->len) {
776 len = (min_ms->len * 2) - prev_min_ms->len;
778 /* make sure final length is *not* aligned */
779 while (((min_ms->addr_64 + len) & (align-1)) == 0)
780 len += CACHE_LINE_SIZE;
782 if (rte_memzone_reserve("dummy_mz1", len, SOCKET_ID_ANY, 0) == NULL) {
783 printf("Cannot reserve memory!\n");
787 /* check if we got memory from correct segment */
788 if (min_ms->len != min_len - len) {
789 printf("Reserved memory from wrong segment!\n");
793 /* if we don't need to touch smallest segment but it's aligned */
794 else if ((min_ms->addr_64 & (align-1)) == 0) {
795 if (rte_memzone_reserve("align_mz1", CACHE_LINE_SIZE,
796 SOCKET_ID_ANY, 0) == NULL) {
797 printf("Cannot reserve memory!\n");
800 if (min_ms->len != min_len - CACHE_LINE_SIZE) {
801 printf("Reserved memory from wrong segment!\n");
806 /* if smallest segment is less than half of bigger segment */
807 if (prev_min_ms->len - min_ms->len > min_ms->len) {
808 len = prev_min_ms->len - min_ms->len - align;
810 /* make sure final length is aligned */
811 while (((prev_min_ms->addr_64 + len) & (align-1)) != 0)
812 len += CACHE_LINE_SIZE;
814 if (rte_memzone_reserve("dummy_mz2", len, SOCKET_ID_ANY, 0) == NULL) {
815 printf("Cannot reserve memory!\n");
819 /* check if we got memory from correct segment */
820 if (prev_min_ms->len != prev_min_len - len) {
821 printf("Reserved memory from wrong segment!\n");
825 len = CACHE_LINE_SIZE;
/* refresh the lengths after the setup reservations above */
829 prev_min_len = prev_min_ms->len;
830 min_len = min_ms->len;
832 if (min_len >= prev_min_len || prev_min_len - min_len > (unsigned) align) {
833 printf("Segments are of wrong lengths!\n");
837 /* try reserving from a bigger segment */
838 if (rte_memzone_reserve_aligned("smallest_offset", len, SOCKET_ID_ANY, 0, align) ==
840 printf("Cannot reserve memory!\n");
844 /* check if we got memory from correct segment */
845 if (min_ms->len != min_len && prev_min_ms->len != (prev_min_len - len)) {
846 printf("Reserved memory from segment with smaller offset!\n");
/*
 * Check "remainder" behavior: reserving exactly min_len bytes with a large
 * alignment must NOT come from the smallest segment (the alignment padding
 * would not fit), while reserving min_len bytes unaligned must fill the
 * smallest segment completely (its free length drops to 0).
 *
 * NOTE(review): truncated listing — `min_len`/`align` declarations, the
 * inner-loop body (align doubling, min_ms assignment), braces and returns
 * are missing from view. Code left byte-identical; comments only added.
 */
854 test_memzone_reserve_remainder(void)
856 const struct rte_memzone *mz1, *mz2;
857 const struct rte_memseg *ms, *min_ms = NULL;
859 const struct rte_config *config;
863 align = CACHE_LINE_SIZE;
865 config = rte_eal_get_configuration();
867 /* find minimum free contiguous length */
868 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
869 ms = &config->mem_config->free_memseg[i];
871 if (ms->addr == NULL)
876 if (min_len == 0 || ms->len < min_len) {
880 /* find maximum alignment this segment is able to hold */
881 align = CACHE_LINE_SIZE;
882 while ((ms->addr_64 & (align-1)) == 0) {
888 if (min_ms == NULL) {
889 printf("Minimal sized segment not found!\n");
893 /* try reserving min_len bytes with alignment - this should not affect our
894 * memseg, the memory will be taken from a different one.
896 mz1 = rte_memzone_reserve_aligned("reserve_remainder_1", min_len,
897 SOCKET_ID_ANY, 0, align);
899 printf("Failed to reserve %zu bytes aligned on %i bytes\n", min_len,
903 if (min_ms->len != min_len) {
904 printf("Memseg memory should not have been reserved!\n");
908 /* try reserving min_len bytes with less alignment - this should fill up
911 mz2 = rte_memzone_reserve("reserve_remainder_2", min_len,
914 printf("Failed to reserve %zu bytes\n", min_len);
/* unaligned reservation of min_len must consume the segment entirely */
917 if (min_ms->len != 0) {
918 printf("Memseg memory should have been reserved!\n");
/*
 * Top-level entry point of the memzone autotest (registered below as
 * "memzone_autotest"): reserves four zones on various sockets, checks
 * alignment, non-overlap, socket placement, lookup and duplicate-name
 * rejection, then runs every sub-test defined above in sequence.
 *
 * NOTE(review): truncated listing — the `static int test_memzone(void)`
 * header line itself, the `return -1;` lines after each failed check and
 * the final `return 0;` are missing from view. Code left byte-identical;
 * comments only added.
 */
928 const struct rte_memzone *memzone1;
929 const struct rte_memzone *memzone2;
930 const struct rte_memzone *memzone3;
931 const struct rte_memzone *memzone4;
932 const struct rte_memzone *mz;
/* zones on SOCKET_ID_ANY, socket 0, socket 1 (may not exist), and ANY */
934 memzone1 = rte_memzone_reserve("testzone1", 100,
937 memzone2 = rte_memzone_reserve("testzone2", 1000,
940 memzone3 = rte_memzone_reserve("testzone3", 1000,
943 memzone4 = rte_memzone_reserve("testzone4", 1024,
946 /* memzone3 may be NULL if we don't have NUMA */
947 if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
950 rte_memzone_dump(stdout);
952 /* check cache-line alignments */
953 printf("check alignments and lengths\n");
955 if ((memzone1->phys_addr & CACHE_LINE_MASK) != 0)
957 if ((memzone2->phys_addr & CACHE_LINE_MASK) != 0)
959 if (memzone3 != NULL && (memzone3->phys_addr & CACHE_LINE_MASK) != 0)
961 if ((memzone1->len & CACHE_LINE_MASK) != 0 || memzone1->len == 0)
963 if ((memzone2->len & CACHE_LINE_MASK) != 0 || memzone2->len == 0)
965 if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
/* memzone4 asked for exactly 1024 (a cache-line multiple) */
968 if (memzone4->len != 1024)
971 /* check that zones don't overlap */
972 printf("check overlapping\n");
974 if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
975 memzone2->phys_addr, memzone2->len))
977 if (memzone3 != NULL &&
978 is_memory_overlap(memzone1->phys_addr, memzone1->len,
979 memzone3->phys_addr, memzone3->len))
981 if (memzone3 != NULL &&
982 is_memory_overlap(memzone2->phys_addr, memzone2->len,
983 memzone3->phys_addr, memzone3->len))
986 printf("check socket ID\n");
988 /* memzone2 must be on socket id 0 and memzone3 on socket 1 */
989 if (memzone2->socket_id != 0)
991 if (memzone3 != NULL && memzone3->socket_id != 1)
/* lookup of an existing zone must return the original pointer */
994 printf("test zone lookup\n");
995 mz = rte_memzone_lookup("testzone1");
/* re-reserving an existing name must fail */
999 printf("test duplcate zone name\n");
1000 mz = rte_memzone_reserve("testzone1", 100,
/* run each sub-test in turn; any failure aborts the autotest */
1005 printf("test reserving memzone with bigger size than the maximum\n");
1006 if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
1009 printf("test reserving memory in smallest segments\n");
1010 if (test_memzone_reserve_memory_in_smallest_segment() < 0)
1013 printf("test reserving memory in segments with smallest offsets\n");
1014 if (test_memzone_reserve_memory_with_smallest_offset() < 0)
1017 printf("test memzone_reserve flags\n");
1018 if (test_memzone_reserve_flags() < 0)
1021 printf("test alignment for memzone_reserve\n");
1022 if (test_memzone_aligned() < 0)
1025 printf("test boundary alignment for memzone_reserve\n");
1026 if (test_memzone_bounded() < 0)
1029 printf("test invalid alignment for memzone_reserve\n");
1030 if (test_memzone_invalid_alignment() < 0)
1033 printf("test reserving amounts of memory equal to segment's length\n");
1034 if (test_memzone_reserve_remainder() < 0)
1037 printf("test reserving the largest size memzone possible\n");
1038 if (test_memzone_reserve_max() < 0)
1041 printf("test reserving the largest size aligned memzone possible\n");
1042 if (test_memzone_reserve_max_aligned() < 0)
1048 static struct test_command memzone_cmd = {
1049 .command = "memzone_autotest",
1050 .callback = test_memzone,
1052 REGISTER_TEST_COMMAND(memzone_cmd);