4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/queue.h>
39 #include <cmdline_parse.h>
41 #include <rte_random.h>
42 #include <rte_cycles.h>
43 #include <rte_memory.h>
44 #include <rte_memzone.h>
45 #include <rte_tailq.h>
47 #include <rte_eal_memconfig.h>
48 #include <rte_common.h>
56 * - Search for three reserved zones or reserve them if they do not exist:
58 * - One is on any socket id.
59 * - The second is on socket 0.
60 * - The last one is on socket 1 (if socket 1 exists).
62 * - Check that the zones exist.
64 * - Check that the zones are cache-aligned.
66 * - Check that zones do not overlap.
68 * - Check that the zones are on the correct socket id.
70 * - Check that a lookup of the first zone returns the same pointer.
72 * - Check that it is not possible to create another zone with the
73 * same name as an existing zone.
75 * - Check flags for specific huge page size reservation
78 /* Test if memory overlaps: return 1 if true, or 0 if false. */
/*
 * NOTE(review): only a fragment of this helper is visible in this extract —
 * the function's storage class, braces and return statements are elided.
 * Logic shown: two physical ranges [ptr1, ptr1+len1) and [ptr2, ptr2+len2)
 * overlap iff whichever range starts later begins inside the other range;
 * both orderings are tested below.
 */
80 is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
/* ptr2 starts at or after ptr1 and falls inside ptr1's range -> overlap */
82 if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
/* ptr1 starts after ptr2 and falls inside ptr2's range -> overlap */
84 else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2)
/*
 * Verify that rte_memzone_reserve_aligned() rejects a non-power-of-two
 * alignment (100). The test fails if such a zone already exists or if the
 * reservation unexpectedly succeeds.
 * NOTE(review): fragment — braces, return paths and the static qualifier
 * are elided in this extract.
 */
90 test_memzone_invalid_alignment(void)
92 const struct rte_memzone * mz;
/* the zone must not pre-exist; a hit here means a prior run leaked it
 * or the EAL wrongly accepted the invalid alignment */
94 mz = rte_memzone_lookup("invalid_alignment");
96 printf("Zone with invalid alignment has been reserved\n");
/* alignment 100 is not a power of two, so this reservation must fail */
100 mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
101 SOCKET_ID_ANY, 0, 100);
103 printf("Zone with invalid alignment has been reserved\n");
/*
 * Verify that reserving a memzone of (size_t)-1 bytes — far larger than any
 * available memory — fails. The test fails if the zone already exists or if
 * the oversized reservation unexpectedly succeeds.
 * NOTE(review): fragment — braces and return paths are elided in this extract.
 */
110 test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
112 const struct rte_memzone * mz;
/* the zone must not pre-exist from an earlier run */
114 mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum")
116 printf("zone_size_bigger_than_the_maximum has been reserved\n");
/* (size_t)-1 == SIZE_MAX: impossible to satisfy, reserve must return NULL */
120 mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
123 printf("It is impossible to reserve such big a memzone\n");
/*
 * Exercise the page-size reservation flags (RTE_MEMZONE_2MB / RTE_MEMZONE_1GB
 * and RTE_MEMZONE_SIZE_HINT_ONLY):
 *  - when a page size is available, reserving with its flag must yield a zone
 *    backed by that page size;
 *  - when a page size is unavailable, reserving with its flag must fail unless
 *    SIZE_HINT_ONLY is also set, in which case the other size is used;
 *  - requesting both 2MB and 1GB at once must fail.
 * NOTE(review): fragment — braces, return statements and some declarations
 * (e.g. 'i') are elided in this extract.
 */
131 test_memzone_reserve_flags(void)
133 const struct rte_memzone *mz;
134 const struct rte_memseg *ms;
135 int hugepage_2MB_avail = 0;
136 int hugepage_1GB_avail = 0;
137 const int size = 100;
/* scan the physical memory layout to learn which hugepage sizes exist */
139 ms = rte_eal_get_physmem_layout();
140 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
141 if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
142 hugepage_2MB_avail = 1;
143 if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
144 hugepage_1GB_avail = 1;
146 /* Display the availability of 2MB and 1GB pages */
147 if (hugepage_2MB_avail)
148 printf("2MB Huge pages available\n");
149 if (hugepage_1GB_avail)
150 printf("1GB Huge pages available\n");
152 * If 2MB pages available, check that a small memzone is correctly
153 * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag.
154 * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an
155 * available page size (i.e. 1GB) when 2MB pages are unavailable.
157 if (hugepage_2MB_avail) {
/* explicit 2MB request: must succeed and be backed by 2MB pages */
158 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
161 printf("MEMZONE FLAG 2MB\n");
164 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
165 printf("hugepage_sz not equal 2M\n");
/* 2MB request with HINT: 2MB is available, so it must still be used */
169 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
170 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
172 printf("MEMZONE FLAG 2MB\n");
175 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
176 printf("hugepage_sz not equal 2M\n");
180 /* Check that if 1GB huge pages are unavailable, the reservation fails
181 * unless the HINT flag is given (then it falls back to 2MB pages)
183 if (!hugepage_1GB_avail) {
/* 1GB + HINT with no 1GB pages: must fall back to the available 2MB */
184 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
185 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
187 printf("MEMZONE FLAG 1GB & HINT\n");
190 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
191 printf("hugepage_sz not equal 2M\n");
/* hard 1GB request with no 1GB pages and no HINT: must fail */
195 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
198 printf("MEMZONE FLAG 1GB\n");
204 /*As with 2MB tests above for 1GB huge page requests*/
205 if (hugepage_1GB_avail) {
/* explicit 1GB request: must succeed and be backed by 1GB pages */
206 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
209 printf("MEMZONE FLAG 1GB\n");
212 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
213 printf("hugepage_sz not equal 1G\n");
/* 1GB request with HINT: 1GB is available, so it must still be used */
217 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
218 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
220 printf("MEMZONE FLAG 1GB\n");
223 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
224 printf("hugepage_sz not equal 1G\n");
228 /* Check that if 2MB huge pages are unavailable, the reservation fails
229 * unless the HINT flag is given (then it falls back to 1GB pages)
231 if (!hugepage_2MB_avail) {
/* 2MB + HINT with no 2MB pages: must fall back to the available 1GB */
232 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
233 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
235 printf("MEMZONE FLAG 2MB & HINT\n");
238 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
239 printf("hugepage_sz not equal 1G\n");
/* hard 2MB request with no 2MB pages and no HINT: must fail */
242 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
245 printf("MEMZONE FLAG 2MB\n");
/* requesting both page sizes at once is contradictory and must fail */
250 if (hugepage_2MB_avail && hugepage_1GB_avail) {
251 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
252 RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
254 printf("BOTH SIZES SET\n");
/*
 * Compute the largest free, cache-aligned chunk remaining in any memseg by
 * walking every memseg and subtracting the memzones already reserved inside
 * it, then verify that rte_memzone_reserve() with size 0 (meaning "biggest
 * available") returns a zone of exactly that length.
 * NOTE(review): fragment — declarations of maxlen/len/last_addr and the
 * loop/branch closing lines are elided in this extract.
 */
263 test_memzone_reserve_max(void)
265 const struct rte_memzone *mz;
266 const struct rte_config *config;
267 const struct rte_memseg *ms;
274 /* get pointer to global configuration */
275 config = rte_eal_get_configuration();
277 ms = rte_eal_get_physmem_layout();
279 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
280 /* ignore smaller memsegs as they can only get smaller */
281 if (ms[memseg_idx].len < maxlen)
284 /* align everything */
285 last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
286 len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
287 len &= ~((size_t) CACHE_LINE_MASK);
289 /* cycle through all memzones */
290 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
292 /* stop when reaching last allocated memzone */
293 if (config->mem_config->memzone[memzone_idx].addr == NULL)
296 /* check if the memzone is in our memseg and subtract length */
297 if ((config->mem_config->memzone[memzone_idx].addr >=
298 ms[memseg_idx].addr) &&
299 (config->mem_config->memzone[memzone_idx].addr <
300 (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
301 /* since the zones can now be aligned and occasionally skip
302 * some space, we should calculate the length based on
303 * reported length and start addresses difference. Addresses
304 * are allocated sequentially so we don't need to worry about
305 * them being in the right order.
308 config->mem_config->memzone[memzone_idx].addr,
310 len -= config->mem_config->memzone[memzone_idx].len;
311 last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
312 (size_t) config->mem_config->memzone[memzone_idx].len);
316 /* we don't need to calculate offset here since length
317 * is always cache-aligned */
/* no free space left in any memseg -> nothing to test against */
323 printf("There is no space left!\n");
/* size 0 asks the allocator for the biggest available zone */
327 mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
329 printf("Failed to reserve a big chunk of memory\n");
330 rte_dump_physmem_layout();
/* the returned zone must match our independently computed maximum */
335 if (mz->len != maxlen) {
336 printf("Memzone reserve with 0 size did not return bigest block\n");
337 printf("Expected size = %zu, actual size = %zu\n",
339 rte_dump_physmem_layout();
/*
 * Same maximum-size computation as test_memzone_reserve_max(), but with a
 * randomly chosen alignment (128B..4KB): the expected maximum is reduced by
 * the offset needed to align the first free address, then a size-0 aligned
 * reservation must return exactly that length.
 * NOTE(review): fragment — declarations of maxlen/len/last_addr/indices and
 * the loop/branch closing lines are elided in this extract.
 */
348 test_memzone_reserve_max_aligned(void)
350 const struct rte_memzone *mz;
351 const struct rte_config *config;
352 const struct rte_memseg *ms;
355 uintptr_t addr_offset;
360 /* random alignment */
361 rte_srand((unsigned)rte_rdtsc());
362 const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
364 /* get pointer to global configuration */
365 config = rte_eal_get_configuration();
367 ms = rte_eal_get_physmem_layout();
371 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
373 /* ignore smaller memsegs as they can only get smaller */
374 if (ms[memseg_idx].len < maxlen)
377 /* align everything */
378 last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
379 len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
380 len &= ~((size_t) CACHE_LINE_MASK);
382 /* cycle through all memzones */
383 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
385 /* stop when reaching last allocated memzone */
386 if (config->mem_config->memzone[memzone_idx].addr == NULL)
389 /* check if the memzone is in our memseg and subtract length */
390 if ((config->mem_config->memzone[memzone_idx].addr >=
391 ms[memseg_idx].addr) &&
392 (config->mem_config->memzone[memzone_idx].addr <
393 (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
394 /* since the zones can now be aligned and occasionally skip
395 * some space, we should calculate the length based on
396 * reported length and start addresses difference.
398 len -= (uintptr_t) RTE_PTR_SUB(
399 config->mem_config->memzone[memzone_idx].addr,
400 (uintptr_t) last_addr);
401 len -= config->mem_config->memzone[memzone_idx].len;
403 RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
404 (size_t) config->mem_config->memzone[memzone_idx].len);
408 /* make sure we get the alignment offset */
410 addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) - (uintptr_t) last_addr;
/* nothing usable left once the alignment offset is accounted for */
415 if (maxlen == 0 || maxlen == addr_offset) {
416 printf("There is no space left for biggest %u-aligned memzone!\n", align);
/* subtract the bytes lost to aligning the start of the free region */
420 maxlen -= addr_offset;
/* size 0 + alignment asks for the biggest zone at that alignment */
422 mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
423 SOCKET_ID_ANY, 0, align);
425 printf("Failed to reserve a big chunk of memory\n");
426 rte_dump_physmem_layout();
/* the returned zone must match our independently computed maximum */
431 if (mz->len != maxlen) {
432 printf("Memzone reserve with 0 size and alignment %u did not return"
433 " bigest block\n", align);
434 printf("Expected size = %zu, actual size = %zu\n",
436 rte_dump_physmem_layout();
/*
 * Reserve five memzones with alignments 32, 128, 256, 512 and 1024 bytes,
 * then verify that each zone's physical and virtual addresses honour the
 * requested alignment (the 32-byte request is expected to be bumped up to
 * the cache-line size), that every length is cache-line aligned, and that
 * no two zones overlap in physical memory.
 * NOTE(review): fragment — return statements and closing braces are elided
 * in this extract.
 */
445 test_memzone_aligned(void)
447 const struct rte_memzone *memzone_aligned_32;
448 const struct rte_memzone *memzone_aligned_128;
449 const struct rte_memzone *memzone_aligned_256;
450 const struct rte_memzone *memzone_aligned_512;
451 const struct rte_memzone *memzone_aligned_1024;
453 /* memzone that should automatically be adjusted to align on 64 bytes */
454 memzone_aligned_32 = rte_memzone_lookup("aligned_32");
455 if (memzone_aligned_32 == NULL)
456 memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
457 SOCKET_ID_ANY, 0, 32);
459 /* memzone that is supposed to be aligned on a 128 byte boundary */
460 memzone_aligned_128 = rte_memzone_lookup("aligned_128");
461 if (memzone_aligned_128 == NULL)
462 memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
463 SOCKET_ID_ANY, 0, 128);
465 /* memzone that is supposed to be aligned on a 256 byte boundary */
466 memzone_aligned_256 = rte_memzone_lookup("aligned_256");
467 if (memzone_aligned_256 == NULL)
468 memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
469 SOCKET_ID_ANY, 0, 256);
471 /* memzone that is supposed to be aligned on a 512 byte boundary */
472 memzone_aligned_512 = rte_memzone_lookup("aligned_512");
473 if (memzone_aligned_512 == NULL)
474 memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
475 SOCKET_ID_ANY, 0, 512);
477 /* memzone that is supposed to be aligned on a 1024 byte boundary */
478 memzone_aligned_1024 = rte_memzone_lookup("aligned_1024");
479 if (memzone_aligned_1024 == NULL)
480 memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
481 SOCKET_ID_ANY, 0, 1024);
483 printf("check alignments and lengths\n");
484 if (memzone_aligned_32 == NULL) {
485 printf("Unable to reserve 64-byte aligned memzone!\n");
/* 32-byte request is checked against the cache-line mask: the allocator
 * is expected to have raised the alignment to a full cache line */
488 if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
490 if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
492 if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
494 if (memzone_aligned_128 == NULL) {
495 printf("Unable to reserve 128-byte aligned memzone!\n");
/* for each remaining zone: phys and virt addresses must be clear in the
 * low (align-1) bits; length must stay cache-line aligned */
498 if ((memzone_aligned_128->phys_addr & 127) != 0)
500 if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
502 if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
504 if (memzone_aligned_256 == NULL) {
505 printf("Unable to reserve 256-byte aligned memzone!\n");
508 if ((memzone_aligned_256->phys_addr & 255) != 0)
510 if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
512 if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
514 if (memzone_aligned_512 == NULL) {
515 printf("Unable to reserve 512-byte aligned memzone!\n");
518 if ((memzone_aligned_512->phys_addr & 511) != 0)
520 if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
522 if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
524 if (memzone_aligned_1024 == NULL) {
525 printf("Unable to reserve 1024-byte aligned memzone!\n");
528 if ((memzone_aligned_1024->phys_addr & 1023) != 0)
530 if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
532 if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
535 /* check that zones don't overlap */
536 printf("check overlapping\n");
/* pairwise physical-range overlap checks across all five zones */
537 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
538 memzone_aligned_128->phys_addr, memzone_aligned_128->len))
540 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
541 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
543 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
544 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
546 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
547 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
549 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
550 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
552 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
553 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
555 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
556 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
558 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
559 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
561 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
562 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
564 if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
565 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
/*
 * NOTE(review): interior of the main memzone test entry point — its function
 * header (presumably test_memzone(void)) and its tail lie outside this
 * extract. It reserves three named zones (any socket / socket 0 / socket 1),
 * checks alignment, overlap, socket placement, lookup and duplicate-name
 * rejection, then dispatches to the other test_* functions above.
 */
573 const struct rte_memzone *memzone1;
574 const struct rte_memzone *memzone2;
575 const struct rte_memzone *memzone3;
576 const struct rte_memzone *mz;
/* zone on any socket; reuse it if a previous run already created it */
578 memzone1 = rte_memzone_lookup("testzone1");
579 if (memzone1 == NULL)
580 memzone1 = rte_memzone_reserve("testzone1", 100,
/* zone pinned to socket 0 (elided arguments presumably pass socket 0) */
583 memzone2 = rte_memzone_lookup("testzone2");
584 if (memzone2 == NULL)
585 memzone2 = rte_memzone_reserve("testzone2", 1000,
/* zone pinned to socket 1 (elided arguments presumably pass socket 1) */
588 memzone3 = rte_memzone_lookup("testzone3");
589 if (memzone3 == NULL)
590 memzone3 = rte_memzone_reserve("testzone3", 1000,
593 /* memzone3 may be NULL if we don't have NUMA */
594 if (memzone1 == NULL || memzone2 == NULL)
599 /* check cache-line alignments */
600 printf("check alignments and lengths\n");
602 if ((memzone1->phys_addr & CACHE_LINE_MASK) != 0)
604 if ((memzone2->phys_addr & CACHE_LINE_MASK) != 0)
606 if (memzone3 != NULL && (memzone3->phys_addr & CACHE_LINE_MASK) != 0)
/* lengths must be non-zero and padded to a cache-line multiple */
608 if ((memzone1->len & CACHE_LINE_MASK) != 0 || memzone1->len == 0)
610 if ((memzone2->len & CACHE_LINE_MASK) != 0 || memzone2->len == 0)
612 if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
616 /* check that zones don't overlap */
617 printf("check overlapping\n");
619 if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
620 memzone2->phys_addr, memzone2->len))
622 if (memzone3 != NULL &&
623 is_memory_overlap(memzone1->phys_addr, memzone1->len,
624 memzone3->phys_addr, memzone3->len))
626 if (memzone3 != NULL &&
627 is_memory_overlap(memzone2->phys_addr, memzone2->len,
628 memzone3->phys_addr, memzone3->len))
631 printf("check socket ID\n");
633 /* memzone2 must be on socket id 0 and memzone3 on socket 1 */
634 if (memzone2->socket_id != 0)
636 if (memzone3 != NULL && memzone3->socket_id != 1)
/* a lookup of an existing zone must return the same descriptor */
639 printf("test zone lookup\n");
640 mz = rte_memzone_lookup("testzone1");
/* reserving a name that already exists must fail */
644 printf("test duplcate zone name\n");
645 mz = rte_memzone_reserve("testzone1", 100,
/* run the remaining sub-tests; each returns < 0 on failure */
650 printf("test reserving memzone with bigger size than the maximum\n");
651 if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
654 printf("test memzone_reserve flags\n");
655 if (test_memzone_reserve_flags() < 0)
658 printf("test alignment for memzone_reserve\n");
659 if (test_memzone_aligned() < 0)
662 printf("test invalid alignment for memzone_reserve\n");
663 if (test_memzone_invalid_alignment() < 0)
666 printf("test reserving the largest size memzone possible\n");
667 if (test_memzone_reserve_max() < 0)
670 printf("test reserving the largest size aligned memzone possible\n");
671 if (test_memzone_reserve_max_aligned() < 0)