4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * version: DPDK.L.1.2.3-3
39 #include <sys/queue.h>
41 #include <cmdline_parse.h>
43 #include <rte_memory.h>
44 #include <rte_memzone.h>
45 #include <rte_tailq.h>
47 #include <rte_common.h>
55 * - Search for three reserved zones or reserve them if they do not exist:
57 * - One is on any socket id.
58 * - The second is on socket 0.
59 * - The last one is on socket 1 (if socket 1 exists).
61 * - Check that the zones exist.
63 * - Check that the zones are cache-aligned.
65 * - Check that zones do not overlap.
67 * - Check that the zones are on the correct socket id.
69 * - Check that a lookup of the first zone returns the same pointer.
71 * - Check that it is not possible to create another zone with the
72 * same name as an existing zone.
74 * - Check flags for specific huge page size reservation
77 /* Test if memory overlaps: return 1 if true, or 0 if false. */
/* NOTE(review): this dump elides the function's storage-class/return-type
 * line and both `return` statements; only the two ordering checks are
 * visible. Each condition tests whether the lower-addressed physical range
 * contains the start of the other range. */
79 is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
/* ptr2 begins inside [ptr1, ptr1 + len1) -> ranges overlap */
81 if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
/* ptr1 begins inside [ptr2, ptr2 + len2) -> ranges overlap */
83 else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2)
/* Verify that reserving a memzone with a non-power-of-two alignment (100)
 * is rejected. First a lookup confirms no zone with this name already
 * exists, then the invalid reservation is attempted; both calls are
 * expected to yield NULL.
 * NOTE(review): the NULL checks and return statements around each printf
 * are elided in this dump. */
89 test_memzone_invalid_alignment(void)
91 const struct rte_memzone * mz;
/* the zone must not pre-exist, otherwise the test is meaningless */
93 mz = rte_memzone_lookup("invalid_alignment");
95 printf("Zone with invalid alignment has been reserved\n");
/* alignment 100 is not a power of two, so this reservation must fail */
99 mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
100 SOCKET_ID_ANY, 0, 100);
102 printf("Zone with invalid alignment has been reserved\n");
/* Verify that reserving a memzone larger than any available memory
 * (0x1900000000 bytes = 100 GB) fails, i.e. returns NULL.
 * NOTE(review): the NULL checks and return statements are elided in this
 * dump; only the lookup, the oversized reserve, and the failure printfs
 * are visible. */
109 test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
111 const struct rte_memzone * mz;
/* the zone must not pre-exist */
113 mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
115 printf("zone_size_bigger_than_the_maximum has been reserved\n");
/* deliberately absurd size: the reservation is expected to fail */
119 mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", 0x1900000000ULL,
122 printf("It is impossible to reserve such big a memzone\n");
/* Exercise the RTE_MEMZONE_2MB / RTE_MEMZONE_1GB page-size request flags
 * and the RTE_MEMZONE_SIZE_HINT_ONLY fallback behavior, for every
 * combination of 2MB/1GB hugepage availability detected from the physmem
 * layout.
 * NOTE(review): this dump elides error-return lines, closing braces, the
 * declaration of loop index `i`, and the flag argument on several
 * rte_memzone_reserve() calls (original lines 158, 195, 206, 242). */
130 test_memzone_reserve_flags(void)
132 const struct rte_memzone *mz;
133 const struct rte_memseg *ms;
134 int hugepage_2MB_avail = 0;
135 int hugepage_1GB_avail = 0;
136 const int size = 100;
/* scan all memory segments to discover which hugepage sizes exist */
138 ms = rte_eal_get_physmem_layout();
139 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
140 if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
141 hugepage_2MB_avail = 1;
142 if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
143 hugepage_1GB_avail = 1;
145 /* Display the availability of 2MB and 1GB pages */
146 if (hugepage_2MB_avail)
147 printf("2MB Huge pages available\n");
148 if (hugepage_1GB_avail)
149 printf("1GB Huge pages available\n");
151 * If 2MB pages available, check that a small memzone is correctly
152 * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag.
153 * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an
154 * available page size (i.e. 1GB) when 2MB pages are unavailable.
156 if (hugepage_2MB_avail) {
157 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
160 printf("MEMZONE FLAG 2MB\n");
/* the reserved zone must actually come from 2MB pages */
163 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
164 printf("hugepage_sz not equal 2M\n");
/* 2MB + HINT with 2MB pages present: must still land on 2MB pages */
168 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
169 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
171 printf("MEMZONE FLAG 2MB\n");
174 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
175 printf("hugepage_sz not equal 2M\n");
179 /* Check if 1GB huge pages are unavailable, that function fails unless
180 * HINT flag is indicated
182 if (!hugepage_1GB_avail) {
/* 1GB requested but unavailable: HINT flag lets it fall back to 2MB */
183 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
184 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
186 printf("MEMZONE FLAG 1GB & HINT\n");
189 if (mz->hugepage_sz != RTE_PGSIZE_2M) {
190 printf("hugepage_sz not equal 2M\n");
/* 1GB requested without HINT while unavailable: must fail */
194 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
197 printf("MEMZONE FLAG 1GB\n");
203 /*As with 2MB tests above for 1GB huge page requests*/
204 if (hugepage_1GB_avail) {
205 mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
208 printf("MEMZONE FLAG 1GB\n");
211 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
212 printf("hugepage_sz not equal 1G\n");
216 mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
217 RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
219 printf("MEMZONE FLAG 1GB\n");
222 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
223 printf("hugepage_sz not equal 1G\n");
227 /* Check if 2MB huge pages are unavailable, that function fails unless
228 * HINT flag is indicated
230 if (!hugepage_2MB_avail) {
/* 2MB requested but unavailable: HINT flag lets it fall back to 1GB */
231 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
232 RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
234 printf("MEMZONE FLAG 2MB & HINT\n");
237 if (mz->hugepage_sz != RTE_PGSIZE_1G) {
238 printf("hugepage_sz not equal 1G\n");
/* 2MB requested without HINT while unavailable: must fail */
241 mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
244 printf("MEMZONE FLAG 2MB\n");
/* requesting both page-size flags at once is invalid and must fail */
249 if (hugepage_2MB_avail && hugepage_1GB_avail) {
250 mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
251 RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
253 printf("BOTH SIZES SET\n");
/* Compute the largest contiguous free span left in any memseg by walking
 * the memzone table, then verify that rte_memzone_reserve() with size 0
 * hands back a zone of exactly that length.
 * NOTE(review): this dump elides the declarations of memseg_idx,
 * memzone_idx, len, maxlen and last_addr, the `continue`/`break`
 * statements, the maxlen update at the end of the memseg loop, closing
 * braces, and the return paths. */
262 test_memzone_reserve_max(void)
264 const struct rte_memzone *mz;
265 const struct rte_config *config;
266 const struct rte_memseg *ms;
273 /* get pointer to global configuration */
274 config = rte_eal_get_configuration();
276 ms = rte_eal_get_physmem_layout();
278 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
279 /* ignore smaller memsegs as they can only get smaller */
280 if (ms[memseg_idx].len < maxlen)
/* start with the whole segment free; walk zones to subtract used space */
283 len = ms[memseg_idx].len;
284 last_addr = ms[memseg_idx].addr;
286 /* cycle through all memzones */
287 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
289 /* stop when reaching last allocated memzone */
290 if (config->mem_config->memzone[memzone_idx].addr == NULL)
293 /* check if the memzone is in our memseg and subtract length */
294 if ((config->mem_config->memzone[memzone_idx].addr >=
295 ms[memseg_idx].addr) &&
296 (config->mem_config->memzone[memzone_idx].addr <=
297 (RTE_PTR_ADD(ms[memseg_idx].addr,
298 (size_t)ms[memseg_idx].len)))) {
299 /* since the zones can now be aligned and occasionally skip
300 * some space, we should calculate the length based on
301 * reported length and start addresses difference. Addresses
302 * are allocated sequentially so we don't need to worry about
303 * them being in the right order.
/* subtract the alignment gap between the previous zone end and
 * this zone's start, then the zone's own length */
305 len -= (uintptr_t) RTE_PTR_SUB(
306 config->mem_config->memzone[memzone_idx].addr,
307 (uintptr_t) last_addr);
308 len -= config->mem_config->memzone[memzone_idx].len;
/* advance last_addr to the end of this zone (assignment to
 * last_addr is elided in this dump) */
310 RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
311 (size_t) config->mem_config->memzone[memzone_idx].len);
315 /* we don't need to calculate offset here since length
316 * is always cache-aligned */
/* size 0 means "give me the biggest zone you can" */
321 mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
323 printf("Failed to reserve a big chunk of memory\n");
324 rte_dump_physmem_layout();
/* the returned zone must match the computed maximum free length */
329 if (mz->len != maxlen) {
330 printf("Memzone reserve with 0 size did not return bigest block\n");
331 printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
333 rte_dump_physmem_layout();
/* Same free-space accounting as test_memzone_reserve_max(), but for a
 * 512-byte-aligned reservation: the expected maximum is additionally
 * reduced by the offset needed to align the first free address to 512.
 * NOTE(review): this dump elides the declarations of memseg_idx,
 * memzone_idx, maxlen and last_addr, the `continue`/`break` statements,
 * the maxlen update, closing braces, and the return paths. */
342 test_memzone_reserve_max_aligned(void)
344 const struct rte_memzone *mz;
345 const struct rte_config *config;
346 const struct rte_memseg *ms;
349 uint64_t addr_offset, len = 0;
353 /* get pointer to global configuration */
354 config = rte_eal_get_configuration();
356 ms = rte_eal_get_physmem_layout();
360 for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
362 /* ignore smaller memsegs as they can only get smaller */
363 if (ms[memseg_idx].len < maxlen)
/* start with the whole segment free; walk zones to subtract used space */
366 len = ms[memseg_idx].len;
367 last_addr = ms[memseg_idx].addr;
369 /* cycle through all memzones */
370 for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
372 /* stop when reaching last allocated memzone */
373 if (config->mem_config->memzone[memzone_idx].addr == NULL)
376 /* check if the memzone is in our memseg and subtract length */
377 if ((config->mem_config->memzone[memzone_idx].addr >=
378 ms[memseg_idx].addr) &&
379 (config->mem_config->memzone[memzone_idx].addr <=
380 (RTE_PTR_ADD(ms[memseg_idx].addr,
381 (size_t) ms[memseg_idx].len)))) {
382 /* since the zones can now be aligned and occasionally skip
383 * some space, we should calculate the length based on
384 * reported length and start addresses difference.
/* subtract the alignment gap before this zone, then its length */
386 len -= (uintptr_t) RTE_PTR_SUB(
387 config->mem_config->memzone[memzone_idx].addr,
388 (uintptr_t) last_addr);
389 len -= config->mem_config->memzone[memzone_idx].len;
/* advance last_addr to the end of this zone (assignment to
 * last_addr is elided in this dump) */
391 RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
392 (size_t) config->mem_config->memzone[memzone_idx].len);
396 /* make sure we get the alignment offset */
398 addr_offset = RTE_ALIGN_CEIL((uintptr_t) last_addr, 512) - (uintptr_t) last_addr;
/* the aligned reservation loses the bytes before the 512 boundary */
403 maxlen -= addr_offset;
/* size 0 + alignment 512: biggest possible 512-aligned zone */
405 mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
406 SOCKET_ID_ANY, 0, 512);
408 printf("Failed to reserve a big chunk of memory\n");
409 rte_dump_physmem_layout();
/* the returned zone must match the computed aligned maximum */
414 if (mz->len != maxlen) {
415 printf("Memzone reserve with 0 size and alignment 512 did not return"
417 printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
419 rte_dump_physmem_layout();
/* Reserve (or look up) five memzones with requested alignments of
 * 32, 128, 256, 512 and 1024 bytes, then verify:
 *  - each zone's physical and virtual addresses honor the alignment
 *    (the 32-byte request is checked against CACHE_LINE_MASK, since
 *    reservations are rounded up to at least cache-line alignment);
 *  - each zone's length is cache-line aligned;
 *  - no pair of zones overlaps physically (via is_memory_overlap()).
 * NOTE(review): the error-return lines after each failed check are elided
 * in this dump. */
428 test_memzone_aligned(void)
430 const struct rte_memzone *memzone_aligned_32;
431 const struct rte_memzone *memzone_aligned_128;
432 const struct rte_memzone *memzone_aligned_256;
433 const struct rte_memzone *memzone_aligned_512;
434 const struct rte_memzone *memzone_aligned_1024;
436 /* memzone that should automatically be adjusted to align on 64 bytes */
437 memzone_aligned_32 = rte_memzone_lookup("aligned_32");
438 if (memzone_aligned_32 == NULL)
439 memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
440 SOCKET_ID_ANY, 0, 32);
442 /* memzone that is supposed to be aligned on a 128 byte boundary */
443 memzone_aligned_128 = rte_memzone_lookup("aligned_128");
444 if (memzone_aligned_128 == NULL)
445 memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
446 SOCKET_ID_ANY, 0, 128);
448 /* memzone that is supposed to be aligned on a 256 byte boundary */
449 memzone_aligned_256 = rte_memzone_lookup("aligned_256");
450 if (memzone_aligned_256 == NULL)
451 memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
452 SOCKET_ID_ANY, 0, 256);
454 /* memzone that is supposed to be aligned on a 512 byte boundary */
455 memzone_aligned_512 = rte_memzone_lookup("aligned_512");
456 if (memzone_aligned_512 == NULL)
457 memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
458 SOCKET_ID_ANY, 0, 512);
460 /* memzone that is supposed to be aligned on a 1024 byte boundary */
461 memzone_aligned_1024 = rte_memzone_lookup("aligned_1024");
462 if (memzone_aligned_1024 == NULL)
463 memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
464 SOCKET_ID_ANY, 0, 1024);
466 printf("check alignments and lengths\n");
467 if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
469 if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
471 if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
473 if ((memzone_aligned_128->phys_addr & 127) != 0)
475 if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
477 if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
479 if ((memzone_aligned_256->phys_addr & 255) != 0)
481 if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
483 if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
485 if ((memzone_aligned_512->phys_addr & 511) != 0)
487 if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
489 if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
491 if ((memzone_aligned_1024->phys_addr & 1023) != 0)
493 if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
495 if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
499 /* check that zones don't overlap */
500 printf("check overlapping\n");
/* pairwise overlap check across all 5 zones (10 combinations) */
501 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
502 memzone_aligned_128->phys_addr, memzone_aligned_128->len))
504 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
505 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
507 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
508 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
510 if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
511 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
513 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
514 memzone_aligned_256->phys_addr, memzone_aligned_256->len))
516 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
517 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
519 if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
520 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
522 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
523 memzone_aligned_512->phys_addr, memzone_aligned_512->len))
525 if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
526 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
528 if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
529 memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
/* Top-level memzone test driver (signature line elided in this dump;
 * presumably `test_memzone`): reserves three named zones on specific
 * sockets, checks alignment, length, non-overlap, socket placement,
 * lookup consistency and duplicate-name rejection, then runs each of the
 * specialized sub-tests above.
 * NOTE(review): the function header, the socket-id arguments on the three
 * reserve calls, the error-return lines after each failed check, and the
 * final `return 0` are all elided. */
537 const struct rte_memzone *memzone1;
538 const struct rte_memzone *memzone2;
539 const struct rte_memzone *memzone3;
540 const struct rte_memzone *mz;
/* lookup-then-reserve lets the test be re-run in the same process */
542 memzone1 = rte_memzone_lookup("testzone1");
543 if (memzone1 == NULL)
544 memzone1 = rte_memzone_reserve("testzone1", 100,
547 memzone2 = rte_memzone_lookup("testzone2");
548 if (memzone2 == NULL)
549 memzone2 = rte_memzone_reserve("testzone2", 1000,
552 memzone3 = rte_memzone_lookup("testzone3");
553 if (memzone3 == NULL)
554 memzone3 = rte_memzone_reserve("testzone3", 1000,
557 /* memzone3 may be NULL if we don't have NUMA */
558 if (memzone1 == NULL || memzone2 == NULL)
563 /* check cache-line alignments */
564 printf("check alignments and lengths\n");
566 if ((memzone1->phys_addr & CACHE_LINE_MASK) != 0)
568 if ((memzone2->phys_addr & CACHE_LINE_MASK) != 0)
570 if (memzone3 != NULL && (memzone3->phys_addr & CACHE_LINE_MASK) != 0)
572 if ((memzone1->len & CACHE_LINE_MASK) != 0 || memzone1->len == 0)
574 if ((memzone2->len & CACHE_LINE_MASK) != 0 || memzone2->len == 0)
576 if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
580 /* check that zones don't overlap */
581 printf("check overlapping\n");
583 if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
584 memzone2->phys_addr, memzone2->len))
586 if (memzone3 != NULL &&
587 is_memory_overlap(memzone1->phys_addr, memzone1->len,
588 memzone3->phys_addr, memzone3->len))
590 if (memzone3 != NULL &&
591 is_memory_overlap(memzone2->phys_addr, memzone2->len,
592 memzone3->phys_addr, memzone3->len))
595 printf("check socket ID\n");
597 /* memzone2 must be on socket id 0 and memzone3 on socket 1 */
598 if (memzone2->socket_id != 0)
600 if (memzone3 != NULL && memzone3->socket_id != 1)
/* lookup of an existing zone must return the same descriptor */
603 printf("test zone lookup\n");
604 mz = rte_memzone_lookup("testzone1");
/* reserving a second zone under an existing name must fail
 * (note: "duplcate" typo in the runtime message below — cannot be
 * corrected here without changing program output) */
608 printf("test duplcate zone name\n");
609 mz = rte_memzone_reserve("testzone1", 100,
/* run the specialized sub-tests; each returns < 0 on failure */
614 printf("test reserving memzone with bigger size than the maximum\n");
615 if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
618 printf("test reserving the largest size memzone possible\n");
619 if (test_memzone_reserve_max() < 0)
622 printf("test memzone_reserve flags\n");
623 if (test_memzone_reserve_flags() < 0)
626 printf("test alignment for memzone_reserve\n");
627 if (test_memzone_aligned() < 0)
630 printf("test invalid alignment for memzone_reserve\n");
631 if (test_memzone_invalid_alignment() < 0)
634 printf("test reserving the largest size aligned memzone possible\n");
635 if (test_memzone_reserve_max_aligned() < 0)