dpdk.git: lib/librte_eal/linuxapp/eal/eal_memory.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   Copyright(c) 2013 6WIND.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #define _FILE_OFFSET_BITS 64
36 #include <errno.h>
37 #include <stdarg.h>
38 #include <stdbool.h>
39 #include <stdlib.h>
40 #include <stdio.h>
41 #include <stdint.h>
42 #include <inttypes.h>
43 #include <string.h>
44 #include <sys/mman.h>
45 #include <sys/types.h>
46 #include <sys/stat.h>
47 #include <sys/queue.h>
48 #include <sys/file.h>
49 #include <unistd.h>
50 #include <limits.h>
51 #include <sys/ioctl.h>
52 #include <sys/time.h>
53 #include <signal.h>
54 #include <setjmp.h>
55 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
56 #include <numa.h>
57 #include <numaif.h>
58 #endif
59
60 #include <rte_log.h>
61 #include <rte_memory.h>
62 #include <rte_launch.h>
63 #include <rte_eal.h>
64 #include <rte_eal_memconfig.h>
65 #include <rte_per_lcore.h>
66 #include <rte_lcore.h>
67 #include <rte_common.h>
68 #include <rte_string_fns.h>
69
70 #include "eal_private.h"
71 #include "eal_internal_cfg.h"
72 #include "eal_filesystem.h"
73 #include "eal_hugepages.h"
74
75 #define PFN_MASK_SIZE   8
76
77 /**
78  * @file
 * Huge page mapping under Linux
 *
 * To reserve a large contiguous amount of memory, we use the hugepage
 * feature of Linux. For that, hugetlbfs must be mounted. This code
 * creates many files in the hugetlbfs directory (one per page) and
 * maps them into virtual memory. For each page, we retrieve its
 * physical address and remap the pages so that we end up with a zone
 * that is contiguous both virtually and physically.
87  */
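
/*
 * Typical setup (illustrative only; paths and sizes are examples):
 *
 *     echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *     mkdir -p /mnt/huge
 *     mount -t hugetlbfs nodev /mnt/huge
 *
 * The mount point is then discovered from /proc/mounts, or passed
 * explicitly with the --huge-dir EAL option.
 */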
88
89 static uint64_t baseaddr_offset;
90
91 static bool phys_addrs_available = true;
92
93 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
94
95 static void
96 test_phys_addrs_available(void)
97 {
98         uint64_t tmp;
99         phys_addr_t physaddr;
100
101         if (!rte_eal_has_hugepages()) {
102                 RTE_LOG(ERR, EAL,
103                         "Started without hugepages support, physical addresses not available\n");
104                 phys_addrs_available = false;
105                 return;
106         }
107
108         physaddr = rte_mem_virt2phy(&tmp);
109         if (physaddr == RTE_BAD_PHYS_ADDR) {
110                 RTE_LOG(ERR, EAL,
111                         "Cannot obtain physical addresses: %s. "
112                         "Only vfio will function.\n",
113                         strerror(errno));
114                 phys_addrs_available = false;
115         }
116 }
117
118 /*
119  * Get physical address of any mapped virtual address in the current process.
120  */
121 phys_addr_t
122 rte_mem_virt2phy(const void *virtaddr)
123 {
124         int fd, retval;
125         uint64_t page, physaddr;
126         unsigned long virt_pfn;
127         int page_size;
128         off_t offset;
129
130         if (rte_eal_iova_mode() == RTE_IOVA_VA)
131                 return (uintptr_t)virtaddr;
132
133         /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
134         if (!phys_addrs_available)
135                 return RTE_BAD_PHYS_ADDR;
136
137         /* standard page size */
138         page_size = getpagesize();
139
140         fd = open("/proc/self/pagemap", O_RDONLY);
141         if (fd < 0) {
142                 RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
143                         __func__, strerror(errno));
144                 return RTE_BAD_PHYS_ADDR;
145         }
146
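        /* /proc/self/pagemap is an array of 64-bit entries, one per virtual
         * page; seek to the entry corresponding to virtaddr */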
147         virt_pfn = (unsigned long)virtaddr / page_size;
148         offset = sizeof(uint64_t) * virt_pfn;
149         if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
150                 RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
151                                 __func__, strerror(errno));
152                 close(fd);
153                 return RTE_BAD_PHYS_ADDR;
154         }
155
156         retval = read(fd, &page, PFN_MASK_SIZE);
157         close(fd);
158         if (retval < 0) {
159                 RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
160                                 __func__, strerror(errno));
161                 return RTE_BAD_PHYS_ADDR;
162         } else if (retval != PFN_MASK_SIZE) {
163                 RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
                                "but expected %d\n",
165                                 __func__, retval, PFN_MASK_SIZE);
166                 return RTE_BAD_PHYS_ADDR;
167         }
168
169         /*
         * the pfn (page frame number) is stored in bits 0-54 (see
         * pagemap.txt in the Linux Documentation)
172          */
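        /*
         * A pfn of zero usually means the page is not present; on recent
         * kernels the pfn is also reported as zero if the reader lacks
         * CAP_SYS_ADMIN.
         */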
173         if ((page & 0x7fffffffffffffULL) == 0)
174                 return RTE_BAD_PHYS_ADDR;
175
176         physaddr = ((page & 0x7fffffffffffffULL) * page_size)
177                 + ((unsigned long)virtaddr % page_size);
178
179         return physaddr;
180 }
181
182 /*
183  * For each hugepage in hugepg_tbl, fill the physaddr value. We find
184  * it by browsing the /proc/self/pagemap special file.
185  */
186 static int
187 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
188 {
189         unsigned int i;
190         phys_addr_t addr;
191
192         for (i = 0; i < hpi->num_pages[0]; i++) {
193                 addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
194                 if (addr == RTE_BAD_PHYS_ADDR)
195                         return -1;
196                 hugepg_tbl[i].physaddr = addr;
197         }
198         return 0;
199 }
200
/*
 * For each hugepage in hugepg_tbl, fill physaddr with a fake, sequentially
 * increasing value. This is used when real physical addresses cannot be
 * obtained; the static counter keeps the addresses increasing across calls.
 */
204 static int
205 set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
206 {
207         unsigned int i;
208         static phys_addr_t addr;
209
210         for (i = 0; i < hpi->num_pages[0]; i++) {
211                 hugepg_tbl[i].physaddr = addr;
212                 addr += hugepg_tbl[i].size;
213         }
214         return 0;
215 }
216
217 /*
218  * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process, as it can prevent
 * two processes from mapping data to the same virtual address.
221  * Returns:
222  *    0 - address space randomization disabled
223  *    1/2 - address space randomization enabled
224  *    negative error code on error
225  */
226 static int
227 aslr_enabled(void)
228 {
229         char c;
230         int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
231         if (fd < 0)
232                 return -errno;
233         retval = read(fd, &c, 1);
234         close(fd);
235         if (retval < 0)
236                 return -errno;
237         if (retval == 0)
238                 return -EIO;
239         switch (c) {
240                 case '0' : return 0;
241                 case '1' : return 1;
242                 case '2' : return 2;
243                 default: return -EINVAL;
244         }
245 }
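
/*
 * Example (illustrative): address space layout randomization can be
 * disabled at runtime with
 *     echo 0 > /proc/sys/kernel/randomize_va_space
 * which makes it easier for secondary processes to map segments at the
 * same virtual addresses as the primary process.
 */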
246
247 /*
248  * Try to mmap *size bytes in /dev/zero. If it is successful, return the
249  * pointer to the mmap'd area and keep *size unmodified. Else, retry
250  * with a smaller zone: decrease *size by hugepage_sz until it reaches
251  * 0. In this case, return NULL. Note: this function returns an address
252  * which is a multiple of hugepage size.
253  */
254 static void *
255 get_virtual_area(size_t *size, size_t hugepage_sz)
256 {
257         void *addr;
258         int fd;
259         long aligned_addr;
260
        if (internal_config.base_virtaddr != 0)
                addr = (void *) (uintptr_t) (internal_config.base_virtaddr +
                                baseaddr_offset);
        else
                addr = NULL;
266
267         RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
268
269         fd = open("/dev/zero", O_RDONLY);
270         if (fd < 0){
271                 RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
272                 return NULL;
273         }
274         do {
275                 addr = mmap(addr,
276                                 (*size) + hugepage_sz, PROT_READ,
277 #ifdef RTE_ARCH_PPC_64
278                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
279 #else
280                                 MAP_PRIVATE,
281 #endif
282                                 fd, 0);
283                 if (addr == MAP_FAILED)
284                         *size -= hugepage_sz;
285         } while (addr == MAP_FAILED && *size > 0);
286
287         if (addr == MAP_FAILED) {
288                 close(fd);
289                 RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
290                         strerror(errno));
291                 return NULL;
292         }
293
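        /*
         * The mapping was only needed to find a free address range, so unmap
         * it now; the extra hugepage_sz bytes guarantee that, after rounding
         * the address up to a hugepage boundary, *size bytes still fit inside
         * the probed range.
         */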
294         munmap(addr, (*size) + hugepage_sz);
295         close(fd);
296
297         /* align addr to a huge page size boundary */
298         aligned_addr = (long)addr;
299         aligned_addr += (hugepage_sz - 1);
300         aligned_addr &= (~(hugepage_sz - 1));
301         addr = (void *)(aligned_addr);
302
303         RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
304                 addr, *size);
305
306         /* increment offset */
307         baseaddr_offset += *size;
308
309         return addr;
310 }
311
312 static sigjmp_buf huge_jmpenv;
313
314 static void huge_sigbus_handler(int signo __rte_unused)
315 {
316         siglongjmp(huge_jmpenv, 1);
317 }
318
/* Put sigsetjmp into a wrapper function to avoid compile errors: any
 * non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
 */
323 static int huge_wrap_sigsetjmp(void)
324 {
325         return sigsetjmp(huge_jmpenv, 1);
326 }
327
328 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
329 /* Callback for numa library. */
330 void numa_error(char *where)
331 {
332         RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
333 }
334 #endif
335
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap()s hugepage_sz bytes in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks into contiguous virtual blocks.
 */
343 static unsigned
344 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
345                   uint64_t *essential_memory __rte_unused, int orig)
346 {
347         int fd;
348         unsigned i;
349         void *virtaddr;
350         void *vma_addr = NULL;
351         size_t vma_len = 0;
352 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
353         int node_id = -1;
354         int essential_prev = 0;
355         int oldpolicy;
356         struct bitmask *oldmask = numa_allocate_nodemask();
357         bool have_numa = true;
358         unsigned long maxnode = 0;
359
360         /* Check if kernel supports NUMA. */
361         if (numa_available() != 0) {
362                 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
363                 have_numa = false;
364         }
365
366         if (orig && have_numa) {
367                 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
368                 if (get_mempolicy(&oldpolicy, oldmask->maskp,
369                                   oldmask->size + 1, 0, 0) < 0) {
370                         RTE_LOG(ERR, EAL,
371                                 "Failed to get current mempolicy: %s. "
372                                 "Assuming MPOL_DEFAULT.\n", strerror(errno));
373                         oldpolicy = MPOL_DEFAULT;
374                 }
375                 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
376                         if (internal_config.socket_mem[i])
377                                 maxnode = i + 1;
378         }
379 #endif
380
381         for (i = 0; i < hpi->num_pages[0]; i++) {
382                 uint64_t hugepage_sz = hpi->hugepage_sz;
383
384 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
385                 if (maxnode) {
386                         unsigned int j;
387
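                        /*
                         * essential_memory[] tracks how much of each
                         * per-socket request (--socket-mem) is still unmet.
                         * Prefer the first socket that still needs memory;
                         * once every request has been satisfied, round-robin
                         * over the requested sockets for the surplus pages.
                         */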
388                         for (j = 0; j < maxnode; j++)
389                                 if (essential_memory[j])
390                                         break;
391
392                         if (j == maxnode) {
393                                 node_id = (node_id + 1) % maxnode;
394                                 while (!internal_config.socket_mem[node_id]) {
395                                         node_id++;
396                                         node_id %= maxnode;
397                                 }
398                                 essential_prev = 0;
399                         } else {
400                                 node_id = j;
401                                 essential_prev = essential_memory[j];
402
403                                 if (essential_memory[j] < hugepage_sz)
404                                         essential_memory[j] = 0;
405                                 else
406                                         essential_memory[j] -= hugepage_sz;
407                         }
408
409                         RTE_LOG(DEBUG, EAL,
410                                 "Setting policy MPOL_PREFERRED for socket %d\n",
411                                 node_id);
412                         numa_set_preferred(node_id);
413                 }
414 #endif
415
416                 if (orig) {
417                         hugepg_tbl[i].file_id = i;
418                         hugepg_tbl[i].size = hugepage_sz;
419                         eal_get_hugefile_path(hugepg_tbl[i].filepath,
420                                         sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
421                                         hugepg_tbl[i].file_id);
422                         hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
423                 }
424 #ifndef RTE_ARCH_64
425                 /* for 32-bit systems, don't remap 1G and 16G pages, just reuse
426                  * original map address as final map address.
427                  */
428                 else if ((hugepage_sz == RTE_PGSIZE_1G)
429                         || (hugepage_sz == RTE_PGSIZE_16G)) {
430                         hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
431                         hugepg_tbl[i].orig_va = NULL;
432                         continue;
433                 }
434 #endif
435                 else if (vma_len == 0) {
436                         unsigned j, num_pages;
437
438                         /* reserve a virtual area for next contiguous
439                          * physical block: count the number of
440                          * contiguous physical pages. */
441                         for (j = i+1; j < hpi->num_pages[0] ; j++) {
442 #ifdef RTE_ARCH_PPC_64
443                                 /* The physical addresses are sorted in
444                                  * descending order on PPC64 */
445                                 if (hugepg_tbl[j].physaddr !=
446                                     hugepg_tbl[j-1].physaddr - hugepage_sz)
447                                         break;
448 #else
449                                 if (hugepg_tbl[j].physaddr !=
450                                     hugepg_tbl[j-1].physaddr + hugepage_sz)
451                                         break;
452 #endif
453                         }
454                         num_pages = j - i;
455                         vma_len = num_pages * hugepage_sz;
456
457                         /* get the biggest virtual memory area up to
458                          * vma_len. If it fails, vma_addr is NULL, so
459                          * let the kernel provide the address. */
460                         vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
461                         if (vma_addr == NULL)
462                                 vma_len = hugepage_sz;
463                 }
464
465                 /* try to create hugepage file */
466                 fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
467                 if (fd < 0) {
468                         RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
469                                         strerror(errno));
470                         goto out;
471                 }
472
473                 /* map the segment, and populate page tables,
474                  * the kernel fills this segment with zeros */
475                 virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
476                                 MAP_SHARED | MAP_POPULATE, fd, 0);
477                 if (virtaddr == MAP_FAILED) {
478                         RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
479                                         strerror(errno));
480                         close(fd);
481                         goto out;
482                 }
483
                if (orig)
                        hugepg_tbl[i].orig_va = virtaddr;
                else
                        hugepg_tbl[i].final_va = virtaddr;
490
491                 if (orig) {
                        /* In Linux, hugetlb limitations such as cgroups are
                         * enforced at fault time rather than at mmap(), even
                         * with MAP_POPULATE; the kernel then sends a SIGBUS
                         * signal. To avoid being killed, save the stack
                         * environment here; if SIGBUS happens, we can jump
                         * back to it.
                         */
499                         if (huge_wrap_sigsetjmp()) {
500                                 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
501                                         "hugepages of size %u MB\n",
502                                         (unsigned)(hugepage_sz / 0x100000));
503                                 munmap(virtaddr, hugepage_sz);
504                                 close(fd);
505                                 unlink(hugepg_tbl[i].filepath);
506 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
507                                 if (maxnode)
508                                         essential_memory[node_id] =
509                                                 essential_prev;
510 #endif
511                                 goto out;
512                         }
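                        /* touch the page to force it to be backed now; this
                         * write is what may trigger SIGBUS if the hugetlb
                         * limits are exceeded */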
513                         *(int *)virtaddr = 0;
514                 }
515
516
517                 /* set shared flock on the file. */
518                 if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
520                                 __func__, strerror(errno));
521                         close(fd);
522                         goto out;
523                 }
524
525                 close(fd);
526
527                 vma_addr = (char *)vma_addr + hugepage_sz;
528                 vma_len -= hugepage_sz;
529         }
530
531 out:
532 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
533         if (maxnode) {
534                 RTE_LOG(DEBUG, EAL,
535                         "Restoring previous memory policy: %d\n", oldpolicy);
536                 if (oldpolicy == MPOL_DEFAULT) {
537                         numa_set_localalloc();
538                 } else if (set_mempolicy(oldpolicy, oldmask->maskp,
539                                          oldmask->size + 1) < 0) {
540                         RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
541                                 strerror(errno));
542                         numa_set_localalloc();
543                 }
544         }
545         numa_free_cpumask(oldmask);
546 #endif
547         return i;
548 }
549
550 /* Unmap all hugepages from original mapping */
551 static int
552 unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
553 {
554         unsigned i;
555         for (i = 0; i < hpi->num_pages[0]; i++) {
556                 if (hugepg_tbl[i].orig_va) {
557                         munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
558                         hugepg_tbl[i].orig_va = NULL;
559                 }
560         }
561         return 0;
562 }
563
564 /*
565  * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
566  * page.
567  */
568 static int
569 find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
570 {
571         int socket_id;
572         char *end, *nodestr;
573         unsigned i, hp_count = 0;
574         uint64_t virt_addr;
575         char buf[BUFSIZ];
576         char hugedir_str[PATH_MAX];
577         FILE *f;
578
579         f = fopen("/proc/self/numa_maps", "r");
580         if (f == NULL) {
                RTE_LOG(NOTICE, EAL, "NUMA support not available,"
                        " considering that all memory is in socket_id 0\n");
583                 return 0;
584         }
585
586         snprintf(hugedir_str, sizeof(hugedir_str),
587                         "%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
588
589         /* parse numa map */
590         while (fgets(buf, sizeof(buf), f) != NULL) {
591
592                 /* ignore non huge page */
593                 if (strstr(buf, " huge ") == NULL &&
594                                 strstr(buf, hugedir_str) == NULL)
595                         continue;
596
597                 /* get zone addr */
598                 virt_addr = strtoull(buf, &end, 16);
599                 if (virt_addr == 0 || end == buf) {
600                         RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
601                         goto error;
602                 }
603
604                 /* get node id (socket id) */
605                 nodestr = strstr(buf, " N");
606                 if (nodestr == NULL) {
607                         RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
608                         goto error;
609                 }
610                 nodestr += 2;
611                 end = strstr(nodestr, "=");
612                 if (end == NULL) {
613                         RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
614                         goto error;
615                 }
616                 end[0] = '\0';
617                 end = NULL;
618
619                 socket_id = strtoul(nodestr, &end, 0);
620                 if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
621                         RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
622                         goto error;
623                 }
624
625                 /* if we find this page in our mappings, set socket_id */
626                 for (i = 0; i < hpi->num_pages[0]; i++) {
627                         void *va = (void *)(unsigned long)virt_addr;
628                         if (hugepg_tbl[i].orig_va == va) {
629                                 hugepg_tbl[i].socket_id = socket_id;
630                                 hp_count++;
631 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
632                                 RTE_LOG(DEBUG, EAL,
633                                         "Hugepage %s is on socket %d\n",
634                                         hugepg_tbl[i].filepath, socket_id);
635 #endif
636                         }
637                 }
638         }
639
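        /* fail if we could not find the socket for every mapped hugepage */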
640         if (hp_count < hpi->num_pages[0])
641                 goto error;
642
643         fclose(f);
644         return 0;
645
646 error:
647         fclose(f);
648         return -1;
649 }
650
651 static int
652 cmp_physaddr(const void *a, const void *b)
653 {
654 #ifndef RTE_ARCH_PPC_64
655         const struct hugepage_file *p1 = a;
656         const struct hugepage_file *p2 = b;
657 #else
658         /* PowerPC needs memory sorted in reverse order from x86 */
659         const struct hugepage_file *p1 = b;
660         const struct hugepage_file *p2 = a;
661 #endif
662         if (p1->physaddr < p2->physaddr)
663                 return -1;
664         else if (p1->physaddr > p2->physaddr)
665                 return 1;
666         else
667                 return 0;
668 }
669
670 /*
671  * Uses mmap to create a shared memory area for storage of data
672  * Used in this file to store the hugepage file map on disk
673  */
674 static void *
675 create_shared_memory(const char *filename, const size_t mem_size)
676 {
677         void *retval;
678         int fd = open(filename, O_CREAT | O_RDWR, 0666);
679         if (fd < 0)
680                 return NULL;
681         if (ftruncate(fd, mem_size) < 0) {
682                 close(fd);
683                 return NULL;
684         }
685         retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
686         close(fd);
687         if (retval == MAP_FAILED)
688                 return NULL;
689         return retval;
690 }
691
692 /*
693  * this copies *active* hugepages from one hugepage table to another.
694  * destination is typically the shared memory.
695  */
696 static int
697 copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
698                 const struct hugepage_file * src, int src_size)
699 {
700         int src_pos, dst_pos = 0;
701
702         for (src_pos = 0; src_pos < src_size; src_pos++) {
703                 if (src[src_pos].final_va != NULL) {
704                         /* error on overflow attempt */
705                         if (dst_pos == dest_size)
706                                 return -1;
707                         memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
708                         dst_pos++;
709                 }
710         }
711         return 0;
712 }
713
714 static int
715 unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
716                 unsigned num_hp_info)
717 {
718         unsigned socket, size;
719         int page, nrpages = 0;
720
721         /* get total number of hugepages */
722         for (size = 0; size < num_hp_info; size++)
723                 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
724                         nrpages +=
725                         internal_config.hugepage_info[size].num_pages[socket];
726
727         for (page = 0; page < nrpages; page++) {
728                 struct hugepage_file *hp = &hugepg_tbl[page];
729
730                 if (hp->final_va != NULL && unlink(hp->filepath)) {
731                         RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
732                                 __func__, hp->filepath, strerror(errno));
733                 }
734         }
735         return 0;
736 }
737
738 /*
739  * unmaps hugepages that are not going to be used. since we originally allocate
740  * ALL hugepages (not just those we need), additional unmapping needs to be done.
741  */
742 static int
743 unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
744                 struct hugepage_info *hpi,
745                 unsigned num_hp_info)
746 {
747         unsigned socket, size;
748         int page, nrpages = 0;
749
750         /* get total number of hugepages */
751         for (size = 0; size < num_hp_info; size++)
752                 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
753                         nrpages += internal_config.hugepage_info[size].num_pages[socket];
754
755         for (size = 0; size < num_hp_info; size++) {
756                 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
757                         unsigned pages_found = 0;
758
759                         /* traverse until we have unmapped all the unused pages */
760                         for (page = 0; page < nrpages; page++) {
761                                 struct hugepage_file *hp = &hugepg_tbl[page];
762
763                                 /* find a page that matches the criteria */
764                                 if ((hp->size == hpi[size].hugepage_sz) &&
765                                                 (hp->socket_id == (int) socket)) {
766
767                                         /* if we skipped enough pages, unmap the rest */
768                                         if (pages_found == hpi[size].num_pages[socket]) {
769                                                 uint64_t unmap_len;
770
771                                                 unmap_len = hp->size;
772
773                                                 /* get start addr and len of the remaining segment */
774                                                 munmap(hp->final_va, (size_t) unmap_len);
775
776                                                 hp->final_va = NULL;
777                                                 if (unlink(hp->filepath) == -1) {
778                                                         RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
779                                                                         __func__, hp->filepath, strerror(errno));
780                                                         return -1;
781                                                 }
782                                         } else {
                                                /* this page is kept; count it and skip */
784                                                 pages_found++;
785                                         }
786
787                                 } /* match page */
788                         } /* foreach page */
789                 } /* foreach socket */
790         } /* foreach pagesize */
791
792         return 0;
793 }
794
795 static inline uint64_t
796 get_socket_mem_size(int socket)
797 {
798         uint64_t size = 0;
799         unsigned i;
800
801         for (i = 0; i < internal_config.num_hugepage_sizes; i++){
802                 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
803                 if (hpi->hugedir != NULL)
804                         size += hpi->hugepage_sz * hpi->num_pages[socket];
805         }
806
807         return size;
808 }
809
810 /*
811  * This function is a NUMA-aware equivalent of calc_num_pages.
812  * It takes in the list of hugepage sizes and the
813  * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> RAM.
815  */
816 static int
817 calc_num_pages_per_socket(uint64_t * memory,
818                 struct hugepage_info *hp_info,
819                 struct hugepage_info *hp_used,
820                 unsigned num_hp_info)
821 {
822         unsigned socket, j, i = 0;
823         unsigned requested, available;
824         int total_num_pages = 0;
825         uint64_t remaining_mem, cur_mem;
826         uint64_t total_mem = internal_config.memory;
827
828         if (num_hp_info == 0)
829                 return -1;
830
831         /* if specific memory amounts per socket weren't requested */
832         if (internal_config.force_sockets == 0) {
833                 int cpu_per_socket[RTE_MAX_NUMA_NODES];
834                 size_t default_size, total_size;
835                 unsigned lcore_id;
836
837                 /* Compute number of cores per socket */
838                 memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
839                 RTE_LCORE_FOREACH(lcore_id) {
840                         cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
841                 }
842
843                 /*
844                  * Automatically spread requested memory amongst detected sockets according
845                  * to number of cores from cpu mask present on each socket
846                  */
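                /*
                 * Example: with -m 1024 and four lcores, three on socket 0
                 * and one on socket 1, socket 0 is initially assigned
                 * 1024 * 3 / 4 = 768 MB and socket 1 gets 256 MB, each
                 * capped by the hugepage memory actually available on
                 * that socket.
                 */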
847                 total_size = internal_config.memory;
848                 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
849
850                         /* Set memory amount per socket */
851                         default_size = (internal_config.memory * cpu_per_socket[socket])
852                                         / rte_lcore_count();
853
854                         /* Limit to maximum available memory on socket */
855                         default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
856
857                         /* Update sizes */
858                         memory[socket] = default_size;
859                         total_size -= default_size;
860                 }
861
862                 /*
863                  * If some memory is remaining, try to allocate it by getting all
864                  * available memory from sockets, one after the other
865                  */
866                 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
867                         /* take whatever is available */
868                         default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
869                                                total_size);
870
871                         /* Update sizes */
872                         memory[socket] += default_size;
873                         total_size -= default_size;
874                 }
875         }
876
877         for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
878                 /* skips if the memory on specific socket wasn't requested */
879                 for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
880                         hp_used[i].hugedir = hp_info[i].hugedir;
881                         hp_used[i].num_pages[socket] = RTE_MIN(
882                                         memory[socket] / hp_info[i].hugepage_sz,
883                                         hp_info[i].num_pages[socket]);
884
885                         cur_mem = hp_used[i].num_pages[socket] *
886                                         hp_used[i].hugepage_sz;
887
888                         memory[socket] -= cur_mem;
889                         total_mem -= cur_mem;
890
891                         total_num_pages += hp_used[i].num_pages[socket];
892
893                         /* check if we have met all memory requests */
894                         if (memory[socket] == 0)
895                                 break;
896
                        /* if we have used up all the pages of this size,
                         * move on to the next size */
899                         if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
900                                 continue;
                        /* At this point we know that more pages of this size
                         * are available, but each one is bigger than the
                         * memory we still want, so let's see if we can get
                         * enough from other page sizes.
                         */
905                         remaining_mem = 0;
906                         for (j = i+1; j < num_hp_info; j++)
907                                 remaining_mem += hp_info[j].hugepage_sz *
908                                 hp_info[j].num_pages[socket];
909
910                         /* is there enough other memory, if not allocate another page and quit */
911                         if (remaining_mem < memory[socket]){
912                                 cur_mem = RTE_MIN(memory[socket],
913                                                 hp_info[i].hugepage_sz);
914                                 memory[socket] -= cur_mem;
915                                 total_mem -= cur_mem;
916                                 hp_used[i].num_pages[socket]++;
917                                 total_num_pages++;
918                                 break; /* we are done with this socket*/
919                         }
920                 }
921                 /* if we didn't satisfy all memory requirements per socket */
922                 if (memory[socket] > 0) {
923                         /* to prevent icc errors */
924                         requested = (unsigned) (internal_config.socket_mem[socket] /
925                                         0x100000);
926                         available = requested -
927                                         ((unsigned) (memory[socket] / 0x100000));
928                         RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
929                                         "Requested: %uMB, available: %uMB\n", socket,
930                                         requested, available);
931                         return -1;
932                 }
933         }
934
935         /* if we didn't satisfy total memory requirements */
936         if (total_mem > 0) {
937                 requested = (unsigned) (internal_config.memory / 0x100000);
938                 available = requested - (unsigned) (total_mem / 0x100000);
939                 RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
940                                 " available: %uMB\n", requested, available);
941                 return -1;
942         }
943         return total_num_pages;
944 }
945
946 static inline size_t
947 eal_get_hugepage_mem_size(void)
948 {
949         uint64_t size = 0;
950         unsigned i, j;
951
952         for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
953                 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
954                 if (hpi->hugedir != NULL) {
955                         for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
956                                 size += hpi->hugepage_sz * hpi->num_pages[j];
957                         }
958                 }
959         }
960
961         return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
962 }
963
964 static struct sigaction huge_action_old;
965 static int huge_need_recover;
966
967 static void
968 huge_register_sigbus(void)
969 {
970         sigset_t mask;
971         struct sigaction action;
972
973         sigemptyset(&mask);
974         sigaddset(&mask, SIGBUS);
975         action.sa_flags = 0;
976         action.sa_mask = mask;
977         action.sa_handler = huge_sigbus_handler;
978
979         huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
980 }
981
982 static void
983 huge_recover_sigbus(void)
984 {
985         if (huge_need_recover) {
986                 sigaction(SIGBUS, &huge_action_old, NULL);
987                 huge_need_recover = 0;
988         }
989 }
990
991 /*
 * Prepare the physical memory mapping: fill the configuration structure with
 * this information and return 0 on success.
994  *  1. map N huge pages in separate files in hugetlbfs
995  *  2. find associated physical addr
996  *  3. find associated NUMA socket ID
997  *  4. sort all huge pages by physical address
998  *  5. remap these N huge pages in the correct order
999  *  6. unmap the first mapping
1000  *  7. fill memsegs in configuration with contiguous zones
1001  */
1002 int
1003 rte_eal_hugepage_init(void)
1004 {
1005         struct rte_mem_config *mcfg;
1006         struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1007         struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1008
1009         uint64_t memory[RTE_MAX_NUMA_NODES];
1010
1011         unsigned hp_offset;
1012         int i, j, new_memseg;
1013         int nr_hugefiles, nr_hugepages = 0;
1014         void *addr;
1015
1016         test_phys_addrs_available();
1017
1018         memset(used_hp, 0, sizeof(used_hp));
1019
1020         /* get pointer to global configuration */
1021         mcfg = rte_eal_get_configuration()->mem_config;
1022
1023         /* hugetlbfs can be disabled */
1024         if (internal_config.no_hugetlbfs) {
1025                 addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1027                 if (addr == MAP_FAILED) {
1028                         RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
1029                                         strerror(errno));
1030                         return -1;
1031                 }
1032                 if (rte_eal_iova_mode() == RTE_IOVA_VA)
1033                         mcfg->memseg[0].phys_addr = (uintptr_t)addr;
1034                 else
1035                         mcfg->memseg[0].phys_addr = RTE_BAD_PHYS_ADDR;
1036                 mcfg->memseg[0].addr = addr;
1037                 mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
1038                 mcfg->memseg[0].len = internal_config.memory;
1039                 mcfg->memseg[0].socket_id = 0;
1040                 return 0;
1041         }
1042
1043         /* calculate total number of hugepages available. at this point we haven't
1044          * yet started sorting them so they all are on socket 0 */
1045         for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1046                 /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
1047                 used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
1048
1049                 nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
1050         }
1051
1052         /*
1053          * allocate a memory area for hugepage table.
1054          * this isn't shared memory yet. due to the fact that we need some
1055          * processing done on these pages, shared memory will be created
1056          * at a later stage.
1057          */
1058         tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1059         if (tmp_hp == NULL)
1060                 goto fail;
1061
1062         memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1063
1064         hp_offset = 0; /* where we start the current page size entries */
1065
1066         huge_register_sigbus();
1067
1068         /* make a copy of socket_mem, needed for balanced allocation. */
1069         for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1070                 memory[i] = internal_config.socket_mem[i];
1071
1072
1073         /* map all hugepages and sort them */
1074         for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
1075                 unsigned pages_old, pages_new;
1076                 struct hugepage_info *hpi;
1077
1078                 /*
1079                  * we don't yet mark hugepages as used at this stage, so
1080                  * we just map all hugepages available to the system
1081                  * all hugepages are still located on socket 0
1082                  */
1083                 hpi = &internal_config.hugepage_info[i];
1084
1085                 if (hpi->num_pages[0] == 0)
1086                         continue;
1087
1088                 /* map all hugepages available */
1089                 pages_old = hpi->num_pages[0];
1090                 pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
1091                                               memory, 1);
1092                 if (pages_new < pages_old) {
1093                         RTE_LOG(DEBUG, EAL,
1094                                 "%d not %d hugepages of size %u MB allocated\n",
1095                                 pages_new, pages_old,
1096                                 (unsigned)(hpi->hugepage_sz / 0x100000));
1097
1098                         int pages = pages_old - pages_new;
1099
1100                         nr_hugepages -= pages;
1101                         hpi->num_pages[0] = pages_new;
1102                         if (pages_new == 0)
1103                                 continue;
1104                 }
1105
1106                 if (phys_addrs_available) {
1107                         /* find physical addresses for each hugepage */
1108                         if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1109                                 RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
1110                                         "for %u MB pages\n",
1111                                         (unsigned int)(hpi->hugepage_sz / 0x100000));
1112                                 goto fail;
1113                         }
1114                 } else {
1115                         /* set physical addresses for each hugepage */
1116                         if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1117                                 RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
1118                                         "for %u MB pages\n",
1119                                         (unsigned int)(hpi->hugepage_sz / 0x100000));
1120                                 goto fail;
1121                         }
1122                 }
1123
1124                 if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1125                         RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
1126                                         (unsigned)(hpi->hugepage_sz / 0x100000));
1127                         goto fail;
1128                 }
1129
1130                 qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1131                       sizeof(struct hugepage_file), cmp_physaddr);
1132
1133                 /* remap all hugepages */
1134                 if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
1135                     hpi->num_pages[0]) {
1136                         RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
1137                                         (unsigned)(hpi->hugepage_sz / 0x100000));
1138                         goto fail;
1139                 }
1140
1141                 /* unmap original mappings */
1142                 if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
1143                         goto fail;
1144
1145                 /* we have processed a num of hugepages of this size, so inc offset */
1146                 hp_offset += hpi->num_pages[0];
1147         }
1148
1149         huge_recover_sigbus();
1150
1151         if (internal_config.memory == 0 && internal_config.force_sockets == 0)
1152                 internal_config.memory = eal_get_hugepage_mem_size();
1153
1154         nr_hugefiles = nr_hugepages;
1155
1156
1157         /* clean out the numbers of pages */
1158         for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
1159                 for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1160                         internal_config.hugepage_info[i].num_pages[j] = 0;
1161
1162         /* get hugepages for each socket */
1163         for (i = 0; i < nr_hugefiles; i++) {
1164                 int socket = tmp_hp[i].socket_id;
1165
1166                 /* find a hugepage info with right size and increment num_pages */
1167                 const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1168                                 (int)internal_config.num_hugepage_sizes);
1169                 for (j = 0; j < nb_hpsizes; j++) {
1170                         if (tmp_hp[i].size ==
1171                                         internal_config.hugepage_info[j].hugepage_sz) {
1172                                 internal_config.hugepage_info[j].num_pages[socket]++;
1173                         }
1174                 }
1175         }
1176
1177         /* make a copy of socket_mem, needed for number of pages calculation */
1178         for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1179                 memory[i] = internal_config.socket_mem[i];
1180
1181         /* calculate final number of pages */
1182         nr_hugepages = calc_num_pages_per_socket(memory,
1183                         internal_config.hugepage_info, used_hp,
1184                         internal_config.num_hugepage_sizes);
1185
1186         /* error if not enough memory available */
1187         if (nr_hugepages < 0)
1188                 goto fail;
1189
1190         /* reporting in! */
1191         for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1192                 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1193                         if (used_hp[i].num_pages[j] > 0) {
1194                                 RTE_LOG(DEBUG, EAL,
1195                                         "Requesting %u pages of size %uMB"
1196                                         " from socket %i\n",
1197                                         used_hp[i].num_pages[j],
1198                                         (unsigned)
1199                                         (used_hp[i].hugepage_sz / 0x100000),
1200                                         j);
1201                         }
1202                 }
1203         }
1204
1205         /* create shared memory */
1206         hugepage = create_shared_memory(eal_hugepage_info_path(),
1207                         nr_hugefiles * sizeof(struct hugepage_file));
1208
1209         if (hugepage == NULL) {
1210                 RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
1211                 goto fail;
1212         }
1213         memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1214
1215         /*
1216          * unmap pages that we won't need (looks at used_hp).
1217          * also, sets final_va to NULL on pages that were unmapped.
1218          */
1219         if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1220                         internal_config.num_hugepage_sizes) < 0) {
1221                 RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
1222                 goto fail;
1223         }
1224
1225         /*
1226          * copy stuff from malloc'd hugepage* to the actual shared memory.
1227          * this procedure only copies those hugepages that have final_va
1228          * not NULL. has overflow protection.
1229          */
1230         if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1231                         tmp_hp, nr_hugefiles) < 0) {
1232                 RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
1233                 goto fail;
1234         }
1235
1236         /* free the hugepage backing files */
1237         if (internal_config.hugepage_unlink &&
1238                 unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
1239                 RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
1240                 goto fail;
1241         }
1242
1243         /* free the temporary hugepage table */
1244         free(tmp_hp);
1245         tmp_hp = NULL;
1246
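        /*
         * Walk the hugepage list (sorted by physical address) and coalesce
         * pages that are contiguous both physically and virtually, of the
         * same size and on the same socket, into memsegs.
         */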
1247         /* first memseg index shall be 0 after incrementing it below */
1248         j = -1;
1249         for (i = 0; i < nr_hugefiles; i++) {
1250                 new_memseg = 0;
1251
1252                 /* if this is a new section, create a new memseg */
1253                 if (i == 0)
1254                         new_memseg = 1;
1255                 else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
1256                         new_memseg = 1;
1257                 else if (hugepage[i].size != hugepage[i-1].size)
1258                         new_memseg = 1;
1259
1260 #ifdef RTE_ARCH_PPC_64
1261                 /* On PPC64 architecture, the mmap always start from higher
1262                  * virtual address to lower address. Here, both the physical
1263                  * address and virtual address are in descending order */
1264                 else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
1265                     hugepage[i].size)
1266                         new_memseg = 1;
1267                 else if (((unsigned long)hugepage[i-1].final_va -
1268                     (unsigned long)hugepage[i].final_va) != hugepage[i].size)
1269                         new_memseg = 1;
1270 #else
1271                 else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
1272                     hugepage[i].size)
1273                         new_memseg = 1;
1274                 else if (((unsigned long)hugepage[i].final_va -
1275                     (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
1276                         new_memseg = 1;
1277 #endif
1278
1279                 if (new_memseg) {
1280                         j += 1;
1281                         if (j == RTE_MAX_MEMSEG)
1282                                 break;
1283
1284                         mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
1285                         mcfg->memseg[j].addr = hugepage[i].final_va;
1286                         mcfg->memseg[j].len = hugepage[i].size;
1287                         mcfg->memseg[j].socket_id = hugepage[i].socket_id;
1288                         mcfg->memseg[j].hugepage_sz = hugepage[i].size;
1289                 }
1290                 /* continuation of previous memseg */
1291                 else {
1292 #ifdef RTE_ARCH_PPC_64
1293                 /* Use the phy and virt address of the last page as segment
1294                  * address for IBM Power architecture */
1295                         mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
1296                         mcfg->memseg[j].addr = hugepage[i].final_va;
1297 #endif
1298                         mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
1299                 }
1300                 hugepage[i].memseg_id = j;
1301         }
1302
1303         if (i < nr_hugefiles) {
1304                 RTE_LOG(ERR, EAL, "Can only reserve %d pages "
1305                         "from %d requested\n"
1306                         "Current %s=%d is not enough\n"
                        "Please either increase it or request less "
                        "memory.\n",
1309                         i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
1310                         RTE_MAX_MEMSEG);
1311                 goto fail;
1312         }
1313
1314         munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1315
1316         return 0;
1317
1318 fail:
1319         huge_recover_sigbus();
1320         free(tmp_hp);
1321         if (hugepage != NULL)
1322                 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1323
1324         return -1;
1325 }
1326
1327 /*
1328  * uses fstat to report the size of a file on disk
1329  */
1330 static off_t
1331 getFileSize(int fd)
1332 {
1333         struct stat st;
1334         if (fstat(fd, &st) < 0)
1335                 return 0;
1336         return st.st_size;
1337 }
1338
1339 /*
 * This creates the memory mappings in the secondary process to match those of
 * the primary (server) process. It goes through each memory segment in the DPDK runtime
1342  * configuration and finds the hugepages which form that segment, mapping them
1343  * in order to form a contiguous block in the virtual memory space
1344  */
1345 int
1346 rte_eal_hugepage_attach(void)
1347 {
1348         const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1349         struct hugepage_file *hp = NULL;
1350         unsigned num_hp = 0;
1351         unsigned i, s = 0; /* s used to track the segment number */
1352         unsigned max_seg = RTE_MAX_MEMSEG;
1353         off_t size = 0;
1354         int fd, fd_zero = -1, fd_hugepage = -1;
1355
1356         if (aslr_enabled() > 0) {
1357                 RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
1358                                 "(ASLR) is enabled in the kernel.\n");
1359                 RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
1360                                 "into secondary processes\n");
1361         }
1362
1363         test_phys_addrs_available();
1364
1365         fd_zero = open("/dev/zero", O_RDONLY);
1366         if (fd_zero < 0) {
1367                 RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
1368                 goto error;
1369         }
1370         fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
1371         if (fd_hugepage < 0) {
1372                 RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
1373                 goto error;
1374         }
1375
1376         /* map all segments into memory to make sure we get the addrs */
1377         for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
1378                 void *base_addr;
1379
1380                 /*
1381                  * the first memory segment with len==0 is the one that
1382                  * follows the last valid segment.
1383                  */
1384                 if (mcfg->memseg[s].len == 0)
1385                         break;
1386
1387                 /*
                 * fd_zero is mmapped to get a contiguous block of virtual
                 * addresses of the appropriate memseg size.
                 * Use mmap to get the same addresses as in the primary process.
1391                  */
1392                 base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
1393                                  PROT_READ,
1394 #ifdef RTE_ARCH_PPC_64
1395                                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
1396 #else
1397                                  MAP_PRIVATE,
1398 #endif
1399                                  fd_zero, 0);
1400                 if (base_addr == MAP_FAILED ||
1401                     base_addr != mcfg->memseg[s].addr) {
1402                         max_seg = s;
1403                         if (base_addr != MAP_FAILED) {
1404                                 /* errno is stale, don't use */
1405                                 RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
1406                                         "in /dev/zero at [%p], got [%p] - "
1407                                         "please use '--base-virtaddr' option\n",
1408                                         (unsigned long long)mcfg->memseg[s].len,
1409                                         mcfg->memseg[s].addr, base_addr);
1410                                 munmap(base_addr, mcfg->memseg[s].len);
1411                         } else {
1412                                 RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
1413                                         "in /dev/zero at [%p]: '%s'\n",
1414                                         (unsigned long long)mcfg->memseg[s].len,
1415                                         mcfg->memseg[s].addr, strerror(errno));
1416                         }
1417                         if (aslr_enabled() > 0) {
1418                                 RTE_LOG(ERR, EAL, "It is recommended to "
1419                                         "disable ASLR in the kernel "
1420                                         "and retry running both primary "
1421                                         "and secondary processes\n");
1422                         }
1423                         goto error;
1424                 }
1425         }
1426
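        /*
         * The anonymous mappings above only reserved the address ranges; now
         * read the hugepage table written by the primary process and replace
         * each reservation with mappings of the actual hugepage files.
         */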
1427         size = getFileSize(fd_hugepage);
1428         hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1429         if (hp == MAP_FAILED) {
1430                 RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
1431                 goto error;
1432         }
1433
1434         num_hp = size / sizeof(struct hugepage_file);
1435         RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
1436
1437         s = 0;
1438         while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
1439                 void *addr, *base_addr;
1440                 uintptr_t offset = 0;
1441                 size_t mapping_size;
1442                 /*
1443                  * free previously mapped memory so we can map the
1444                  * hugepages into the space
1445                  */
1446                 base_addr = mcfg->memseg[s].addr;
1447                 munmap(base_addr, mcfg->memseg[s].len);
1448
1449                 /* find the hugepages for this segment and map them
1450                  * we don't need to worry about order, as the server sorted the
1451                  * entries before it did the second mmap of them */
1452                 for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
1453                         if (hp[i].memseg_id == (int)s){
1454                                 fd = open(hp[i].filepath, O_RDWR);
1455                                 if (fd < 0) {
1456                                         RTE_LOG(ERR, EAL, "Could not open %s\n",
1457                                                 hp[i].filepath);
1458                                         goto error;
1459                                 }
1460                                 mapping_size = hp[i].size;
1461                                 addr = mmap(RTE_PTR_ADD(base_addr, offset),
1462                                                 mapping_size, PROT_READ | PROT_WRITE,
1463                                                 MAP_SHARED, fd, 0);
1464                                 close(fd); /* close file both on success and on failure */
1465                                 if (addr == MAP_FAILED ||
1466                                                 addr != RTE_PTR_ADD(base_addr, offset)) {
1467                                         RTE_LOG(ERR, EAL, "Could not mmap %s\n",
1468                                                 hp[i].filepath);
1469                                         goto error;
1470                                 }
1471                                 offset+=mapping_size;
1472                         }
1473                 }
1474                 RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
1475                                 (unsigned long long)mcfg->memseg[s].len);
1476                 s++;
1477         }
1478         /* unmap the hugepage config file, since we are done using it */
1479         munmap(hp, size);
1480         close(fd_zero);
1481         close(fd_hugepage);
1482         return 0;
1483
1484 error:
1485         for (i = 0; i < max_seg && mcfg->memseg[i].len > 0; i++)
1486                 munmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);
1487         if (hp != NULL && hp != MAP_FAILED)
1488                 munmap(hp, size);
1489         if (fd_zero >= 0)
1490                 close(fd_zero);
1491         if (fd_hugepage >= 0)
1492                 close(fd_hugepage);
1493         return -1;
1494 }
1495
1496 int
1497 rte_eal_using_phys_addrs(void)
1498 {
1499         return phys_addrs_available;
1500 }