1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
7 #include <sys/sysctl.h>
12 #include <rte_eal_memconfig.h>
14 #include <rte_string_fns.h>
15 #include "eal_private.h"
16 #include "eal_internal_cfg.h"
17 #include "eal_filesystem.h"
19 #define EAL_PAGE_SIZE (sysconf(_SC_PAGESIZE))
22 * Get physical address of any mapped virtual address in the current process.
/*
 * NOTE(review): this chunk is elided — the opening brace, return statement
 * and closing brace of this function are not visible here. From the visible
 * comment, the function is a deliberate stub on this platform and is only
 * reached when hugepages are disabled; presumably it returns an error
 * sentinel (RTE_BAD_IOVA or similar) — TODO confirm against the full file.
 */
25 rte_mem_virt2phy(const void *virtaddr)
27 /* XXX not implemented. This function is only used by
28 * rte_mempool_virt2iova() when hugepages are disabled. */
/*
 * Get the IO virtual address for a mapped virtual address.
 * On this platform it simply delegates to rte_mem_virt2phy() (IOVA == PA).
 * NOTE(review): braces around the body are elided in this chunk.
 */
33 rte_mem_virt2iova(const void *virtaddr)
35 return rte_mem_virt2phy(virtaddr);
/*
 * Primary-process memory init: map every contigmem buffer for each configured
 * hugepage size into this process and record each mapping as an rte_memseg in
 * the shared memory configuration.
 *
 * NOTE(review): many lines of this function are elided in this chunk (return
 * statements, the mmap offset argument, error-path exits, the accounting that
 * advances total_mem, and the declarations of addr/physaddr/error). Comments
 * below describe only what the visible lines establish.
 */
39 rte_eal_hugepage_init(void)
41 struct rte_mem_config *mcfg;
42 uint64_t total_mem = 0;
44 unsigned i, j, seg_idx = 0;
46 /* get pointer to global configuration */
47 mcfg = rte_eal_get_configuration()->mem_config;
49 /* for debug purposes, hugetlbfs can be disabled */
50 if (internal_config.no_hugetlbfs) {
/*
 * Fallback path: a single malloc'd 4K-page segment stands in for hugepage
 * memory. The IOVA is just the virtual address reinterpreted — only valid
 * for no-hugepage debug use. NOTE(review): malloc result is assigned but a
 * NULL check is not visible in this chunk — TODO confirm one exists.
 */
51 addr = malloc(internal_config.memory);
52 mcfg->memseg[0].iova = (rte_iova_t)(uintptr_t)addr;
53 mcfg->memseg[0].addr = addr;
54 mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
55 mcfg->memseg[0].len = internal_config.memory;
56 mcfg->memseg[0].socket_id = 0;
60 /* map all hugepages and sort them */
61 for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
62 struct hugepage_info *hpi;
64 hpi = &internal_config.hugepage_info[i];
/* One contigmem buffer per iteration; num_pages[0] suggests a single-socket
 * view here — TODO confirm. */
65 for (j = 0; j < hpi->num_pages[0]; j++) {
66 struct rte_memseg *seg;
69 size_t sysctl_size = sizeof(physaddr);
70 char physaddr_str[64];
/* Map buffer j of this hugepage size from the contigmem device fd
 * (hpi->lock_descriptor). The offset argument is elided in this chunk. */
72 addr = mmap(NULL, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
73 MAP_SHARED, hpi->lock_descriptor,
75 if (addr == MAP_FAILED) {
76 RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
/* Ask the kernel for the buffer's physical address via the
 * hw.contigmem sysctl tree (name built per-buffer; the index suffix
 * format is elided here). */
81 snprintf(physaddr_str, sizeof(physaddr_str), "hw.contigmem"
83 error = sysctlbyname(physaddr_str, &physaddr, &sysctl_size,
86 RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
87 "from %s\n", j, hpi->hugedir);
/* Record the mapping as the next memseg in the shared config.
 * (Assignment of seg->addr/seg->iova is elided in this chunk.) */
91 seg = &mcfg->memseg[seg_idx++];
94 seg->hugepage_sz = hpi->hugepage_sz;
95 seg->len = hpi->hugepage_sz;
96 seg->nchannel = mcfg->nchannel;
97 seg->nrank = mcfg->nrank;
100 RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
102 seg_idx, addr, physaddr, hpi->hugepage_sz);
/* Stop once the requested amount of memory is mapped or the memseg
 * table is full. NOTE(review): total_mem is initialized to 0 above but
 * the line that increases it is not visible in this chunk — confirm it
 * is advanced per segment in the full file. */
103 if (total_mem >= internal_config.memory ||
104 seg_idx >= RTE_MAX_MEMSEG)
/*
 * Secondary-process memory attach: re-map the primary process's contigmem
 * buffers at the exact same virtual addresses recorded in the shared memseg
 * table, so pointers are valid across processes.
 *
 * NOTE(review): error-path exits (gotos/returns) and several declarations
 * (i, addr) are elided in this chunk; the trailing lines (L81-L83) appear to
 * belong to an error/cleanup label whose body is partly missing.
 */
112 rte_eal_hugepage_attach(void)
114 const struct hugepage_info *hpi;
115 int fd_hugepage_info, fd_hugepage = -1;
117 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
119 /* Obtain a file descriptor for hugepage_info */
120 fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
121 if (fd_hugepage_info < 0) {
122 RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
126 /* Map the shared hugepage_info into the process address spaces */
/* Read-only private mapping of the hugepage_info the primary wrote out.
 * NOTE(review): the MAP_FAILED check's `if` line is elided; only its log
 * statement (line 130) is visible. */
127 hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
128 fd_hugepage_info, 0);
130 RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
134 /* Obtain a file descriptor for contiguous memory */
135 fd_hugepage = open(hpi->hugedir, O_RDWR);
136 if (fd_hugepage < 0) {
137 RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
141 /* Map the contiguous memory into each memory segment */
142 for (i = 0; i < hpi->num_pages[0]; i++) {
145 struct rte_memseg *seg = &mcfg->memseg[i];
/* MAP_FIXED at seg->addr: the mapping must land at the same virtual
 * address the primary used, or cross-process pointers break — hence the
 * addr != seg->addr check below. The file-offset argument is elided in
 * this chunk. */
147 addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
148 MAP_SHARED|MAP_FIXED, fd_hugepage,
150 if (addr == MAP_FAILED || addr != seg->addr) {
151 RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
158 /* hugepage_info is no longer required */
159 munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
160 close(fd_hugepage_info);
/* Cleanup (presumably the error label — the label itself is elided):
 * close whichever descriptors were opened. */
165 if (fd_hugepage_info >= 0)
166 close(fd_hugepage_info);
167 if (fd_hugepage >= 0)
173 rte_eal_using_phys_addrs(void)