1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
7 #include <sys/sysctl.h>
14 #include <rte_eal_memconfig.h>
16 #include <rte_string_fns.h>
17 #include "eal_private.h"
18 #include "eal_internal_cfg.h"
19 #include "eal_filesystem.h"
21 #define EAL_PAGE_SIZE (sysconf(_SC_PAGESIZE))
24 * Get physical address of any mapped virtual address in the current process.
/*
 * Get the physical address backing a mapped virtual address.
 * NOTE(review): this sampled chunk elides the function body's braces and
 * return statement; the XXX comment below says it is unimplemented, so it
 * presumably returns a bad-address sentinel — confirm against full source.
 */
27 rte_mem_virt2phy(const void *virtaddr)
29 /* XXX not implemented. This function is only used by
30 * rte_mempool_virt2iova() when hugepages are disabled. */
/*
 * IOVA lookup for a virtual address: simply delegates to
 * rte_mem_virt2phy() (physical address used as IOVA on this platform).
 * NOTE(review): surrounding braces are elided in this sampled chunk.
 */
35 rte_mem_virt2iova(const void *virtaddr)
37 return rte_mem_virt2phy(virtaddr);
/*
 * Map hugepage-backed (FreeBSD contigmem) memory into DPDK memseg lists
 * during EAL initialization, or fall back to one anonymous 4K-page mapping
 * when hugepages are disabled.
 *
 * NOTE(review): this chunk is a lossy sample of the file — interior lines
 * (closing braces, else branches, error-path returns) are elided; comments
 * below only describe what the visible lines establish.
 */
41 rte_eal_hugepage_init(void)
43 struct rte_mem_config *mcfg;
44 uint64_t total_mem = 0;
46 unsigned int i, j, seg_idx = 0;
48 /* get pointer to global configuration */
49 mcfg = rte_eal_get_configuration()->mem_config;
51 /* for debug purposes, hugetlbfs can be disabled */
52 if (internal_config.no_hugetlbfs) {
53 struct rte_memseg_list *msl;
54 struct rte_fbarray *arr;
55 struct rte_memseg *ms;
/* no-huge mode: back all requested memory with plain 4K pages */
59 /* create a memseg list */
60 msl = &mcfg->memsegs[0];
62 page_sz = RTE_PGSIZE_4K;
63 n_segs = internal_config.memory / page_sz;
65 if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
66 sizeof(struct rte_memseg))) {
67 RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
/* single anonymous private mapping covering the whole request */
71 addr = mmap(NULL, internal_config.memory,
72 PROT_READ | PROT_WRITE,
73 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
74 if (addr == MAP_FAILED) {
75 RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
80 msl->page_sz = page_sz;
83 /* populate memsegs. each memseg is 1 page long */
84 for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
85 arr = &msl->memseg_arr;
87 ms = rte_fbarray_get(arr, cur_seg);
/* in VA IOVA mode the IOVA equals the VA; otherwise it is unknown */
88 if (rte_eal_iova_mode() == RTE_IOVA_VA)
89 ms->iova = (uintptr_t)addr;
91 ms->iova = RTE_BAD_IOVA;
93 ms->hugepage_sz = page_sz;
97 rte_fbarray_set_used(arr, cur_seg);
99 addr = RTE_PTR_ADD(addr, page_sz);
104 /* map all hugepages and sort them */
105 for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
106 struct hugepage_info *hpi;
107 uint64_t page_sz, mem_needed;
108 unsigned int n_pages, max_pages;
110 hpi = &internal_config.hugepage_info[i];
111 page_sz = hpi->hugepage_sz;
112 max_pages = hpi->num_pages[0];
/* remaining unsatisfied memory, rounded up (alignment arg elided here) */
113 mem_needed = RTE_ALIGN_CEIL(internal_config.memory - total_mem,
116 n_pages = RTE_MIN(mem_needed / page_sz, max_pages);
118 for (j = 0; j < n_pages; j++) {
119 struct rte_memseg_list *msl;
120 struct rte_fbarray *arr;
121 struct rte_memseg *seg;
125 size_t sysctl_size = sizeof(physaddr);
126 char physaddr_str[64];
/* search memseg lists for one with a matching page size and free slots */
128 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
131 msl = &mcfg->memsegs[msl_idx];
132 arr = &msl->memseg_arr;
134 if (msl->page_sz != page_sz)
137 empty = arr->count == 0;
139 /* we need 1, plus hole if not empty */
140 ms_idx = rte_fbarray_find_next_n_free(arr,
141 0, 1 + (empty ? 1 : 0));
143 /* memseg list is full? */
147 /* leave some space between memsegs, they are
148 * not IOVA contiguous, so they shouldn't be VA
156 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
157 RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
158 RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
159 RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
162 arr = &msl->memseg_arr;
163 seg = rte_fbarray_get(arr, ms_idx);
/* target VA is fixed by the memseg list's base address and slot index */
165 addr = RTE_PTR_ADD(msl->base_va,
166 (size_t)msl->page_sz * ms_idx);
168 /* address is already mapped in memseg list, so using
169 * MAP_FIXED here is safe.
171 addr = mmap(addr, page_sz, PROT_READ|PROT_WRITE,
172 MAP_SHARED | MAP_FIXED,
173 hpi->lock_descriptor,
175 if (addr == MAP_FAILED) {
176 RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
/* ask the contigmem driver (via sysctl) for the buffer's physical addr */
181 snprintf(physaddr_str, sizeof(physaddr_str), "hw.contigmem"
183 error = sysctlbyname(physaddr_str, &physaddr, &sysctl_size,
186 RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
187 "from %s\n", j, hpi->hugedir);
192 seg->iova = physaddr;
193 seg->hugepage_sz = page_sz;
195 seg->nchannel = mcfg->nchannel;
196 seg->nrank = mcfg->nrank;
199 rte_fbarray_set_used(arr, ms_idx);
201 RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
203 seg_idx, addr, physaddr, page_sz);
205 total_mem += seg->len;
/* stop early once the requested amount has been reserved */
207 if (total_mem >= internal_config.memory)
210 if (total_mem < internal_config.memory) {
211 RTE_LOG(ERR, EAL, "Couldn't reserve requested memory, "
212 "requested: %" PRIu64 "M "
213 "available: %" PRIu64 "M\n",
214 internal_config.memory >> 20, total_mem >> 20);
/*
 * Per-segment state for the attach walk (fd of the contigmem device plus a
 * running segment index). NOTE(review): struct body is elided in this
 * sampled chunk.
 */
220 struct attach_walk_args {
/*
 * rte_memseg_walk() callback: re-map one memseg from the contigmem fd into
 * this (secondary) process at the segment's recorded virtual address.
 * MAP_FIXED is used because the target VA must match the primary process.
 * NOTE(review): file offset is seg_idx * EAL_PAGE_SIZE where EAL_PAGE_SIZE
 * is sysconf(_SC_PAGESIZE), not the hugepage size — confirm the contigmem
 * device's offset semantics against the full source.
 */
225 attach_segment(const struct rte_memseg_list *msl __rte_unused,
226 const struct rte_memseg *ms, void *arg)
228 struct attach_walk_args *wa = arg;
231 addr = mmap(ms->addr, ms->len, PROT_READ | PROT_WRITE,
232 MAP_SHARED | MAP_FIXED, wa->fd_hugepage,
233 wa->seg_idx * EAL_PAGE_SIZE);
/* treat both mmap failure and a relocated mapping as errors */
234 if (addr == MAP_FAILED || addr != ms->addr)
/*
 * Secondary-process attach: for each configured hugepage size, open the
 * corresponding contigmem device and map every memseg into this process
 * via rte_memseg_walk(attach_segment, ...).
 * NOTE(review): sampled chunk — error-cleanup paths, fd close, and return
 * statements are partially elided.
 */
242 rte_eal_hugepage_attach(void)
244 const struct hugepage_info *hpi;
245 int fd_hugepage = -1;
248 hpi = &internal_config.hugepage_info[0];
250 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
251 const struct hugepage_info *cur_hpi = &hpi[i];
252 struct attach_walk_args wa;
/* zero wa so seg_idx starts at 0 for each hugepage size */
254 memset(&wa, 0, sizeof(wa));
256 /* Obtain a file descriptor for contiguous memory */
257 fd_hugepage = open(cur_hpi->hugedir, O_RDWR);
258 if (fd_hugepage < 0) {
259 RTE_LOG(ERR, EAL, "Could not open %s\n",
263 wa.fd_hugepage = fd_hugepage;
266 /* Map the contiguous memory into each memory segment */
267 if (rte_memseg_walk(attach_segment, &wa) < 0) {
268 RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
269 wa.seg_idx, cur_hpi->hugedir);
277 /* hugepage_info is no longer required */
/* error-path cleanup: close the device fd if it was opened */
281 if (fd_hugepage >= 0)
287 rte_eal_using_phys_addrs(void)