/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Dmitry Kozlyuk
 */

#include <rte_errno.h>
#include <rte_os.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_private.h"
#include "eal_windows.h"
int
eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
{
	/* Hugepages have no associated files in Windows. */
	RTE_SET_USED(list_idx);
	RTE_SET_USED(seg_idx);
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}
int
eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
{
	/* Hugepages have no associated files in Windows. */
	RTE_SET_USED(list_idx);
	RTE_SET_USED(seg_idx);
	RTE_SET_USED(offset);
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}
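
/*
 * Allocate one hugepage-backed segment: either anywhere in the address
 * space (requested_addr == NULL) or at an address that has already been
 * reserved. On success, the descriptor `ms` is filled in; on failure,
 * any committed memory is released via the error path below.
 */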
static int
alloc_seg(struct rte_memseg *ms, void *requested_addr, int socket_id,
	struct hugepage_info *hi)
{
	HANDLE current_process;
	unsigned int numa_node;
	size_t alloc_sz;
	void *addr;
	rte_iova_t iova = RTE_BAD_IOVA;
	PSAPI_WORKING_SET_EX_INFORMATION info;
	PSAPI_WORKING_SET_EX_BLOCK *page;
	if (ms->len > 0) {
		/* If a segment is already allocated as needed, return it. */
		if ((ms->addr == requested_addr) &&
			(ms->socket_id == socket_id) &&
			(ms->hugepage_sz == hi->hugepage_sz)) {
			return 0;
		}

		/* Bugcheck, should not happen. */
		RTE_LOG(DEBUG, EAL, "Attempted to reallocate segment %p "
			"(size %zu) on socket %d\n", ms->addr,
			ms->len, ms->socket_id);
		return -1;
	}

	current_process = GetCurrentProcess();
	numa_node = eal_socket_numa_node(socket_id);
	alloc_sz = hi->hugepage_sz;

	if (requested_addr == NULL) {
		/* Request a new chunk of memory from the OS. */
		addr = eal_mem_alloc_socket(alloc_sz, socket_id);
		if (addr == NULL) {
			RTE_LOG(DEBUG, EAL, "Cannot allocate %zu bytes "
				"on socket %d\n", alloc_sz, socket_id);
			return -1;
		}
	} else {
		/* Requested address is already reserved, commit memory. */
		addr = eal_mem_commit(requested_addr, alloc_sz, socket_id);

		/* During commitment, memory is temporarily freed and might
		 * be allocated by a different non-EAL thread. This is a fatal
		 * error, because it breaks MSL assumptions.
		 */
		if ((addr != NULL) && (addr != requested_addr)) {
			RTE_LOG(CRIT, EAL, "Address %p occupied by an alien "
				"allocation - MSL is not VA-contiguous!\n",
				requested_addr);
			return -1;
		}

		if (addr == NULL) {
			RTE_LOG(DEBUG, EAL, "Cannot commit reserved memory %p "
				"(size %zu) on socket %d\n",
				requested_addr, alloc_sz, socket_id);
			return -1;
		}
	}

	/* Force the OS to allocate a physical page and select a NUMA node.
	 * Hugepages are not pageable in Windows, so there's no race
	 * for the physical address.
	 */
	*(volatile int *)addr = *(volatile int *)addr;

	/* Only try to obtain IOVA if it is available, so that applications
	 * that do not need IOVA can use this allocator.
	 */
	if (rte_eal_using_phys_addrs()) {
		iova = rte_mem_virt2iova(addr);
		if (iova == RTE_BAD_IOVA) {
			RTE_LOG(DEBUG, EAL,
				"Cannot get IOVA of allocated segment\n");
			goto error;
		}
	}

	/* Only the "Ex" function can handle hugepages. */
	info.VirtualAddress = addr;
	if (!QueryWorkingSetEx(current_process, &info, sizeof(info))) {
		RTE_LOG_WIN32_ERR("QueryWorkingSetEx(%p)", addr);
		goto error;
	}

	page = &info.VirtualAttributes;
	if (!page->Valid || !page->LargePage) {
		RTE_LOG(DEBUG, EAL, "Got regular page instead of a hugepage\n");
		goto error;
	}
	if (page->Node != numa_node) {
		RTE_LOG(DEBUG, EAL,
			"NUMA node hint %u (socket %d) not respected, got %u\n",
			numa_node, socket_id, page->Node);
		goto error;
	}
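
	/* All checks passed: record the segment's properties. */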
	ms->addr = addr;
	ms->hugepage_sz = hi->hugepage_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	return 0;

error:
	/* Only jump here when `addr` and `alloc_sz` are valid. */
	if (eal_mem_decommit(addr, alloc_sz) && (rte_errno == EADDRNOTAVAIL)) {
		/* During decommitment, memory is temporarily returned
		 * to the system and the address may become unavailable.
		 */
		RTE_LOG(CRIT, EAL, "Address %p occupied by an alien "
			"allocation - MSL is not VA-contiguous!\n", addr);
	}
	return -1;
}

static int
free_seg(struct rte_memseg *ms)
{
	if (eal_mem_decommit(ms->addr, ms->len)) {
		if (rte_errno == EADDRNOTAVAIL) {
			/* See alloc_seg() for an explanation. */
			RTE_LOG(CRIT, EAL, "Address %p occupied by an alien "
				"allocation - MSL is not VA-contiguous!\n",
				ms->addr);
		}
		return -1;
	}

	/* Must clear the segment, because alloc_seg() inspects it. */
	memset(ms, 0, sizeof(*ms));
	return 0;
}
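
/* Parameters passed to alloc_seg_walk() via the memseg list walk. */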
struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};
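
/*
 * Walk callback: try to allocate wa->n_segs pages from one memseg list.
 * Returns 1 if any segments were allocated (stopping the walk), 0 to
 * continue with the next list, or -1 on failure in exact mode.
 */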
static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in the memseg list */
	if (wa->exact) {
		/* if we require an exact number of pages in a list, find them */
		cur_idx = rte_fbarray_find_next_n_free(
			&cur_msl->memseg_arr, 0, need);
		if (cur_idx < 0)
			return 0;
		start_idx = cur_idx;
	} else {
		int cur_len;

		/* we don't require an exact number of pages, so we're going to
		 * go for best-effort allocation. that means finding the biggest
		 * unused block and going with that.
		 */
		cur_idx = rte_fbarray_find_biggest_free(
			&cur_msl->memseg_arr, 0);
		if (cur_idx < 0)
			return 0;
		start_idx = cur_idx;

		/* adjust the size to possibly be smaller than the original
		 * request, but do not allow it to be bigger.
		 */
		cur_len = rte_fbarray_find_contig_free(
			&cur_msl->memseg_arr, cur_idx);
		need = RTE_MIN(need, (unsigned int)cur_len);
	}
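
	/* Commit each allocated page at its reserved address in the list. */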
	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va, cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi)) {
			RTE_LOG(DEBUG, EAL, "attempted to allocate %u segments, "
				"but only %u were allocated\n", need, i);

			/* if the exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up the segments allocated so far */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr = &cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				rte_fbarray_set_free(arr, j);

				if (free_seg(tmp))
					RTE_LOG(DEBUG, EAL, "Cannot free page\n");
			}
			/* clear the output array */
			if (wa->ms)
				memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);

			return -1;
		}
		if (wa->ms)
			wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}

out:
	wa->segs_allocated = i;
	if (i > 0)
		cur_msl->version++;

	/* if we didn't allocate any segments, move on to the next list */
	return i > 0;
}
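
/* Parameters passed to free_seg_walk() via the memseg list walk. */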
struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};
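
/*
 * Walk callback: find the memseg list that owns wa->ms and free the
 * segment. Returns 1 on success, -1 on failure, or 0 if the segment
 * does not belong to this list (continue the walk).
 */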
static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx, ret;

	start_addr = (uintptr_t)msl->base_va;
	end_addr = start_addr + msl->len;

	if ((uintptr_t)wa->ms->addr < start_addr ||
		(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;

	/* the walk gives us a const list; modify it via mem_config */
	found_msl = &mcfg->memsegs[msl_idx];
	found_msl->version++;

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);

	ret = free_seg(wa->ms);

	return (ret < 0) ? (-1) : 1;
}
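
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * allocate two 2 MB segments on socket 0, requiring the exact count,
 * then release them.
 *
 *	struct rte_memseg *segs[2];
 *
 *	if (eal_memalloc_alloc_seg_bulk(segs, 2, RTE_PGSIZE_2M, 0, true) == 2)
 *		eal_memalloc_free_seg_bulk(segs, 2);
 */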
int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs,
		size_t page_sz, int socket, bool exact)
{
	unsigned int i;
	int ret;
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (internal_conf->legacy_mem) {
		RTE_LOG(ERR, EAL, "dynamic allocation not supported in legacy mode\n");
		return -ENOTSUP;
	}

	for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_conf->hugepage_info[i];

		if (page_sz == hpi->hugepage_sz) {
			hi = hpi;
			break;
		}
	}
	if (hi == NULL) {
		RTE_LOG(ERR, EAL, "cannot find relevant hugepage_info entry\n");
		return -1;
	}

	memset(&wa, 0, sizeof(wa));
	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	/* memalloc is locked, so it's safe to use the thread-unsafe version */
	ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
	if (ret == 0) {
		RTE_LOG(ERR, EAL, "cannot find suitable memseg_list\n");
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

	return ret;
}

struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms = NULL;

	eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true);
	/* ms is still NULL if the allocation failed */
	return ms;
}

int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* dynamic free not supported in legacy mode */
	if (internal_conf->legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		size_t i;
		int walk_res;

		/* if this page is marked as unfreeable, fail */
		if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
			ret = -1;
			continue;
		}

		memset(&wa, 0, sizeof(wa));

		/* find the hugepage_info entry matching this page size */
		for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
			hi = &internal_conf->hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == RTE_DIM(internal_conf->hugepage_info)) {
			RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		/* memalloc is locked, so it's safe to use the thread-unsafe
		 * version
		 */
		walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
				&wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
		ret = -1;
	}
	return ret;
}

int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	return eal_memalloc_free_seg_bulk(&ms, 1);
}

int
eal_memalloc_sync_with_primary(void)
{
	/* No multi-process support. */
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}

int
eal_memalloc_cleanup(void)
{
	/* not implemented */
	return 0;
}

int
eal_memalloc_init(void)
{
	/* No action required. */
	return 0;
}