/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>

#include "malloc_heap.h"
#include "malloc_elem.h"
#include "eal_private.h"
static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	const struct rte_mem_config *mcfg;
	const struct rte_memzone *mz;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		mz = &mcfg->memzone[i];
		if (mz->addr != NULL && !strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
			return &mcfg->memzone[i];
	}

	return NULL;
}
static inline struct rte_memzone *
get_next_free_memzone(void)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			return &mcfg->memzone[i];
	}

	return NULL;
}
/* Return the length (less element overhead and alignment) of the greatest
 * free block in the heap of the specified socket. If no socket has been
 * specified (SOCKET_ID_ANY), search all heaps and update *s with the
 * socket that holds the greatest free block. */
static size_t
find_heap_max_free_elem(int *s, unsigned align)
{
	struct rte_mem_config *mcfg;
	struct rte_malloc_socket_stats stats;
	int i, socket = *s;
	size_t len = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		if ((socket != SOCKET_ID_ANY) && (socket != i))
			continue;

		malloc_heap_get_stats(&mcfg->malloc_heaps[i], &stats);
		if (stats.greatest_free_size > len) {
			len = stats.greatest_free_size;
			*s = i;
		}
	}

	if (len < MALLOC_ELEM_OVERHEAD + align)
		return 0;

	return len - MALLOC_ELEM_OVERHEAD - align;
}
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned int flags, unsigned int align,
		unsigned int bound)
{
	struct rte_memzone *mz;
	struct rte_mem_config *mcfg;
	size_t requested_len;
	int socket, i;
	bool contig;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_cnt >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	if (strlen(name) > sizeof(mz->name) - 1) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n",
			__func__, name);
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}
	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t) RTE_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}

	if ((socket_id != SOCKET_ID_ANY) &&
	    (socket_id >= RTE_MAX_NUMA_NODES || socket_id < 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_eal_has_hugepages())
		socket_id = SOCKET_ID_ANY;

	contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
	/* malloc only cares about size flags, remove contig flag from flags */
	flags &= ~RTE_MEMZONE_IOVA_CONTIG;

	if (len == 0) {
		/* len == 0 is only allowed for non-contiguous zones */
		if (contig) {
			RTE_LOG(DEBUG, EAL, "Reserving zero-length contiguous memzones is not supported\n");
			rte_errno = EINVAL;
			return NULL;
		}
		if (bound != 0)
			requested_len = bound;
		else {
			requested_len = find_heap_max_free_elem(&socket_id, align);
			if (requested_len == 0) {
				rte_errno = ENOMEM;
				return NULL;
			}
		}
	}

	if (socket_id == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_id;

	/* allocate memory on heap */
	void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
			requested_len, flags, align, bound, contig);

	if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
		/* try other heaps */
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
			if (socket == i)
				continue;
			mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
					NULL, requested_len, flags, align,
					bound, contig);
			if (mz_addr != NULL)
				break;
		}
	}

	if (mz_addr == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	struct malloc_elem *elem = malloc_elem_from_data(mz_addr);

	/* fill the zone in config */
	mz = get_next_free_memzone();
	if (mz == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
				"in config!\n", __func__);
		malloc_elem_free(elem);
		rte_errno = ENOSPC;
		return NULL;
	}

	mcfg->memzone_cnt++;
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->iova = rte_malloc_virt2iova(mz_addr);
	mz->addr = mz_addr;
	mz->len = (requested_len == 0 ? elem->size : requested_len);
	mz->hugepage_sz = elem->msl->page_sz;
	mz->socket_id = elem->msl->socket_id;
	mz->flags = 0;

	return mz;
}
static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
		unsigned int flags, unsigned int align, unsigned int bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary). If the allocation cannot be done,
 * return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
			    unsigned flags, unsigned align, unsigned bound)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
					       align, bound);
}
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			    unsigned flags, unsigned align)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
					       align, 0);
}
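/*
 * Illustrative usage sketch, not part of the EAL itself: reserve a 2 MB,
 * 4 KB-aligned, IOVA-contiguous zone, e.g. for a DMA descriptor area.
 * The zone name, size and alignment below are hypothetical.
 */
static inline const struct rte_memzone *
example_reserve_dma_zone(int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned("example_dma_zone", 2 * 1024 * 1024,
			socket_id, RTE_MEMZONE_IOVA_CONTIG, 4096);
	if (mz == NULL)
		RTE_LOG(ERR, EAL, "example: reservation failed: %s\n",
			rte_strerror(rte_errno));
	return mz;
}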
/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		    unsigned flags)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id,
					       flags, RTE_CACHE_LINE_SIZE, 0);
}
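/*
 * Illustrative usage sketch, not part of the EAL itself: reserve a
 * cache-line aligned scratch zone on any NUMA socket and zero it before
 * handing out its address. The zone name and length are hypothetical.
 */
static inline void *
example_reserve_scratch_buffer(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("example_scratch", 65536, SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return NULL; /* rte_errno tells why (ENOMEM, EEXIST, ...) */
	memset(mz->addr, 0, mz->len);
	return mz->addr;
}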
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

	addr = mcfg->memzone[idx].addr;
	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	if (addr != NULL)
		rte_free(addr);

	return ret;
}
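/*
 * Illustrative usage sketch, not part of the EAL itself: release a zone
 * once nothing references its memory anymore. The helper name is
 * hypothetical; rte_memzone_free() returns 0 on success and a negative
 * value if the descriptor is invalid or already freed.
 */
static inline void
example_release_zone(const struct rte_memzone *mz)
{
	int ret;

	if (mz == NULL)
		return;
	ret = rte_memzone_free(mz);
	if (ret != 0)
		RTE_LOG(WARNING, EAL, "example: rte_memzone_free returned %d\n",
			ret);
}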
/*
 * Lookup for the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);

	memzone = memzone_lookup_thread_unsafe(name);

	rte_rwlock_read_unlock(&mcfg->mlock);

	return memzone;
}
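/*
 * Illustrative usage sketch, not part of the EAL itself: a secondary
 * process can attach to a zone created by the primary by looking it up
 * by name, since the memzone table lives in the shared rte_mem_config.
 * The zone name is hypothetical.
 */
static inline void *
example_attach_to_zone(void)
{
	const struct rte_memzone *mz = rte_memzone_lookup("example_scratch");

	if (mz == NULL) {
		RTE_LOG(DEBUG, EAL, "example: zone not found\n");
		return NULL;
	}
	return mz->addr;
}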
static void
dump_memzone(const struct rte_memzone *mz, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl = NULL;
	void *cur_addr, *mz_end;
	struct rte_memseg *ms;
	int mz_idx, ms_idx;
	size_t page_sz;
	FILE *f = arg;

	mz_idx = mz - mcfg->memzone;

	fprintf(f, "Zone %u: name:<%s>, len:0x%zx, virt:%p, "
				"socket_id:%"PRId32", flags:%"PRIx32"\n",
			mz_idx, mz->name, mz->len, mz->addr,
			mz->socket_id, mz->flags);

	/* go through each page occupied by this memzone */
	msl = rte_mem_virt2memseg_list(mz->addr);
	if (msl == NULL) {
		RTE_LOG(DEBUG, EAL, "Skipping bad memzone\n");
		return;
	}
	page_sz = (size_t)mz->hugepage_sz;
	cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
	mz_end = RTE_PTR_ADD(cur_addr, mz->len);

	fprintf(f, "physical segments used:\n");
	ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
	ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);

	do {
		fprintf(f, "  addr: %p iova: 0x%" PRIx64 " "
				"len: 0x%zx pagesz: 0x%zx\n",
			cur_addr, ms->iova, ms->len, page_sz);

		/* advance VA to next page */
		cur_addr = RTE_PTR_ADD(cur_addr, page_sz);

		/* memzones occupy contiguous segments */
		++ms;
	} while (cur_addr < mz_end);
}
/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
	rte_memzone_walk(dump_memzone, f);
}
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	rte_rwlock_write_lock(&mcfg->mlock);

	/* delete all zones */
	mcfg->memzone_cnt = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return rte_eal_malloc_heap_init();
}
/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		      void *arg)
{
	struct rte_mem_config *mcfg;
	unsigned i;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr != NULL)
			(*func)(&mcfg->memzone[i], arg);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
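/*
 * Illustrative usage sketch, not part of the EAL itself: rte_memzone_walk()
 * is useful beyond dumping. This hypothetical callback tallies the total
 * number of bytes currently reserved across all memzones.
 */
static void
example_sum_zone_len(const struct rte_memzone *mz, void *arg)
{
	size_t *total = arg;

	*total += mz->len;
}

static inline size_t
example_total_reserved_bytes(void)
{
	size_t total = 0;

	rte_memzone_walk(example_sum_zone_len, &total);
	return total;
}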