/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_eal_trace.h>

#include "malloc_heap.h"
#include "malloc_elem.h"
#include "eal_private.h"
#include "eal_memcfg.h"

static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	const struct rte_memzone *mz;
	int i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	i = rte_fbarray_find_next_used(arr, 0);
	while (i >= 0) {
		mz = rte_fbarray_get(arr, i);
		if (mz->addr != NULL &&
				!strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
			return mz;
		i = rte_fbarray_find_next_used(arr, i + 1);
	}
	return NULL;
}

#define MEMZONE_KNOWN_FLAGS (RTE_MEMZONE_2MB | RTE_MEMZONE_1GB \
	| RTE_MEMZONE_16MB | RTE_MEMZONE_16GB | RTE_MEMZONE_256KB \
	| RTE_MEMZONE_256MB | RTE_MEMZONE_512MB | RTE_MEMZONE_4GB \
	| RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_IOVA_CONTIG)

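/*
 * Illustrative sketch, not part of the EAL sources: callers combine the
 * flags above to steer page selection. With RTE_MEMZONE_SIZE_HINT_ONLY the
 * page-size flag is a preference rather than a hard requirement, so the
 * allocation can fall back to other page sizes. Zone name is hypothetical:
 *
 *	mz = rte_memzone_reserve("example_2mb", 4096, SOCKET_ID_ANY,
 *			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
 */
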
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned int flags, unsigned int align,
		unsigned int bound)
{
	struct rte_memzone *mz;
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	void *mz_addr;
	size_t requested_len;
	int mz_idx;
	bool contig;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	/* no more room in config */
	if (arr->count >= arr->len) {
		RTE_LOG(ERR, EAL,
		"%s(): Number of requested memzone segments exceeds RTE_MAX_MEMZONE\n",
			__func__);
		rte_errno = ENOSPC;
		return NULL;
	}
	if (strlen(name) > sizeof(mz->name) - 1) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n",
			__func__, name);
		rte_errno = ENAMETOOLONG;
		return NULL;
	}
	/* zone already exists */
	if (memzone_lookup_thread_unsafe(name) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}
	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}
	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;
	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}
	len = RTE_ALIGN_CEIL(len, RTE_CACHE_LINE_SIZE);
	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
	/* check that boundary condition is valid */
	if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}
	if ((socket_id != SOCKET_ID_ANY) && socket_id < 0) {
		rte_errno = EINVAL;
		return NULL;
	}
	if ((flags & ~MEMZONE_KNOWN_FLAGS) != 0) {
		rte_errno = EINVAL;
		return NULL;
	}
	/* only set socket to SOCKET_ID_ANY if we aren't allocating for an
	 * external heap.
	 */
	if (!rte_eal_has_hugepages() && socket_id < RTE_MAX_NUMA_NODES)
		socket_id = SOCKET_ID_ANY;
	contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
	/* malloc only cares about size flags, remove contig flag from flags */
	flags &= ~RTE_MEMZONE_IOVA_CONTIG;
	if (len == 0 && bound == 0) {
		/* no size constraints were placed, so use malloc elem len */
		requested_len = 0;
		mz_addr = malloc_heap_alloc_biggest(NULL, socket_id, flags,
				align, contig);
	} else {
		if (len == 0)
			requested_len = bound;
		/* allocate memory on heap */
		mz_addr = malloc_heap_alloc(NULL, requested_len, socket_id,
				flags, align, bound, contig);
	}
	if (mz_addr == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	struct malloc_elem *elem = malloc_elem_from_data(mz_addr);

	/* fill the zone in config */
	mz_idx = rte_fbarray_find_next_free(arr, 0);
	if (mz_idx < 0) {
		mz = NULL;
	} else {
		rte_fbarray_set_used(arr, mz_idx);
		mz = rte_fbarray_get(arr, mz_idx);
	}
	if (mz == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone\n", __func__);
		malloc_heap_free(elem);
		rte_errno = ENOSPC;
		return NULL;
	}

	strlcpy(mz->name, name, sizeof(mz->name));
	mz->iova = rte_malloc_virt2iova(mz_addr);
	mz->addr = mz_addr;
	mz->len = requested_len == 0 ?
			elem->size - elem->pad - MALLOC_ELEM_OVERHEAD :
			requested_len;
	mz->hugepage_sz = elem->msl->page_sz;
	mz->socket_id = elem->msl->socket_id;
	mz->flags = 0;

	return mz;
}

static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
		unsigned int flags, unsigned int align, unsigned int bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_eal_trace_memzone_reserve(name, len, socket_id, flags, align,
		bound, mz);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}

/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary). If the allocation cannot be done,
 * return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
			unsigned flags, unsigned align, unsigned bound)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
			align, bound);
}

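/*
 * Illustrative sketch, not part of the EAL sources: a bounded reservation
 * additionally guarantees the zone does not cross the given power-of-two
 * boundary, e.g. a 4 KB zone kept inside one 64 KB window. Zone name is
 * hypothetical:
 *
 *	mz = rte_memzone_reserve_bounded("example_bounded", 4096,
 *			SOCKET_ID_ANY, 0, 64, 65536);
 */
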
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			unsigned flags, unsigned align)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
			align, 0);
}

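/*
 * Illustrative sketch, not part of the EAL sources: requesting a stricter
 * alignment than the default cache line, e.g. a 4 KB-aligned zone for a
 * hardware descriptor ring. Zone name is hypothetical:
 *
 *	mz = rte_memzone_reserve_aligned("example_ring", 8192,
 *			rte_socket_id(), 0, 4096);
 */
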
/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		unsigned flags)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id,
			flags, RTE_CACHE_LINE_SIZE, 0);
}

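/*
 * Illustrative sketch, not part of the EAL sources: the common case,
 * reserving a cache-line aligned zone and checking rte_errno on failure.
 * Zone name is hypothetical:
 *
 *	const struct rte_memzone *mz = rte_memzone_reserve("example_zone",
 *			1 << 20, rte_socket_id(), 0);
 *	if (mz == NULL)
 *		printf("reserve failed: %s\n", rte_strerror(rte_errno));
 */
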
int
rte_memzone_free(const struct rte_memzone *mz)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	struct rte_memzone *found_mz;
	int ret = 0;
	void *addr = NULL;
	unsigned int idx;

	if (mz == NULL)
		return -EINVAL;
	rte_strlcpy(name, mz->name, RTE_MEMZONE_NAMESIZE);
	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	rte_rwlock_write_lock(&mcfg->mlock);
	idx = rte_fbarray_find_idx(arr, mz);
	found_mz = rte_fbarray_get(arr, idx);
	if (found_mz == NULL) {
		ret = -EINVAL;
	} else if (found_mz->addr == NULL) {
		RTE_LOG(ERR, EAL, "Memzone is not allocated\n");
		ret = -EINVAL;
	} else {
		addr = found_mz->addr;
		memset(found_mz, 0, sizeof(*found_mz));
		rte_fbarray_set_free(arr, idx);
	}
	rte_rwlock_write_unlock(&mcfg->mlock);

	if (addr != NULL)
		rte_free(addr);

	rte_eal_trace_memzone_free(name, addr, ret);
	return ret;
}

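/*
 * Illustrative sketch, not part of the EAL sources: freeing returns the
 * zone's memory to the heap; the descriptor must not be dereferenced
 * afterwards:
 *
 *	if (rte_memzone_free(mz) != 0)
 *		printf("free failed\n");
 *	mz = NULL;
 */
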
/*
 * Lookup for the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);

	memzone = memzone_lookup_thread_unsafe(name);

	rte_rwlock_read_unlock(&mcfg->mlock);

	rte_eal_trace_memzone_lookup(name, memzone);
	return memzone;
}

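/*
 * Illustrative sketch, not part of the EAL sources: a secondary process
 * typically locates a zone reserved by the primary by its name. Zone name
 * is hypothetical:
 *
 *	const struct rte_memzone *mz = rte_memzone_lookup("example_zone");
 *	if (mz != NULL)
 *		printf("found at %p, len 0x%zx\n", mz->addr, mz->len);
 */
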
static void
dump_memzone(const struct rte_memzone *mz, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl = NULL;
	void *cur_addr, *mz_end;
	struct rte_memseg *ms;
	int mz_idx, ms_idx;
	size_t page_sz;
	FILE *f = arg;

	mz_idx = rte_fbarray_find_idx(&mcfg->memzones, mz);

	fprintf(f, "Zone %u: name:<%s>, len:0x%zx, virt:%p, "
			"socket_id:%"PRId32", flags:%"PRIx32"\n",
		mz_idx, mz->name, mz->len, mz->addr,
		mz->socket_id, mz->flags);

	/* go through each page occupied by this memzone */
	msl = rte_mem_virt2memseg_list(mz->addr);
	if (!msl) {
		RTE_LOG(DEBUG, EAL, "Skipping bad memzone\n");
		return;
	}
	page_sz = (size_t)mz->hugepage_sz;
	cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
	mz_end = RTE_PTR_ADD(cur_addr, mz->len);

	fprintf(f, "physical segments used:\n");
	ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
	ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);

	do {
		fprintf(f, "  addr: %p iova: 0x%" PRIx64 " "
				"len: 0x%zx pagesz: 0x%zx\n",
			cur_addr, ms->iova, ms->len, page_sz);

		/* advance VA to next page */
		cur_addr = RTE_PTR_ADD(cur_addr, page_sz);

		/* memzones occupy contiguous segments */
		++ms;
	} while (cur_addr < mz_end);
}

/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
	rte_memzone_walk(dump_memzone, f);
}

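/*
 * Illustrative sketch, not part of the EAL sources: dump every reserved
 * zone, e.g. from a debug command handler:
 *
 *	rte_memzone_dump(stdout);
 */
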
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	int ret = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			rte_fbarray_init(&mcfg->memzones, "memzone",
			RTE_MAX_MEMZONE, sizeof(struct rte_memzone))) {
		RTE_LOG(ERR, EAL, "Cannot allocate memzone list\n");
		ret = -1;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
			rte_fbarray_attach(&mcfg->memzones)) {
		RTE_LOG(ERR, EAL, "Cannot attach to memzone list\n");
		ret = -1;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	return ret;
}

/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		void *arg)
{
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	int i;

	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	rte_rwlock_read_lock(&mcfg->mlock);
	i = rte_fbarray_find_next_used(arr, 0);
	while (i >= 0) {
		struct rte_memzone *mz = rte_fbarray_get(arr, i);
		(*func)(mz, arg);
		i = rte_fbarray_find_next_used(arr, i + 1);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
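
/*
 * Illustrative sketch, not part of the EAL sources: a walk callback that
 * sums the length of all reserved zones. Callback name is hypothetical:
 *
 *	static void
 *	sum_zone_len(const struct rte_memzone *mz, void *arg)
 *	{
 *		*(size_t *)arg += mz->len;
 *	}
 *
 *	size_t total = 0;
 *	rte_memzone_walk(sum_zone_len, &total);
 */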