/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>

#include "malloc_heap.h"
#include "malloc_elem.h"
#include "eal_private.h"

static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	const struct rte_mem_config *mcfg;
	const struct rte_memzone *mz;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		mz = &mcfg->memzone[i];
		if (mz->addr != NULL && !strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
			return &mcfg->memzone[i];
	}

	return NULL;
}

/* Return the size of the largest free element of the given heap. If no
 * heap has been specified, search all heaps and also store in *s the
 * socket id of the heap holding the largest free element. The returned
 * size already has the element overhead and the alignment deducted. */
static size_t
find_heap_max_free_elem(int *s, unsigned align)
{
	struct rte_mem_config *mcfg;
	struct rte_malloc_socket_stats stats;
	int i, socket = *s;
	size_t len = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		if ((socket != SOCKET_ID_ANY) && (socket != i))
			continue;
		malloc_heap_get_stats(&mcfg->malloc_heaps[i], &stats);
		if (stats.greatest_free_size > len) {
			len = stats.greatest_free_size;
			*s = i;
		}
	}

	return len - MALLOC_ELEM_OVERHEAD - align;
}

static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	size_t requested_len;
	int socket, i;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}
	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t) RTE_CACHE_LINE_MASK);
	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}
	if ((socket_id != SOCKET_ID_ANY) && (socket_id >= RTE_MAX_NUMA_NODES)) {
		rte_errno = EINVAL;
		return NULL;
	}
	if (!rte_eal_has_hugepages())
		socket_id = SOCKET_ID_ANY;

	/* len == 0 means "reserve the largest element available" */
	if (len == 0) {
		if (bound != 0)
			requested_len = bound;
		else
			requested_len = find_heap_max_free_elem(&socket_id, align);
	}

	if (socket_id == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_id;

	/* allocate memory on heap */
	void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
			requested_len, flags, align, bound);

	if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
		/* try other heaps */
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
			if (socket == i)
				continue;
			mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
					NULL, requested_len, flags, align, bound);
			if (mz_addr != NULL)
				break;
		}
	}
	if (mz_addr == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);

	/* fill the zone in config */
	struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = rte_malloc_virt2phy(mz_addr);
	mz->addr = mz_addr;
	mz->len = (requested_len == 0 ? elem->size : requested_len);
	mz->hugepage_sz = elem->ms->hugepage_sz;
	mz->socket_id = elem->ms->socket_id;
	mz->flags = 0;
	mz->memseg_id = elem->ms - rte_eal_get_configuration()->mem_config->memseg;

	return mz;
}

static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len,
				int socket_id, unsigned flags, unsigned align,
				unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}

/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary). If the allocation cannot be done,
 * return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
			    unsigned flags, unsigned align, unsigned bound)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
					       align, bound);
}
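
/*
 * Illustrative sketch (not part of the EAL): a bounded reservation is
 * typically used when hardware requires that an object does not cross a
 * given boundary. The zone name and the sizes below are made up for the
 * example, and the call assumes rte_eal_init() has already succeeded.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve_bounded("example_bounded_zone", 8192,
 *			SOCKET_ID_ANY, 0, 4096, 65536);
 *	if (mz == NULL)
 *		rte_panic("cannot reserve bounded memzone\n");
 *
 * The 8 KB zone is aligned on 4 KB and is guaranteed not to cross a
 * 64 KB boundary; both align and bound must be powers of two.
 */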

/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			    unsigned flags, unsigned align)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
					       align, 0);
}

/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		    unsigned flags)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id,
					       flags, RTE_CACHE_LINE_SIZE, 0);
}
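
/*
 * Illustrative sketch (not part of the EAL): typical use from an
 * application after rte_eal_init() has succeeded. The zone name and
 * size are made up for the example; passing a length of 0 reserves the
 * largest free block available.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve("example_zone", 4096, rte_socket_id(), 0);
 *	if (mz == NULL)
 *		rte_panic("cannot reserve memzone: %s\n",
 *			rte_strerror(rte_errno));
 *	memset(mz->addr, 0, mz->len);
 */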

/*
 * Look up the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	memzone = memzone_lookup_thread_unsafe(name);
	rte_rwlock_read_unlock(&mcfg->mlock);

	return memzone;
}
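
/*
 * Illustrative sketch (not part of the EAL): because the memzone table
 * lives in the shared memory configuration, a secondary process can
 * retrieve a zone reserved by the primary using only its name (the name
 * below is made up for the example).
 *
 *	const struct rte_memzone *mz = rte_memzone_lookup("example_zone");
 *
 *	if (mz == NULL)
 *		rte_panic("memzone not found\n");
 */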

/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	/* dump all zones */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			break;
		fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
		       ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
		       mcfg->memzone[i].name,
		       mcfg->memzone[i].phys_addr,
		       mcfg->memzone[i].len,
		       mcfg->memzone[i].addr,
		       mcfg->memzone[i].socket_id,
		       mcfg->memzone[i].flags);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
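
/*
 * Illustrative sketch (not part of the EAL): dump every reserved zone to
 * the standard output, e.g. from a debug command handler.
 *
 *	rte_memzone_dump(stdout);
 */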

/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return rte_eal_malloc_heap_init();
}

/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		      void *arg)
{
	struct rte_mem_config *mcfg;
	unsigned i;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr != NULL)
			(*func)(&mcfg->memzone[i], arg);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
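
/*
 * Illustrative sketch (not part of the EAL): count the reserved zones
 * with a walk callback. The helper name is made up for the example.
 *
 *	static void
 *	count_zone(const struct rte_memzone *mz __rte_unused, void *arg)
 *	{
 *		(*(unsigned *)arg)++;
 *	}
 *
 *	unsigned nb_zones = 0;
 *	rte_memzone_walk(count_zone, &nb_zones);
 */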