/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>

#include "eal_private.h"

/* internal copy of free memory segments */
static struct rte_memseg *free_memseg = NULL;

static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	const struct rte_mem_config *mcfg;
	unsigned i;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/*
	 * the algorithm is linear, but there are few zones and this
	 * function should only be called at init time
	 */
	for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
		if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
			return &mcfg->memzone[i];
	}

	return NULL;
}

/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		    unsigned flags)
{
	/* default to cache-line alignment */
	return rte_memzone_reserve_aligned(name, len, socket_id, flags,
					   RTE_CACHE_LINE_SIZE);
}
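
/*
 * Illustrative usage sketch (not part of this file): reserving a
 * cache-line aligned zone after rte_eal_init() has completed. The zone
 * name "example_zone" and the 4 KB size are made-up values; a caller
 * would also need <rte_lcore.h> for rte_socket_id().
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve("example_zone", 4096,
 *			rte_socket_id(), 0);
 *	if (mz == NULL)
 *		printf("reserve failed: %s\n", rte_strerror(rte_errno));
 *	else
 *		printf("zone at virt %p, phys 0x%"PRIx64"\n",
 *			mz->addr, mz->phys_addr);
 */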

/*
 * Helper function for memzone_reserve_aligned_thread_unsafe().
 * Calculate the address offset from the start of the segment.
 * Align the offset so that it satisfies the start alignment and so that
 * a buffer of the requested length does not cross the specified boundary.
 */
static inline phys_addr_t
align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
		size_t bound)
{
	phys_addr_t addr_offset, bmask, end, start;
	size_t step;
	step = RTE_MAX(align, bound);
	bmask = ~((phys_addr_t)bound - 1);

	/* calculate offset to the closest alignment */
	start = RTE_ALIGN_CEIL(ms->phys_addr, align);
	addr_offset = start - ms->phys_addr;

	while (addr_offset + len < ms->len) {

		/* stop if [start, end] does not cross a boundary */
		end = start + len - (len != 0);
		if ((start & bmask) == (end & bmask))
			break;

		/* otherwise, advance to the next aligned step */
		start = RTE_ALIGN_CEIL(start + 1, step);
		addr_offset = start - ms->phys_addr;
	}

	return addr_offset;
}
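
/*
 * Worked example with made-up numbers: assume a segment with
 * ms->phys_addr = 0x1F000 and ms->len = 0x8000, and a request with
 * len = 0x2000, align = 0x1000, bound = 0x4000.
 *
 *   - first candidate: start = RTE_ALIGN_CEIL(0x1F000, 0x1000) = 0x1F000,
 *     end = 0x20FFF; start and end fall on opposite sides of the 0x20000
 *     boundary, so the candidate is rejected;
 *   - next candidate: start = RTE_ALIGN_CEIL(0x1F001, 0x4000) = 0x20000,
 *     end = 0x21FFF; both lie inside [0x20000, 0x23FFF], so the loop
 *     breaks and the function returns 0x20000 - 0x1F000 = 0x1000.
 */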

static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;
	int memseg_idx = -1;
	uint64_t addr_offset, seg_offset = 0;
	size_t requested_len;
	size_t memseg_len = 0;
	phys_addr_t memseg_physaddr;
	void *memseg_addr;
	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if (memzone_lookup_thread_unsafe(name) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary; check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}
	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t)RTE_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that the boundary condition is valid */
	if (bound != 0 &&
			(requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* find the smallest segment matching requirements */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		/* last segment */
		if (free_memseg[i].addr == NULL)
			break;

		/* empty segment, skip it */
		if (free_memseg[i].len == 0)
			continue;

		/* bad socket ID */
		if (socket_id != SOCKET_ID_ANY &&
		    free_memseg[i].socket_id != SOCKET_ID_ANY &&
		    socket_id != free_memseg[i].socket_id)
			continue;

		/*
		 * calculate offset to the closest alignment that
		 * meets the boundary conditions
		 */
		addr_offset = align_phys_boundary(free_memseg + i,
			requested_len, align, bound);

		/* check that the aligned request still fits in the segment */
		if ((requested_len + addr_offset) > free_memseg[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & RTE_MEMZONE_2MB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
			continue;
		if ((flags & RTE_MEMZONE_1GB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
			continue;
		if ((flags & RTE_MEMZONE_16MB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
			continue;
		if ((flags & RTE_MEMZONE_16GB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
			continue;

		/* this segment is the best until now */
		if (memseg_idx == -1) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
		/* len == 0 requests the biggest contiguous zone */
		else if (len == 0) {
			if (free_memseg[i].len > memseg_len) {
				memseg_idx = i;
				memseg_len = free_memseg[i].len;
				seg_offset = addr_offset;
			}
		}
		/*
		 * otherwise find the smallest fit (we already checked
		 * that the current segment length is >= len)
		 */
		else if (free_memseg[i].len + align < memseg_len ||
				(free_memseg[i].len <= memseg_len + align &&
				addr_offset < seg_offset)) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
	}

	/* no segment found */
	if (memseg_idx == -1) {
		/*
		 * If the RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the hugepage-size flags;
		 * otherwise fail.
		 */
		if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
		    ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB) ||
		     (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
			return memzone_reserve_aligned_thread_unsafe(name,
					len, socket_id, 0, align, bound);

		rte_errno = ENOMEM;
		return NULL;
	}

	/* save aligned physical and virtual addresses */
	memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
	memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
			(uintptr_t)seg_offset);

	/* if we were asked for the biggest memzone (len == 0) */
	if (len == 0) {
		if (bound == 0)
			requested_len = memseg_len - seg_offset;
		else
			requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
					bound) - memseg_physaddr;
	}

	/* set length to correct value */
	len = (size_t)seg_offset + requested_len;

	/* update our internal state */
	free_memseg[memseg_idx].len -= len;
	free_memseg[memseg_idx].phys_addr += len;
	free_memseg[memseg_idx].addr =
			(char *)free_memseg[memseg_idx].addr + len;

	/* fill the zone in config */
	struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = memseg_physaddr;
	mz->addr = memseg_addr;
	mz->len = requested_len;
	mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
	mz->socket_id = free_memseg[memseg_idx].socket_id;
	mz->flags = 0;
	mz->memseg_id = memseg_idx;

	return mz;
}

/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* mutually exclusive hugepage sizes cannot both be requested */
	if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
	    || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, 0);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
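
/*
 * Illustrative usage sketch (hypothetical names): reserving a zone whose
 * start is aligned to 4 KB, e.g. for a structure that must begin on a
 * page boundary. Note that any alignment below RTE_CACHE_LINE_SIZE is
 * silently raised to RTE_CACHE_LINE_SIZE by the helper above.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve_aligned("example_aligned", 1 << 16,
 *			SOCKET_ID_ANY, 0, 4096);
 */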

/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary).
 * If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* mutually exclusive hugepage sizes cannot both be requested */
	if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
	    || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
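
/*
 * Illustrative usage sketch (hypothetical names and sizes): some devices
 * require a DMA area that does not cross a power-of-two physical
 * boundary. Here a 16 KB zone is reserved that will not straddle a
 * 64 KB boundary.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve_bounded("example_dma", 16 * 1024,
 *			SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE, 64 * 1024);
 *	if (mz == NULL)
 *		printf("bounded reserve failed: %s\n",
 *			rte_strerror(rte_errno));
 */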

/*
 * Look up the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	memzone = memzone_lookup_thread_unsafe(name);
	rte_rwlock_read_unlock(&mcfg->mlock);

	return memzone;
}
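
/*
 * Illustrative usage sketch: since memzones live in the shared memory
 * configuration, a secondary process can attach to a zone reserved by
 * the primary simply by name ("example_zone" is a made-up name).
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_lookup("example_zone");
 *	if (mz == NULL)
 *		printf("zone not found\n");
 */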

/* Dump all reserved memory zones to the given file */
void
rte_memzone_dump(FILE *f)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	/* dump all zones */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			break;
		fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
		       ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
		       mcfg->memzone[i].name,
		       mcfg->memzone[i].phys_addr,
		       mcfg->memzone[i].len,
		       mcfg->memzone[i].addr,
		       mcfg->memzone[i].socket_id,
		       mcfg->memzone[i].flags);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
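
/*
 * Typical call, e.g. from a debug or CLI handler:
 *
 *	rte_memzone_dump(stdout);
 */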

/*
 * called by init: modify the free memseg list to have cache-aligned
 * addresses and cache-aligned lengths
 */
static int
memseg_sanitize(struct rte_memseg *memseg)
{
	unsigned phys_align;
	unsigned virt_align;
	unsigned off;

	phys_align = memseg->phys_addr & RTE_CACHE_LINE_MASK;
	virt_align = (unsigned long)memseg->addr & RTE_CACHE_LINE_MASK;

	/*
	 * sanity check: phys_addr and addr must have the same
	 * alignment within a cache line
	 */
	if (phys_align != virt_align)
		return -1;

	/* memseg is really too small, don't bother with it */
	if (memseg->len < (2 * RTE_CACHE_LINE_SIZE)) {
		memseg->len = 0;
		return 0;
	}

	/* align start address */
	off = (RTE_CACHE_LINE_SIZE - phys_align) & RTE_CACHE_LINE_MASK;
	memseg->phys_addr += off;
	memseg->addr = (char *)memseg->addr + off;
	memseg->len -= off;

	/* align end address */
	memseg->len &= ~((uint64_t)RTE_CACHE_LINE_MASK);

	return 0;
}
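
/*
 * Worked example with made-up numbers, assuming RTE_CACHE_LINE_SIZE is
 * 64 (so RTE_CACHE_LINE_MASK is 0x3F): for a memseg with phys_addr =
 * 0x1010 and len = 0x1000, phys_align = 0x10, so off = (0x40 - 0x10) &
 * 0x3F = 0x30. The start moves up to 0x1040 (cache-aligned), the length
 * shrinks to 0xFD0, and the final mask trims it to 0xFC0, a whole
 * number of cache lines.
 */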

/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* mirror the runtime memsegs from config */
	free_memseg = mcfg->free_memseg;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n",
			__func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* fill in uninitialized free_memsegs */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;
		if (free_memseg[i].addr != NULL)
			continue;
		memcpy(&free_memseg[i], &memseg[i], sizeof(struct rte_memseg));
	}

	/* make all zones cache-aligned */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (free_memseg[i].addr == NULL)
			break;
		if (memseg_sanitize(&free_memseg[i]) < 0) {
			RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n",
				__func__);
			rte_rwlock_write_unlock(&mcfg->mlock);
			return -1;
		}
	}

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return 0;
}

/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		      void *arg)
{
	struct rte_mem_config *mcfg;
	unsigned i;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr != NULL)
			(*func)(&mcfg->memzone[i], arg);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
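
/*
 * Illustrative usage sketch (hypothetical callback name): printing the
 * name and length of every reserved zone.
 *
 *	static void
 *	print_zone(const struct rte_memzone *mz, void *arg __rte_unused)
 *	{
 *		printf("%s: %zu bytes\n", mz->name, (size_t)mz->len);
 *	}
 *
 *	...
 *	rte_memzone_walk(print_zone, NULL);
 */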