/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>

#include "eal_private.h"
/* internal copy of free memory segments */
static struct rte_memseg *free_memseg = NULL;
static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	const struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
		if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
			return &mcfg->memzone[i];
	}

	return NULL;
}
/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		    unsigned flags)
{
	return rte_memzone_reserve_aligned(name,
			len, socket_id, flags, RTE_CACHE_LINE_SIZE);
}
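
/*
 * Example usage (an illustrative sketch; the zone name and size are
 * made up, and error handling is application-specific):
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve("my_zone", 4096, SOCKET_ID_ANY, 0);
 *	if (mz == NULL)
 *		printf("reservation failed: rte_errno = %d\n", rte_errno);
 *
 * The descriptor is stored in the shared memory configuration, so the
 * zone can later be retrieved by name with rte_memzone_lookup().
 */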
/*
 * Helper function for memzone_reserve_aligned_thread_unsafe().
 * Calculate the address offset from the start of the segment.
 * Align the offset so that it satisfies the start alignment and
 * a buffer of the requested length does not cross the specified boundary.
 */
static inline phys_addr_t
align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
	size_t bound)
{
	phys_addr_t addr_offset, bmask, end, start;
	size_t step;

	step = RTE_MAX(align, bound);
	bmask = ~((phys_addr_t)bound - 1);

	/* calculate offset to closest alignment */
	start = RTE_ALIGN_CEIL(ms->phys_addr, align);
	addr_offset = start - ms->phys_addr;

	while (addr_offset + len < ms->len) {

		/* check whether we meet the boundary condition */
		end = start + len - (len != 0);
		if ((start & bmask) == (end & bmask))
			break;

		/* calculate next offset */
		start = RTE_ALIGN_CEIL(start + 1, step);
		addr_offset = start - ms->phys_addr;
	}

	return addr_offset;
}
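
/*
 * A worked example of the search above (illustrative numbers only):
 * with ms->phys_addr = 0x7F40, len = 0x200, align = 0x100 and
 * bound = 0x400, the first candidate is
 * start = RTE_ALIGN_CEIL(0x7F40, 0x100) = 0x8000, addr_offset = 0xC0.
 * The buffer's last byte is end = 0x81FF; start and end fall in the
 * same 0x400-aligned window (both mask to 0x8000), so the boundary
 * check passes on the first iteration and 0xC0 is returned.
 */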
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;
	int memseg_idx = -1;
	uint64_t addr_offset, seg_offset = 0;
	size_t requested_len;
	size_t memseg_len = 0;
	phys_addr_t memseg_physaddr;
	void *memseg_addr;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}
	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}

	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t) RTE_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 &&
			(requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}
	/* find the smallest segment matching requirements */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		/* last segment */
		if (free_memseg[i].addr == NULL)
			break;

		/* empty segment, skip it */
		if (free_memseg[i].len == 0)
			continue;

		/* bad socket ID */
		if (socket_id != SOCKET_ID_ANY &&
		    free_memseg[i].socket_id != SOCKET_ID_ANY &&
		    socket_id != free_memseg[i].socket_id)
			continue;

		/*
		 * calculate offset to closest alignment that
		 * meets boundary conditions.
		 */
		addr_offset = align_phys_boundary(free_memseg + i,
			requested_len, align, bound);

		/* check len */
		if ((requested_len + addr_offset) > free_memseg[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & RTE_MEMZONE_2MB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
			continue;
		if ((flags & RTE_MEMZONE_1GB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
			continue;
		if ((flags & RTE_MEMZONE_16MB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
			continue;
		if ((flags & RTE_MEMZONE_16GB) &&
				free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
			continue;
		/* this segment is the best until now */
		if (memseg_idx == -1) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
		/* find the biggest contiguous zone */
		else if (len == 0) {
			if (free_memseg[i].len > memseg_len) {
				memseg_idx = i;
				memseg_len = free_memseg[i].len;
				seg_offset = addr_offset;
			}
		}
		/*
		 * find the smallest segment (we already checked that
		 * the current segment length is large enough)
		 */
		else if (free_memseg[i].len + align < memseg_len ||
				(free_memseg[i].len <= memseg_len + align &&
				addr_offset < seg_offset)) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
	}
	/* no segment found */
	if (memseg_idx == -1) {
		/*
		 * If the RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the page-size flags;
		 * otherwise fail.
		 */
		if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
		    ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)
		    || (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
			return memzone_reserve_aligned_thread_unsafe(name,
				len, socket_id, 0, align, bound);

		rte_errno = ENOMEM;
		return NULL;
	}
	/* save aligned physical and virtual addresses */
	memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
	memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
			(uintptr_t) seg_offset);

	/* if we are looking for the biggest memzone */
	if (len == 0) {
		if (bound == 0)
			requested_len = memseg_len - seg_offset;
		else
			requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
				bound) - memseg_physaddr;
	}

	/* set length to correct value */
	len = (size_t)seg_offset + requested_len;

	/* update our internal state */
	free_memseg[memseg_idx].len -= len;
	free_memseg[memseg_idx].phys_addr += len;
	free_memseg[memseg_idx].addr =
		(char *)free_memseg[memseg_idx].addr + len;

	/* fill the zone in config */
	struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = memseg_physaddr;
	mz->addr = memseg_addr;
	mz->len = requested_len;
	mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
	mz->socket_id = free_memseg[memseg_idx].socket_id;
	mz->flags = 0;
	mz->memseg_id = memseg_idx;

	return mz;
}
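
/*
 * Note on the len == 0 path above: callers can pass a zero length to
 * request the largest available contiguous block, e.g. (illustrative
 * name):
 *
 *	mz = rte_memzone_reserve("big_zone", 0, SOCKET_ID_ANY, 0);
 *
 * In that case the actual reserved length is only known from mz->len.
 */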
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* both sizes cannot be explicitly called for */
	if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
		|| ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);
	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, 0);
	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary).
 * If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* both sizes cannot be explicitly called for */
	if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
		|| ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);
	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);
	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
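
/*
 * A bounded reservation keeps the zone from crossing a power-of-two
 * physical address boundary, which some devices require for DMA rings.
 * An illustrative sketch (the name and sizes are made up): reserve 2 KB,
 * aligned to 1 KB, never crossing a 4 KB boundary:
 *
 *	mz = rte_memzone_reserve_bounded("ring_dma", 2048, SOCKET_ID_ANY,
 *					 0, 1024, 4096);
 *
 * bound must be a power of two at least as large as the requested
 * length, otherwise the call fails with rte_errno set to EINVAL.
 */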
/*
 * Lookup for the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	memzone = memzone_lookup_thread_unsafe(name);
	rte_rwlock_read_unlock(&mcfg->mlock);

	return memzone;
}
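
/*
 * Because the descriptors live in the shared memory configuration, a
 * secondary process can find a zone reserved by the primary process by
 * name (an illustrative sketch; the zone name is made up):
 *
 *	const struct rte_memzone *mz = rte_memzone_lookup("my_zone");
 *	if (mz != NULL)
 *		printf("zone at %p, len 0x%zx\n", mz->addr, mz->len);
 */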
/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	/* dump all zones */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			break;
		fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
		       ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
		       mcfg->memzone[i].name,
		       mcfg->memzone[i].phys_addr,
		       mcfg->memzone[i].len,
		       mcfg->memzone[i].addr,
		       mcfg->memzone[i].socket_id,
		       mcfg->memzone[i].flags);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
/*
 * called by init: modify the free memseg list to have cache-aligned
 * addresses and cache-aligned lengths
 */
static int
memseg_sanitize(struct rte_memseg *memseg)
{
	unsigned phys_align, virt_align, off;

	phys_align = memseg->phys_addr & RTE_CACHE_LINE_MASK;
	virt_align = (unsigned long)memseg->addr & RTE_CACHE_LINE_MASK;

	/* sanity check: phys_addr and addr must have the same alignment */
	if (phys_align != virt_align)
		return -1;

	/* memseg is really too small, don't bother with it */
	if (memseg->len < (2 * RTE_CACHE_LINE_SIZE)) {
		memseg->len = 0;
		return 0;
	}

	/* align start address */
	off = (RTE_CACHE_LINE_SIZE - phys_align) & RTE_CACHE_LINE_MASK;
	memseg->phys_addr += off;
	memseg->addr = (char *)memseg->addr + off;
	memseg->len -= off;
	/* align end address */
	memseg->len &= ~((uint64_t)RTE_CACHE_LINE_MASK);

	return 0;
}
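
/*
 * A quick worked example of the trimming above (illustrative numbers,
 * assuming 64-byte cache lines): a segment at phys_addr 0x10030 with
 * len 0x1000 has phys_align 0x30, so off = (0x40 - 0x30) & 0x3F = 0x10.
 * The segment becomes phys_addr 0x10040, len 0xFF0, and the final mask
 * trims len to 0xFC0, cache-aligned at both ends.
 */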
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* mirror the runtime memsegs from config */
	free_memseg = mcfg->free_memseg;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* fill in uninitialized free_memsegs */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;
		if (free_memseg[i].addr != NULL)
			continue;
		memcpy(&free_memseg[i], &memseg[i], sizeof(struct rte_memseg));
	}

	/* make all zones cache-aligned */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (free_memseg[i].addr == NULL)
			break;
		if (memseg_sanitize(&free_memseg[i]) < 0) {
			RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
			rte_rwlock_write_unlock(&mcfg->mlock);
			return -1;
		}
	}

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return 0;
}
/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		      void *arg)
{
	struct rte_mem_config *mcfg;
	unsigned i;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr != NULL)
			(*func)(&mcfg->memzone[i], arg);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
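
/*
 * Example callback for rte_memzone_walk() (an illustrative sketch;
 * the callback name is made up):
 *
 *	static void
 *	print_zone(const struct rte_memzone *mz, void *arg)
 *	{
 *		FILE *f = arg;
 *		fprintf(f, "%s: %zu bytes on socket %"PRId32"\n",
 *			mz->name, mz->len, mz->socket_id);
 *	}
 *
 *	rte_memzone_walk(print_zone, stdout);
 */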