/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>

#include "eal_private.h"

/* internal copy of free memory segments */
static struct rte_memseg *free_memseg = NULL;

static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	const struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	for (i = 0; i < RTE_MAX_MEMZONE &&
	     mcfg->memzone[i].addr != NULL; i++) {
		if (!strncmp(name, mcfg->memzone[i].name,
			     RTE_MEMZONE_NAMESIZE))
			return &mcfg->memzone[i];
	}

	return NULL;
}

/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		    unsigned flags)
{
	return rte_memzone_reserve_aligned(name,
			len, socket_id, flags, CACHE_LINE_SIZE);
}

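/*
 * Usage sketch (illustrative only; the zone name and size below are
 * hypothetical, not defined by this file):
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve("example_mz", 4096, SOCKET_ID_ANY, 0);
 *	if (mz == NULL)
 *		RTE_LOG(ERR, USER1, "reserve failed: err=%d\n", rte_errno);
 *
 * On success the zone is cache-line aligned and lives for the lifetime
 * of the application; there is no corresponding free operation.
 */
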
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;
	int memseg_idx = -1;
	uint64_t addr_offset;
	size_t requested_len;
	size_t memseg_len = 0;
	phys_addr_t memseg_physaddr;
	void *memseg_addr;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if (memzone_lookup_thread_unsafe(name) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}

	len += CACHE_LINE_MASK;
	len &= ~((size_t)CACHE_LINE_MASK);

	/* save original (requested) length */
	requested_len = len;

	/* reserve extra space for future alignment */
	if (len)
		len += align;

	/* find the smallest segment matching requirements */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		/* last segment */
		if (free_memseg[i].addr == NULL)
			break;

		/* empty segment, skip it */
		if (free_memseg[i].len == 0)
			continue;

		/* bad socket ID */
		if (socket_id != SOCKET_ID_ANY &&
		    socket_id != free_memseg[i].socket_id)
			continue;

		/* check len */
		if (len != 0 && len > free_memseg[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & RTE_MEMZONE_2MB) &&
		    free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
			continue;
		if ((flags & RTE_MEMZONE_1GB) &&
		    free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
			continue;

		/* this segment is the best until now */
		if (memseg_idx == -1) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
		}
		/* find the biggest contiguous zone */
		else if (len == 0) {
			if (free_memseg[i].len > memseg_len) {
				memseg_idx = i;
				memseg_len = free_memseg[i].len;
			}
		}
		/*
		 * find the smallest (we already checked that the current
		 * zone length is >= len)
		 */
		else if (free_memseg[i].len < memseg_len) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
		}
	}

	/* no segment found */
	if (memseg_idx == -1) {
		/*
		 * If the RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the page-size flags;
		 * otherwise, fail. Undo the alignment padding added above;
		 * a zero len (biggest-zone request) never had padding added.
		 */
		if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
		    ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)))
			return memzone_reserve_aligned_thread_unsafe(name,
					len == 0 ? 0 : len - align,
					socket_id, 0, align);

		RTE_LOG(ERR, EAL, "%s(%s, %zu, %d): "
			"No appropriate segment found\n",
			__func__, name, requested_len, socket_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	/* get offset needed to adjust alignment */
	addr_offset = RTE_ALIGN_CEIL(free_memseg[memseg_idx].phys_addr, align) -
			free_memseg[memseg_idx].phys_addr;

	/* save aligned physical and virtual addresses */
	memseg_physaddr = free_memseg[memseg_idx].phys_addr + addr_offset;
	memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
			(uintptr_t)addr_offset);

	/* if we are looking for the biggest memzone */
	if (requested_len == 0)
		requested_len = memseg_len - addr_offset;

	/* set length to correct value */
	len = (size_t)addr_offset + requested_len;

	/* update our internal state */
	free_memseg[memseg_idx].len -= len;
	free_memseg[memseg_idx].phys_addr += len;
	free_memseg[memseg_idx].addr =
			(char *)free_memseg[memseg_idx].addr + len;

	/* fill the zone in config */
	struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
	rte_snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = memseg_physaddr;
	mz->addr = memseg_addr;
	mz->len = requested_len;
	mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
	mz->socket_id = free_memseg[memseg_idx].socket_id;
	mz->flags = 0;

	return mz;
}

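/*
 * Worked example of the alignment arithmetic above (illustrative
 * numbers): if the chosen segment starts at phys_addr 0x7fc0 and align
 * is 0x100, RTE_ALIGN_CEIL(0x7fc0, 0x100) is 0x8000, so addr_offset is
 * 0x40. The zone then starts at physical address 0x8000, and
 * addr_offset + requested_len bytes are carved off the head of the
 * free segment.
 */
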
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* both sizes cannot be explicitly called for */
	if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (!rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
				align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < CACHE_LINE_SIZE)
		align = CACHE_LINE_SIZE;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}

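/*
 * Usage sketch (hypothetical name and size): request a zone backed by
 * 2MB pages, but accept any page size if no 2MB segment can satisfy it:
 *
 *	mz = rte_memzone_reserve_aligned("example_mz", 1 << 20,
 *			SOCKET_ID_ANY,
 *			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY,
 *			CACHE_LINE_SIZE);
 */
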
/*
 * Lookup for the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);

	memzone = memzone_lookup_thread_unsafe(name);

	rte_rwlock_read_unlock(&mcfg->mlock);

	return memzone;
}

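/*
 * Usage sketch: because the lookup reads the shared mem_config, a
 * secondary process can attach to a zone reserved by the primary
 * ("example_mz" is a hypothetical name):
 *
 *	const struct rte_memzone *mz = rte_memzone_lookup("example_mz");
 *	if (mz == NULL)
 *		RTE_LOG(ERR, USER1, "zone not found\n");
 */
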
/* Dump all reserved memory zones on console */
void
rte_memzone_dump(void)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	/* dump all zones */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			break;
		printf("Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
		       ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
		       mcfg->memzone[i].name,
		       mcfg->memzone[i].phys_addr,
		       mcfg->memzone[i].len,
		       mcfg->memzone[i].addr,
		       mcfg->memzone[i].socket_id,
		       mcfg->memzone[i].flags);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}

/*
 * called by init: modify the free memseg list to have cache-aligned
 * addresses and cache-aligned lengths
 */
static int
memseg_sanitize(struct rte_memseg *memseg)
{
	unsigned phys_align;
	unsigned virt_align;
	unsigned off;

	phys_align = memseg->phys_addr & CACHE_LINE_MASK;
	virt_align = (unsigned long)memseg->addr & CACHE_LINE_MASK;

	/*
	 * sanity check: phys_addr and addr must have the same
	 * alignment regarding the cache line size
	 */
	if (phys_align != virt_align)
		return -1;

	/* memseg is really too small, don't bother with it */
	if (memseg->len < (2 * CACHE_LINE_SIZE)) {
		memseg->len = 0;
		return 0;
	}

	/* align start address */
	off = (CACHE_LINE_SIZE - phys_align) & CACHE_LINE_MASK;
	memseg->phys_addr += off;
	memseg->addr = (char *)memseg->addr + off;
	memseg->len -= off;

	/* align end address */
	memseg->len &= ~((uint64_t)CACHE_LINE_MASK);

	return 0;
}

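/*
 * Worked example (illustrative numbers): for a segment with phys_addr
 * 0x1010 and a 64-byte cache line (mask 0x3f), phys_align is 0x10, so
 * off = (64 - 0x10) & 0x3f = 0x30 and the segment start moves to
 * 0x1040; the remaining length is then rounded down to a multiple
 * of 64.
 */
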
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* mirror the runtime memsegs from config */
	free_memseg = mcfg->free_memseg;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n",
			__func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* duplicate the memsegs from config */
	memcpy(free_memseg, memseg,
	       sizeof(struct rte_memseg) * RTE_MAX_MEMSEG);

	/* make all zones cache-aligned */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (free_memseg[i].addr == NULL)
			break;
		if (memseg_sanitize(&free_memseg[i]) < 0) {
			RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n",
				__func__);
			rte_rwlock_write_unlock(&mcfg->mlock);
			return -1;
		}
	}

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return 0;
}
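
/*
 * Note (context from the wider EAL, not stated in this file):
 * rte_eal_memzone_init() is expected to run once from rte_eal_init(),
 * after hugepage memory has been mapped and mcfg->free_memseg has been
 * populated; applications are not expected to call it directly.
 */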