/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* zones and this function should be called at init only
*/
for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
- if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
+ if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
return &mcfg->memzone[i];
}
unsigned flags)
{
return rte_memzone_reserve_aligned(name,
- len, socket_id, flags, CACHE_LINE_SIZE);
+ len, socket_id, flags, RTE_CACHE_LINE_SIZE);
}
/*
}
/* alignment less than cache size is not allowed */
- if (align < CACHE_LINE_SIZE)
- align = CACHE_LINE_SIZE;
+ if (align < RTE_CACHE_LINE_SIZE)
+ align = RTE_CACHE_LINE_SIZE;
/* align length on cache boundary. Check for overflow before doing so */
- if (len > SIZE_MAX - CACHE_LINE_MASK) {
+ if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
rte_errno = EINVAL; /* requested size too big */
return NULL;
}
- len += CACHE_LINE_MASK;
- len &= ~((size_t) CACHE_LINE_MASK);
+ len += RTE_CACHE_LINE_MASK;
+ len &= ~((size_t) RTE_CACHE_LINE_MASK);
/* save minimal requested length */
- requested_len = RTE_MAX((size_t)CACHE_LINE_SIZE, len);
+ requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
/* check that boundary condition is valid */
if (bound != 0 &&
/* bad socket ID */
if (socket_id != SOCKET_ID_ANY &&
+ free_memseg[i].socket_id != SOCKET_ID_ANY &&
socket_id != free_memseg[i].socket_id)
continue;
/* check flags for hugepage sizes */
if ((flags & RTE_MEMZONE_2MB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_1G )
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
continue;
if ((flags & RTE_MEMZONE_1GB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_2M )
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
+ continue;
+ if ((flags & RTE_MEMZONE_16MB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
+ continue;
+ if ((flags & RTE_MEMZONE_16GB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
continue;
/* this segment is the best until now */
* try allocating again without the size parameter otherwise -fail.
*/
if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
- ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)))
+ ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)
+ || (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
return memzone_reserve_aligned_thread_unsafe(name,
len, socket_id, 0, align, bound);
/* fill the zone in config */
struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
- rte_snprintf(mz->name, sizeof(mz->name), "%s", name);
+ snprintf(mz->name, sizeof(mz->name), "%s", name);
mz->phys_addr = memseg_physaddr;
mz->addr = memseg_addr;
mz->len = requested_len;
const struct rte_memzone *mz = NULL;
/* both sizes cannot be explicitly called for */
- if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
+ if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
+ || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
rte_errno = EINVAL;
return NULL;
}
const struct rte_memzone *mz = NULL;
/* both sizes cannot be explicitly called for */
- if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
+ if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
+ || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
rte_errno = EINVAL;
return NULL;
}
const struct rte_memzone *memzone = NULL;
mcfg = rte_eal_get_configuration()->mem_config;
-
+
rte_rwlock_read_lock(&mcfg->mlock);
memzone = memzone_lookup_thread_unsafe(name);
/* Dump all reserved memory zones on console */
void
-rte_memzone_dump(void)
+rte_memzone_dump(FILE *f)
{
struct rte_mem_config *mcfg;
unsigned i = 0;
for (i=0; i<RTE_MAX_MEMZONE; i++) {
if (mcfg->memzone[i].addr == NULL)
break;
- printf("Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
+ fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
mcfg->memzone[i].name,
mcfg->memzone[i].phys_addr,
unsigned virt_align;
unsigned off;
- phys_align = memseg->phys_addr & CACHE_LINE_MASK;
- virt_align = (unsigned long)memseg->addr & CACHE_LINE_MASK;
+ phys_align = memseg->phys_addr & RTE_CACHE_LINE_MASK;
+ virt_align = (unsigned long)memseg->addr & RTE_CACHE_LINE_MASK;
/*
* sanity check: phys_addr and addr must have the same
return -1;
/* memseg is really too small, don't bother with it */
- if (memseg->len < (2 * CACHE_LINE_SIZE)) {
+ if (memseg->len < (2 * RTE_CACHE_LINE_SIZE)) {
memseg->len = 0;
return 0;
}
/* align start address */
- off = (CACHE_LINE_SIZE - phys_align) & CACHE_LINE_MASK;
+ off = (RTE_CACHE_LINE_SIZE - phys_align) & RTE_CACHE_LINE_MASK;
memseg->phys_addr += off;
memseg->addr = (char *)memseg->addr + off;
memseg->len -= off;
/* align end address */
- memseg->len &= ~((uint64_t)CACHE_LINE_MASK);
+ memseg->len &= ~((uint64_t)RTE_CACHE_LINE_MASK);
return 0;
}
/* mirror the runtime memsegs from config */
free_memseg = mcfg->free_memseg;
-
+
/* secondary processes don't need to initialise anything */
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
return 0;
}
+
+/*
+ * Walk all reserved memory zones.
+ *
+ * Invokes func(&zone, arg) for every memzone slot whose addr is
+ * non-NULL.  The entire walk runs under the mem-config read lock, so
+ * the zone table cannot change mid-walk; consequently func must not
+ * itself reserve or look up memzones (NOTE(review): that would re-take
+ * mcfg->mlock -- confirm the rwlock is not recursive).
+ */
+void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
+ void *arg)
+{
+ struct rte_mem_config *mcfg;
+ unsigned i;
+
+ /* shared mem config is the single global registry of memzones */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_read_lock(&mcfg->mlock);
+ /*
+ * Scan every slot up to RTE_MAX_MEMZONE: unlike rte_memzone_dump(),
+ * do not stop at the first NULL addr.
+ */
+ for (i=0; i<RTE_MAX_MEMZONE; i++) {
+ if (mcfg->memzone[i].addr != NULL)
+ (*func)(&mcfg->memzone[i], arg);
+ }
+ rte_rwlock_read_unlock(&mcfg->mlock);
+}