#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_common.h>
+#include <rte_string_fns.h>
#include "test.h"
const struct rte_memseg *ms;
int hugepage_2MB_avail = 0;
int hugepage_1GB_avail = 0;
- const int size = 100;
+ const size_t size = 100;
int i = 0;
ms = rte_eal_get_physmem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
const struct rte_memzone *memzone_aligned_1024;
/* memzone that should automatically be adjusted to align on 64 bytes */
- memzone_aligned_32 = rte_memzone_lookup("aligned_32");
- if (memzone_aligned_32 == NULL)
- memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
+ memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
SOCKET_ID_ANY, 0, 32);
/* memzone that is supposed to be aligned on a 128 byte boundary */
- memzone_aligned_128 = rte_memzone_lookup("aligned_128");
- if (memzone_aligned_128 == NULL)
- memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
+ memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
SOCKET_ID_ANY, 0, 128);
/* memzone that is supposed to be aligned on a 256 byte boundary */
- memzone_aligned_256 = rte_memzone_lookup("aligned_256");
- if (memzone_aligned_256 == NULL)
- memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
+ memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
SOCKET_ID_ANY, 0, 256);
/* memzone that is supposed to be aligned on a 512 byte boundary */
- memzone_aligned_512 = rte_memzone_lookup("aligned_512");
- if (memzone_aligned_512 == NULL)
- memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
+ memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
SOCKET_ID_ANY, 0, 512);
/* memzone that is supposed to be aligned on a 1024 byte boundary */
- memzone_aligned_1024 = rte_memzone_lookup("aligned_1024");
- if (memzone_aligned_1024 == NULL)
- memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
+ memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
SOCKET_ID_ANY, 0, 1024);
printf("check alignments and lengths\n");
return -1;
if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
return -1;
+
if (memzone_aligned_128 == NULL) {
printf("Unable to reserve 128-byte aligned memzone!\n");
return -1;
return -1;
if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
return -1;
+
if (memzone_aligned_256 == NULL) {
printf("Unable to reserve 256-byte aligned memzone!\n");
return -1;
return -1;
if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
return -1;
+
if (memzone_aligned_512 == NULL) {
printf("Unable to reserve 512-byte aligned memzone!\n");
return -1;
return -1;
if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
return -1;
+
if (memzone_aligned_1024 == NULL) {
printf("Unable to reserve 1024-byte aligned memzone!\n");
return -1;
return 0;
}
+/*
+ * Reserve a bounded memzone named 'name' of 'len' bytes with the given
+ * 'align' and 'bound', then verify the reservation:
+ *  - physical and virtual addresses honour 'align',
+ *  - zone length is cache-line rounded and covers at least 'len',
+ *  - the zone does not cross a 'bound'-sized physical boundary.
+ * Returns 0 on success, -1 (with a diagnostic printed) on any failure.
+ */
+static int
+check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
+	uint32_t bound)
+{
+	const struct rte_memzone *mz;
+	phys_addr_t bmask;
+
+	/* mask selecting the bits that identify a 'bound'-sized window;
+	 * start and end of the zone must fall in the same window. */
+	bmask = ~((phys_addr_t)bound - 1);
+
+	if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
+			align, bound)) == NULL) {
+		printf("%s(%s): memzone creation failed\n",
+			__func__, name);
+		return (-1);
+	}
+
+	/* physical address must be 'align'-aligned */
+	if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
+		printf("%s(%s): invalid phys addr alignment\n",
+			__func__, mz->name);
+		return (-1);
+	}
+
+	/* virtual address must be 'align'-aligned as well */
+	if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
+		printf("%s(%s): invalid virtual addr alignment\n",
+			__func__, mz->name);
+		return (-1);
+	}
+
+	/* length must be cache-line rounded and cover the request */
+	if ((mz->len & CACHE_LINE_MASK) != 0 || mz->len < len ||
+			mz->len < CACHE_LINE_SIZE) {
+		printf("%s(%s): invalid length\n",
+			__func__, mz->name);
+		return (-1);
+	}
+
+	/* first and last byte must lie within the same 'bound' window */
+	if ((mz->phys_addr & bmask) !=
+			((mz->phys_addr + mz->len - 1) & bmask)) {
+		printf("%s(%s): invalid memzone boundary %u crossed\n",
+			__func__, mz->name, bound);
+		return (-1);
+	}
+
+	return (0);
+}
+
+/*
+ * Exercise rte_memzone_reserve_bounded(): first check that invalid
+ * boundary parameters (non power-of-two boundary, length larger than
+ * the boundary) are rejected, then reserve several valid bounded
+ * zones and validate their placement via check_memzone_bounded().
+ * Returns 0 on success, negative on failure.
+ */
+static int
+test_memzone_bounded(void)
+{
+	const struct rte_memzone *memzone_err;
+	const char *name;
+	int rc;
+
+	/* should fail as boundary is not power of two */
+	name = "bounded_error_31";
+	if ((memzone_err = rte_memzone_reserve_bounded(name,
+		100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
+		printf("%s(%s)created a memzone with invalid boundary "
+			"conditions\n", __func__, memzone_err->name);
+		return (-1);
+	}
+
+	/* should fail as len is greater than boundary */
+	name = "bounded_error_32";
+	if ((memzone_err = rte_memzone_reserve_bounded(name,
+		100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
+		printf("%s(%s)created a memzone with invalid boundary "
+			"conditions\n", __func__, memzone_err->name);
+		return (-1);
+	}
+
+	if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
+		return (rc);
+
+	if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
+		return (rc);
+
+	if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
+		return (rc);
+
+	/* len == 0: reserve the remainder up to the next boundary */
+	if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
+		return (rc);
+
+	return (0);
+}
+
+/*
+ * Verify that a small reservation is satisfied from the smallest free
+ * memseg: find the two smallest free segments, reserve one cache line,
+ * and check that only the smallest segment shrank.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+test_memzone_reserve_memory_in_smallest_segment(void)
+{
+	const struct rte_memzone *mz;
+	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
+	size_t min_len, prev_min_len;
+	const struct rte_config *config;
+	int i;
+
+	config = rte_eal_get_configuration();
+
+	min_ms = NULL; /* smallest segment */
+	prev_min_ms = NULL; /* second smallest segment */
+
+	/* find two smallest segments */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		ms = &config->mem_config->free_memseg[i];
+
+		/* addr == NULL marks the end of the used entries */
+		if (ms->addr == NULL)
+			break;
+		if (ms->len == 0)
+			continue;
+
+		if (min_ms == NULL)
+			min_ms = ms;
+		else if (min_ms->len > ms->len) {
+			/* set last smallest to second last */
+			prev_min_ms = min_ms;
+
+			/* set new smallest */
+			min_ms = ms;
+		}
+		else if (prev_min_ms == NULL) {
+			prev_min_ms = ms;
+		}
+	}
+
+	if (min_ms == NULL || prev_min_ms == NULL) {
+		printf("Smallest segments not found!\n");
+		return -1;
+	}
+
+	/* snapshot lengths before the reservation */
+	min_len = min_ms->len;
+	prev_min_len = prev_min_ms->len;
+
+	/* try reserving a memzone in the smallest memseg */
+	mz = rte_memzone_reserve("smallest_mz", CACHE_LINE_SIZE,
+			SOCKET_ID_ANY, 0);
+	if (mz == NULL) {
+		printf("Failed to reserve memory from smallest memseg!\n");
+		return -1;
+	}
+	/* only the smallest segment should have shrunk, by one cache line */
+	if (prev_min_ms->len != prev_min_len &&
+			min_ms->len != min_len - CACHE_LINE_SIZE) {
+		printf("Reserved memory from wrong memseg!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* this test is a bit tricky, and thus warrants explanation.
+ *
+ * first, we find two smallest memsegs to conduct our experiments on.
+ *
+ * then, we bring them within alignment from each other: if second segment is
+ * twice+ as big as the first, reserve memory from that segment; if second
+ * segment is comparable in length to the first, then cut the first segment
+ * down until it becomes less than half of second segment, and then cut down
+ * the second segment to be within alignment of the first.
+ *
+ * then, we have to pass the following test: if segments are within alignment
+ * of each other (that is, the difference is less than 256 bytes, which is what
+ * our alignment will be), segment with smallest offset should be picked.
+ *
+ * we know that min_ms will be our smallest segment, so we need to make sure
+ * that we adjust the alignments so that the bigger segment has smallest
+ * alignment (in our case, smallest segment will have 64-byte alignment, while
+ * bigger segment will have 128-byte alignment).
+ */
+/*
+ * See the explanation in the comment block above: bring the two smallest
+ * free segments within 'align' of each other via dummy reservations, then
+ * verify that an aligned reservation is taken from the segment with the
+ * smallest alignment offset (the bigger one). Returns 0 on success,
+ * -1 on failure.
+ */
+static int
+test_memzone_reserve_memory_with_smallest_offset(void)
+{
+	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
+	size_t len, min_len, prev_min_len;
+	const struct rte_config *config;
+	int i, align;
+
+	config = rte_eal_get_configuration();
+
+	min_ms = NULL; /* smallest segment */
+	prev_min_ms = NULL; /* second smallest segment */
+	align = CACHE_LINE_SIZE * 4;
+
+	/* find two smallest segments */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		ms = &config->mem_config->free_memseg[i];
+
+		/* addr == NULL marks the end of the used entries */
+		if (ms->addr == NULL)
+			break;
+		if (ms->len == 0)
+			continue;
+
+		if (min_ms == NULL)
+			min_ms = ms;
+		else if (min_ms->len > ms->len) {
+			/* set last smallest to second last */
+			prev_min_ms = min_ms;
+
+			/* set new smallest */
+			min_ms = ms;
+		}
+		else if (prev_min_ms == NULL) {
+			prev_min_ms = ms;
+		}
+	}
+
+	if (min_ms == NULL || prev_min_ms == NULL) {
+		printf("Smallest segments not found!\n");
+		return -1;
+	}
+
+	prev_min_len = prev_min_ms->len;
+	min_len = min_ms->len;
+
+	/* if smallest segment is bigger than half of bigger segment */
+	if (prev_min_ms->len - min_ms->len <= min_ms->len) {
+
+		/* cut the smallest segment down below half of the bigger one */
+		len = (min_ms->len * 2) - prev_min_ms->len;
+
+		/* make sure final length is *not* aligned */
+		while (((min_ms->addr_64 + len) & (align-1)) == 0)
+			len += CACHE_LINE_SIZE;
+
+		if (rte_memzone_reserve("dummy_mz1", len, SOCKET_ID_ANY, 0) == NULL) {
+			printf("Cannot reserve memory!\n");
+			return -1;
+		}
+
+		/* check if we got memory from correct segment */
+		if (min_ms->len != min_len - len) {
+			printf("Reserved memory from wrong segment!\n");
+			return -1;
+		}
+	}
+	/* if we don't need to touch smallest segment but it's aligned */
+	else if ((min_ms->addr_64 & (align-1)) == 0) {
+		/* unalign the smallest segment by one cache line */
+		if (rte_memzone_reserve("align_mz1", CACHE_LINE_SIZE,
+				SOCKET_ID_ANY, 0) == NULL) {
+			printf("Cannot reserve memory!\n");
+			return -1;
+		}
+		if (min_ms->len != min_len - CACHE_LINE_SIZE) {
+			printf("Reserved memory from wrong segment!\n");
+			return -1;
+		}
+	}
+
+	/* if smallest segment is less than half of bigger segment */
+	if (prev_min_ms->len - min_ms->len > min_ms->len) {
+		/* shrink the bigger segment to within 'align' of the smaller */
+		len = prev_min_ms->len - min_ms->len - align;
+
+		/* make sure final length is aligned */
+		while (((prev_min_ms->addr_64 + len) & (align-1)) != 0)
+			len += CACHE_LINE_SIZE;
+
+		if (rte_memzone_reserve("dummy_mz2", len, SOCKET_ID_ANY, 0) == NULL) {
+			printf("Cannot reserve memory!\n");
+			return -1;
+		}
+
+		/* check if we got memory from correct segment */
+		if (prev_min_ms->len != prev_min_len - len) {
+			printf("Reserved memory from wrong segment!\n");
+			return -1;
+		}
+	}
+	len = CACHE_LINE_SIZE;
+
+
+
+	/* re-snapshot lengths after the adjustment reservations above */
+	prev_min_len = prev_min_ms->len;
+	min_len = min_ms->len;
+
+	/* segments must now be within 'align' of each other */
+	if (min_len >= prev_min_len || prev_min_len - min_len > (unsigned) align) {
+		printf("Segments are of wrong lengths!\n");
+		return -1;
+	}
+
+	/* try reserving from a bigger segment */
+	if (rte_memzone_reserve_aligned("smallest_offset", len, SOCKET_ID_ANY, 0, align) ==
+			NULL) {
+		printf("Cannot reserve memory!\n");
+		return -1;
+	}
+
+	/* check if we got memory from correct segment */
+	if (min_ms->len != min_len && prev_min_ms->len != (prev_min_len - len)) {
+		printf("Reserved memory from segment with smaller offset!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify remainder handling: find the smallest free segment and the
+ * largest alignment its base address satisfies. An aligned reservation
+ * of exactly that segment's length must be taken from a different,
+ * larger segment (alignment padding wouldn't fit); an unaligned
+ * reservation of the same length must consume the segment completely.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+test_memzone_reserve_remainder(void)
+{
+	const struct rte_memzone *mz1, *mz2;
+	const struct rte_memseg *ms, *min_ms = NULL;
+	size_t min_len;
+	const struct rte_config *config;
+	int i, align;
+
+	min_len = 0;
+	align = CACHE_LINE_SIZE;
+
+	config = rte_eal_get_configuration();
+
+	/* find minimum free contiguous length */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		ms = &config->mem_config->free_memseg[i];
+
+		/* addr == NULL marks the end of the used entries */
+		if (ms->addr == NULL)
+			break;
+		if (ms->len == 0)
+			continue;
+
+		if (min_len == 0 || ms->len < min_len) {
+			min_len = ms->len;
+			min_ms = ms;
+
+			/* find maximum alignment this segment is able to hold */
+			align = CACHE_LINE_SIZE;
+			while ((ms->addr_64 & (align-1)) == 0) {
+				align <<= 1;
+			}
+		}
+	}
+
+	if (min_ms == NULL) {
+		printf("Minimal sized segment not found!\n");
+		return -1;
+	}
+
+	/* try reserving min_len bytes with alignment - this should not affect our
+	 * memseg, the memory will be taken from a different one.
+	 */
+	mz1 = rte_memzone_reserve_aligned("reserve_remainder_1", min_len,
+			SOCKET_ID_ANY, 0, align);
+	if (mz1 == NULL) {
+		printf("Failed to reserve %zu bytes aligned on %i bytes\n", min_len,
+				align);
+		return -1;
+	}
+	if (min_ms->len != min_len) {
+		printf("Memseg memory should not have been reserved!\n");
+		return -1;
+	}
+
+	/* try reserving min_len bytes with less alignment - this should fill up
+	 * the segment.
+	 */
+	mz2 = rte_memzone_reserve("reserve_remainder_2", min_len,
+			SOCKET_ID_ANY, 0);
+	if (mz2 == NULL) {
+		printf("Failed to reserve %zu bytes\n", min_len);
+		return -1;
+	}
+	/* the segment must now be fully consumed */
+	if (min_ms->len != 0) {
+		printf("Memseg memory should have been reserved!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
int
test_memzone(void)
{
const struct rte_memzone *memzone1;
const struct rte_memzone *memzone2;
const struct rte_memzone *memzone3;
+ const struct rte_memzone *memzone4;
const struct rte_memzone *mz;
- memzone1 = rte_memzone_lookup("testzone1");
- if (memzone1 == NULL)
- memzone1 = rte_memzone_reserve("testzone1", 100,
+ memzone1 = rte_memzone_reserve("testzone1", 100,
SOCKET_ID_ANY, 0);
- memzone2 = rte_memzone_lookup("testzone2");
- if (memzone2 == NULL)
- memzone2 = rte_memzone_reserve("testzone2", 1000,
+ memzone2 = rte_memzone_reserve("testzone2", 1000,
0, 0);
- memzone3 = rte_memzone_lookup("testzone3");
- if (memzone3 == NULL)
- memzone3 = rte_memzone_reserve("testzone3", 1000,
+ memzone3 = rte_memzone_reserve("testzone3", 1000,
1, 0);
+ memzone4 = rte_memzone_reserve("testzone4", 1024,
+ SOCKET_ID_ANY, 0);
+
/* memzone3 may be NULL if we don't have NUMA */
- if (memzone1 == NULL || memzone2 == NULL)
+ if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
return -1;
rte_memzone_dump();
if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
memzone3->len == 0))
return -1;
+ if (memzone4->len != 1024)
+ return -1;
/* check that zones don't overlap */
printf("check overlapping\n");
if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
return -1;
+ printf("test reserving memory in smallest segments\n");
+ if (test_memzone_reserve_memory_in_smallest_segment() < 0)
+ return -1;
+
+ printf("test reserving memory in segments with smallest offsets\n");
+ if (test_memzone_reserve_memory_with_smallest_offset() < 0)
+ return -1;
+
printf("test memzone_reserve flags\n");
if (test_memzone_reserve_flags() < 0)
return -1;
if (test_memzone_aligned() < 0)
return -1;
+ printf("test boundary alignment for memzone_reserve\n");
+ if (test_memzone_bounded() < 0)
+ return -1;
+
printf("test invalid alignment for memzone_reserve\n");
if (test_memzone_invalid_alignment() < 0)
return -1;
+ printf("test reserving amounts of memory equal to segment's length\n");
+ if (test_memzone_reserve_remainder() < 0)
+ return -1;
+
printf("test reserving the largest size memzone possible\n");
if (test_memzone_reserve_max() < 0)
return -1;
len, socket_id, flags, CACHE_LINE_SIZE);
}
+/*
+ * Helper function for memzone_reserve_aligned_thread_unsafe().
+ * Calculate the address offset from the start of the segment.
+ * The offset is aligned so that it satisfies the start alignment and
+ * a buffer of the requested length does not cross the specified boundary.
+ */
+static inline phys_addr_t
+align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
+	size_t bound)
+{
+	phys_addr_t addr_offset, bmask, end, start;
+	size_t step;
+
+	/* advance in steps large enough to preserve both constraints */
+	step = RTE_MAX(align, bound);
+	/* mask selecting the bits that identify a 'bound'-sized window */
+	bmask = ~((phys_addr_t)bound - 1);
+
+	/* calculate offset to closest alignment */
+	start = RTE_ALIGN_CEIL(ms->phys_addr, align);
+	addr_offset = start - ms->phys_addr;
+
+	while (addr_offset + len < ms->len) {
+
+		/* check, do we meet boundary condition */
+		/* last byte of the buffer; '- (len != 0)' keeps end == start
+		 * for a zero-length request */
+		end = start + len - (len != 0);
+		if ((start & bmask) == (end & bmask))
+			break;
+
+		/* calculate next offset */
+		start = RTE_ALIGN_CEIL(start + 1, step);
+		addr_offset = start - ms->phys_addr;
+	}
+
+	return (addr_offset);
+}
+
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
- int socket_id, unsigned flags, unsigned align)
+ int socket_id, unsigned flags, unsigned align, unsigned bound)
{
struct rte_mem_config *mcfg;
unsigned i = 0;
int memseg_idx = -1;
- uint64_t addr_offset;
+ uint64_t addr_offset, seg_offset = 0;
size_t requested_len;
size_t memseg_len = 0;
phys_addr_t memseg_physaddr;
return NULL;
}
+ /* if alignment is not a power of two */
+ if (!rte_is_power_of_2(align)) {
+ RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
+ align);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* alignment less than cache size is not allowed */
+ if (align < CACHE_LINE_SIZE)
+ align = CACHE_LINE_SIZE;
+
+
/* align length on cache boundary. Check for overflow before doing so */
if (len > SIZE_MAX - CACHE_LINE_MASK) {
rte_errno = EINVAL; /* requested size too big */
return NULL;
}
+
len += CACHE_LINE_MASK;
len &= ~((size_t) CACHE_LINE_MASK);
- /* save requested length */
- requested_len = len;
+ /* save minimal requested length */
+ requested_len = RTE_MAX((size_t)CACHE_LINE_SIZE, len);
- /* reserve extra space for future alignment */
- if (len)
- len += align;
+ /* check that boundary condition is valid */
+ if (bound != 0 &&
+ (requested_len > bound || !rte_is_power_of_2(bound))) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
/* find the smallest segment matching requirements */
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
socket_id != free_memseg[i].socket_id)
continue;
+ /*
+ * calculate offset to closest alignment that
+ * meets boundary conditions.
+ */
+ addr_offset = align_phys_boundary(free_memseg + i,
+ requested_len, align, bound);
+
/* check len */
- if (len != 0 && len > free_memseg[i].len)
+ if ((requested_len + addr_offset) > free_memseg[i].len)
continue;
/* check flags for hugepage sizes */
if (memseg_idx == -1) {
memseg_idx = i;
memseg_len = free_memseg[i].len;
+ seg_offset = addr_offset;
}
/* find the biggest contiguous zone */
else if (len == 0) {
if (free_memseg[i].len > memseg_len) {
memseg_idx = i;
memseg_len = free_memseg[i].len;
+ seg_offset = addr_offset;
}
}
/*
* find the smallest (we already checked that current
* zone length is > len
*/
- else if (free_memseg[i].len < memseg_len) {
+ else if (free_memseg[i].len + align < memseg_len ||
+ (free_memseg[i].len <= memseg_len + align &&
+ addr_offset < seg_offset)) {
memseg_idx = i;
memseg_len = free_memseg[i].len;
+ seg_offset = addr_offset;
}
}
*/
if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)))
- return memzone_reserve_aligned_thread_unsafe(name, len - align,
- socket_id, 0, align);
+ return memzone_reserve_aligned_thread_unsafe(name,
+ len, socket_id, 0, align, bound);
RTE_LOG(ERR, EAL, "%s(%s, %zu, %d): "
"No appropriate segment found\n",
return NULL;
}
- /* get offset needed to adjust alignment */
- addr_offset = RTE_ALIGN_CEIL(free_memseg[memseg_idx].phys_addr, align) -
- free_memseg[memseg_idx].phys_addr;
-
/* save aligned physical and virtual addresses */
- memseg_physaddr = free_memseg[memseg_idx].phys_addr + addr_offset;
+ memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
- (uintptr_t) addr_offset);
+ (uintptr_t) seg_offset);
/* if we are looking for a biggest memzone */
- if (requested_len == 0)
- requested_len = memseg_len - addr_offset;
+ if (len == 0) {
+ if (bound == 0)
+ requested_len = memseg_len - seg_offset;
+ else
+ requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
+ bound) - memseg_physaddr;
+ }
/* set length to correct value */
- len = (size_t)addr_offset + requested_len;
+ len = (size_t)seg_offset + requested_len;
/* update our internal state */
free_memseg[memseg_idx].len -= len;
mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
mz->socket_id = free_memseg[memseg_idx].socket_id;
mz->flags = 0;
+ mz->memseg_id = memseg_idx;
return mz;
}
return NULL;
}
- /* if alignment is not a power of two */
- if (!rte_is_power_of_2(align)) {
- RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
- align);
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_write_lock(&mcfg->mlock);
+
+ mz = memzone_reserve_aligned_thread_unsafe(
+ name, len, socket_id, flags, align, 0);
+
+ rte_rwlock_write_unlock(&mcfg->mlock);
+
+ return mz;
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment and boundary).
+ * If the allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_bounded(const char *name, size_t len,
+ int socket_id, unsigned flags, unsigned align, unsigned bound)
+{
+ struct rte_mem_config *mcfg;
+ const struct rte_memzone *mz = NULL;
+
+ /* both sizes cannot be explicitly called for */
+ if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
rte_errno = EINVAL;
return NULL;
}
- /* alignment less than cache size is not allowed */
- if (align < CACHE_LINE_SIZE)
- align = CACHE_LINE_SIZE;
-
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
rte_rwlock_write_lock(&mcfg->mlock);
mz = memzone_reserve_aligned_thread_unsafe(
- name, len, socket_id, flags, align);
+ name, len, socket_id, flags, align, bound);
rte_rwlock_write_unlock(&mcfg->mlock);
return mz;
}
+
/*
* Lookup for the memzone identified by the given name
*/
int32_t socket_id; /**< NUMA socket ID. */
uint32_t flags; /**< Characteristics of this memzone. */
+ uint32_t memseg_id; /**< Index of the memseg this memzone was reserved from. */
} __attribute__((__packed__));
/**
* @param len
* The size of the memory to be reserved. If it
* is 0, the biggest contiguous zone will be reserved.
+ * @param socket_id
+ * The socket identifier in the case of
+ * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The flags parameter is used to request memzones to be
+ * taken from 1GB or 2MB hugepages.
+ * - RTE_MEMZONE_2MB - Reserve from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ * the requested page size is unavailable.
+ * If this flag is not set, the function
+ * will return error on an unavailable size
+ * request.
* @param align
* Alignment for resulting memzone. Must be a power of 2.
+ * @return
+ * A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ * on error.
+ * On error case, rte_errno will be set appropriately:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ * - EINVAL - invalid parameters
+ */
+const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
+ size_t len, int socket_id,
+ unsigned flags, unsigned align);
+
+/**
+ * Reserve a portion of physical memory with specified alignment and
+ * boundary.
+ *
+ * This function reserves some memory with specified alignment and
+ * boundary, and returns a pointer to a correctly filled memzone
+ * descriptor. If the allocation cannot be done or if the alignment
+ * or boundary are not a power of 2, returns NULL.
+ * The memory buffer is reserved in such a way that it does not cross the
+ * specified boundary. That implies that the requested length must be
+ * less than or equal to the boundary.
+ * Note: A reserved zone cannot be freed.
+ *
+ * @param name
+ * The name of the memzone. If it already exists, the function will
+ * fail and return NULL.
+ * @param len
+ * The size of the memory to be reserved. If it
+ * is 0, the biggest contiguous zone will be reserved.
* @param socket_id
* The socket identifier in the case of
* NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @param bound
+ * Boundary for resulting memzone. Must be a power of 2 or zero.
+ * Zero value implies no boundary condition.
* @return
* A pointer to a correctly-filled read-only memzone descriptor, or NULL
* on error.
* - ENOMEM - no appropriate memory area found in which to create memzone
* - EINVAL - invalid parameters
*/
-const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
- size_t len, int socket_id, unsigned flags,
- unsigned align);
+const struct rte_memzone *rte_memzone_reserve_bounded(const char *name,
+ size_t len, int socket_id,
+ unsigned flags, unsigned align, unsigned bound);
/**
* Lookup for a memzone.