static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
int socket_id, unsigned int flags, unsigned int align,
- unsigned int bound, bool contig)
+ unsigned int bound)
{
struct rte_memzone *mz;
struct rte_mem_config *mcfg;
size_t requested_len;
int socket, i;
+ bool contig;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
if (!rte_eal_has_hugepages())
socket_id = SOCKET_ID_ANY;
+ contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
+	/* malloc only cares about size flags, so remove contig flag from flags */
+ flags &= ~RTE_MEMZONE_IOVA_CONTIG;
+
if (len == 0) {
+ /* len == 0 is only allowed for non-contiguous zones */
+ if (contig) {
+ RTE_LOG(DEBUG, EAL, "Reserving zero-length contiguous memzones is not supported\n");
+ rte_errno = EINVAL;
+ return NULL;
+ }
if (bound != 0)
requested_len = bound;
else {
static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
- unsigned int flags, unsigned int align, unsigned int bound,
- bool contig)
+ unsigned int flags, unsigned int align, unsigned int bound)
{
struct rte_mem_config *mcfg;
const struct rte_memzone *mz = NULL;
rte_rwlock_write_lock(&mcfg->mlock);
mz = memzone_reserve_aligned_thread_unsafe(
- name, len, socket_id, flags, align, bound, contig);
+ name, len, socket_id, flags, align, bound);
rte_rwlock_write_unlock(&mcfg->mlock);
unsigned flags, unsigned align, unsigned bound)
{
return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
- align, bound, false);
+ align, bound);
}
/*
unsigned flags, unsigned align)
{
return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
- align, 0, false);
+ align, 0);
}
/*
unsigned flags)
{
return rte_memzone_reserve_thread_safe(name, len, socket_id,
- flags, RTE_CACHE_LINE_SIZE, 0,
- false);
+ flags, RTE_CACHE_LINE_SIZE, 0);
}
int
*/
#include <stdio.h>
+#include <rte_compat.h>
#include <rte_memory.h>
#include <rte_common.h>
#define RTE_MEMZONE_512MB 0x00040000 /**< Use 512MB pages. */
#define RTE_MEMZONE_4GB 0x00080000 /**< Use 4GB pages. */
#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */
+#define RTE_MEMZONE_IOVA_CONTIG 0x00100000 /**< Ask for IOVA-contiguous memzone. */
/**
* A structure describing a memzone, which is a contiguous portion of
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ * This option should be used when allocating
+ *                                     memory intended for hardware rings, etc.
* @return
* A pointer to a correctly-filled read-only memzone descriptor, or NULL
* on error.
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ * This option should be used when allocating
+ *                                     memory intended for hardware rings, etc.
* @param align
* Alignment for resulting memzone. Must be a power of 2.
* @return
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ * This option should be used when allocating
+ *                                     memory intended for hardware rings, etc.
* @param align
* Alignment for resulting memzone. Must be a power of 2.
* @param bound