Previously, to calculate the length of the memory area covered by a
memseg list, we would have needed to multiply the page size by the
length of the fbarray backing that memseg list. This is not obvious
and unnecessarily low level, so store the length in the memseg list
itself.
This breaks the ABI, so bump the EAL ABI version and document the
change. Also, while we are breaking the ABI, pack the members a
little better.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
``rte_config`` structure on account of improving DPDK usability when
using either ``--legacy-mem`` or ``--single-file-segments`` flags.
``rte_config`` structure on account of improving DPDK usability when
using either ``--legacy-mem`` or ``--single-file-segments`` flags.
+* eal: EAL library ABI version was changed due to previously announced work on
+  supporting external memory in DPDK:
+
+  - structure ``rte_memseg_list`` now has a new field indicating the length
+    of memory addressed by the segment list
+
Removed Items
-------------
Removed Items
-------------
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
- size_t sz = msl->memseg_arr.len * msl->page_sz;
void *end_va = RTE_PTR_ADD(msl->base_va, sz);
void **max_va = arg;
void *end_va = RTE_PTR_ADD(msl->base_va, sz);
void **max_va = arg;
EXPORT_MAP := ../../rte_eal_version.map
EXPORT_MAP := ../../rte_eal_version.map
# specific to bsdapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) := eal.c
# specific to bsdapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) := eal.c
}
msl->base_va = addr;
msl->page_sz = page_sz;
}
msl->base_va = addr;
msl->page_sz = page_sz;
+ msl->len = internal_config.memory;
msl->socket_id = 0;
/* populate memsegs. each memseg is 1 page long */
msl->socket_id = 0;
/* populate memsegs. each memseg is 1 page long */
return -1;
}
msl->base_va = addr;
return -1;
}
msl->base_va = addr;
/* a memseg list was specified, check if it's the right one */
start = msl->base_va;
/* a memseg list was specified, check if it's the right one */
start = msl->base_va;
- end = RTE_PTR_ADD(start, (size_t)msl->page_sz * msl->memseg_arr.len);
+ end = RTE_PTR_ADD(start, msl->len);
if (addr < start || addr >= end)
return NULL;
if (addr < start || addr >= end)
return NULL;
msl = &mcfg->memsegs[msl_idx];
start = msl->base_va;
msl = &mcfg->memsegs[msl_idx];
start = msl->base_va;
- end = RTE_PTR_ADD(start,
- (size_t)msl->page_sz * msl->memseg_arr.len);
+ end = RTE_PTR_ADD(start, msl->len);
if (addr >= start && addr < end)
break;
}
if (addr >= start && addr < end)
break;
}
uint64_t addr_64;
/**< Makes sure addr is always 64-bits */
};
uint64_t addr_64;
/**< Makes sure addr is always 64-bits */
};
- int socket_id; /**< Socket ID for all memsegs in this list. */
uint64_t page_sz; /**< Page size for all memsegs in this list. */
uint64_t page_sz; /**< Page size for all memsegs in this list. */
+ int socket_id; /**< Socket ID for all memsegs in this list. */
volatile uint32_t version; /**< version number for multiprocess sync. */
volatile uint32_t version; /**< version number for multiprocess sync. */
+ size_t len; /**< Length of memory area covered by this memseg list. */
struct rte_fbarray memseg_arr;
};
struct rte_fbarray memseg_arr;
};
int msl_idx, seg_idx, ret, dir_fd = -1;
start_addr = (uintptr_t) msl->base_va;
int msl_idx, seg_idx, ret, dir_fd = -1;
start_addr = (uintptr_t) msl->base_va;
- end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;
+ end_addr = start_addr + msl->len;
if ((uintptr_t)wa->ms->addr < start_addr ||
(uintptr_t)wa->ms->addr >= end_addr)
if ((uintptr_t)wa->ms->addr < start_addr ||
(uintptr_t)wa->ms->addr >= end_addr)
return -1;
}
local_msl->base_va = primary_msl->base_va;
return -1;
}
local_msl->base_va = primary_msl->base_va;
+ local_msl->len = primary_msl->len;
return -1;
}
msl->base_va = addr;
return -1;
}
msl->base_va = addr;
msl->base_va = addr;
msl->page_sz = page_sz;
msl->socket_id = 0;
msl->base_va = addr;
msl->page_sz = page_sz;
msl->socket_id = 0;
+ msl->len = internal_config.memory;
/* populate memsegs. each memseg is one page long */
for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
/* populate memsegs. each memseg is one page long */
for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
if (msl->memseg_arr.count > 0)
continue;
/* this is an unused list, deallocate it */
if (msl->memseg_arr.count > 0)
continue;
/* this is an unused list, deallocate it */
- mem_sz = (size_t)msl->page_sz * msl->memseg_arr.len;
munmap(msl->base_va, mem_sz);
msl->base_va = NULL;
munmap(msl->base_va, mem_sz);
msl->base_va = NULL;