/**
* Physical memory segment descriptor.
*/
+#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)
+/**< Prevent this segment from being freed back to the OS. */
struct rte_memseg {
int32_t socket_id; /**< NUMA socket ID. */
uint32_t nchannel; /**< Number of channels. */
uint32_t nrank; /**< Number of ranks. */
+ uint32_t flags; /**< Memseg-specific flags */
} __rte_packed;
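For context (not part of the patch): a minimal sketch of how an application could query the new flag for an arbitrary pointer, assuming the existing rte_mem_virt2memseg() lookup; the helper name is illustrative.

#include <rte_memory.h>

/* Illustrative helper: returns 1 if the page backing 'addr' may be
 * released back to the system, 0 if it is pinned, -1 if 'addr' is not
 * EAL-managed memory.
 */
static int
mem_addr_is_freeable(const void *addr)
{
	/* a NULL memseg list makes the lookup find the owning list */
	const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);

	if (ms == NULL)
		return -1;
	return (ms->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) ? 0 : 1;
}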
void *start, *aligned_start, *end, *aligned_end;
size_t len, aligned_len, page_sz;
struct rte_memseg_list *msl;
+ unsigned int i, n_segs;
int ret;
if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
	return -1;
if (aligned_len < page_sz)
goto free_unlock;
+ /* we can free something; however, some of these pages may be marked as
+  * unfreeable, so check for that as well
+  */
+ n_segs = aligned_len / page_sz;
+ for (i = 0; i < n_segs; i++) {
+ const struct rte_memseg *tmp =
+ rte_mem_virt2memseg(aligned_start, msl);
+
+ if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+ /* this is an unfreeable segment, so move start */
+ aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
+ }
+ }
+
+ /* recalculate length and number of segments */
+ aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
+ n_segs = aligned_len / page_sz;
+
+ /* check if we can still free some pages */
+ if (n_segs == 0)
+ goto free_unlock;
+
rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
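The loop above only ever moves the start of the candidate range forward, so its net effect is to skip a leading run of pages that must not be freed; pinned pages deeper in the range are caught by the per-page check in the free path below. A self-contained sketch of the same idea, with illustrative names and a stand-in predicate instead of the flag test:

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch: advance 'start' past leading pinned pages;
 * may return 'end' if nothing in the range can be freed.
 */
static void *
trim_pinned_start(void *start, void *end, size_t page_sz,
		int (*page_is_pinned)(const void *page))
{
	size_t i, n_pages = ((uintptr_t)end - (uintptr_t)start) / page_sz;

	for (i = 0; i < n_pages; i++) {
		if (!page_is_pinned(start))
			break;	/* first freeable page reached */
		start = (char *)start + page_sz;
	}
	return start;
}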
struct free_walk_param wa;
int i, walk_res;
+ /* if this page is marked as unfreeable, fail */
+ if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+ RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
+ ret = -1;
+ continue;
+ }
+
memset(&wa, 0, sizeof(wa));
for (hp_sz_idx = 0;
		hp_sz_idx < (int)RTE_DIM(internal_config.hugepage_info);
		hp_sz_idx++) {
for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
socket_id++) {
+ struct rte_memseg **pages;
struct hugepage_info *hpi = &used_hp[hp_sz_idx];
unsigned int num_pages = hpi->num_pages[socket_id];
- int num_pages_alloc;
+ int num_pages_alloc, i;
if (num_pages == 0)
continue;
+ pages = malloc(sizeof(*pages) * num_pages);
+ if (pages == NULL)
+ 	return -1;
+
RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
num_pages, hpi->hugepage_sz >> 20, socket_id);
- num_pages_alloc = eal_memalloc_alloc_seg_bulk(NULL,
+ num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
num_pages, hpi->hugepage_sz,
socket_id, true);
- if (num_pages_alloc < 0)
+ if (num_pages_alloc < 0) {
+ free(pages);
return -1;
+ }
+
+ /* mark preallocated pages as unfreeable */
+ for (i = 0; i < num_pages_alloc; i++) {
+ struct rte_memseg *ms = pages[i];
+ ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
+ }
+ free(pages);
}
}
return 0;
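A rough way to observe the end result (not part of the patch): after EAL initialization, every page preallocated by this loop should carry the flag. The sketch below counts such segments with the rte_memseg_walk() iterator and assumes its callback receives the owning list, the segment and a user argument.

#include <rte_common.h>
#include <rte_memory.h>

/* Illustrative callback: count memory segments that are pinned. */
static int
count_pinned_cb(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	unsigned int *count = arg;

	if (ms->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE)
		(*count)++;
	return 0;	/* zero means keep walking */
}

/* usage, after rte_eal_init():
 *	unsigned int n = 0;
 *	rte_memseg_walk(count_pinned_cb, &n);
 */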