From bec5625588d936acabc35f097b36f626d46a11c3 Mon Sep 17 00:00:00 2001
From: Anatoly Burakov
Date: Fri, 22 Feb 2019 16:14:02 +0000
Subject: [PATCH] mem: improve best-effort allocation

Previously, when using non-exact allocation, we were requesting
N pages to be allocated, but allowed the memory subsystem to
allocate less than requested. However, we were still expecting
to see N contiguous free pages in the memseg list.

This presents a problem because there is no way to try to
allocate as many pages as possible, even if there aren't enough
contiguous free entries in the list.

To address this, use the new "find biggest" fbarray APIs when
allocating a non-exact number of pages. This way, we will first
check how many entries in the list are actually available, and
then try to allocate up to that number.

Signed-off-by: Anatoly Burakov
---
 lib/librte_eal/linux/eal/eal_memalloc.c | 33 +++++++++++++++++++++----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/lib/librte_eal/linux/eal/eal_memalloc.c b/lib/librte_eal/linux/eal/eal_memalloc.c
index b6fb183db4..14c3ea838e 100644
--- a/lib/librte_eal/linux/eal/eal_memalloc.c
+++ b/lib/librte_eal/linux/eal/eal_memalloc.c
@@ -874,10 +874,32 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
 	need = wa->n_segs;
 
 	/* try finding space in memseg list */
-	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
-	if (cur_idx < 0)
-		return 0;
-	start_idx = cur_idx;
+	if (wa->exact) {
+		/* if we require exact number of pages in a list, find them */
+		cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0,
+				need);
+		if (cur_idx < 0)
+			return 0;
+		start_idx = cur_idx;
+	} else {
+		int cur_len;
+
+		/* we don't require exact number of pages, so we're going to go
+		 * for best-effort allocation. that means finding the biggest
+		 * unused block, and going with that.
+		 */
+		cur_idx = rte_fbarray_find_biggest_free(&cur_msl->memseg_arr,
+				0);
+		if (cur_idx < 0)
+			return 0;
+		start_idx = cur_idx;
+		/* adjust the size to possibly be smaller than original
+		 * request, but do not allow it to be bigger.
+		 */
+		cur_len = rte_fbarray_find_contig_free(&cur_msl->memseg_arr,
+				cur_idx);
+		need = RTE_MIN(need, (unsigned int)cur_len);
+	}
 
 	/* do not allow any page allocations during the time we're allocating,
 	 * because file creation and locking operations are not atomic,
@@ -954,7 +976,8 @@ out:
 		cur_msl->version++;
 	if (dir_fd >= 0)
 		close(dir_fd);
-	return 1;
+	/* if we didn't allocate any segments, move on to the next list */
+	return i > 0;
 }
 
 struct free_walk_param {
-- 
2.20.1
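
For reference, a minimal standalone sketch of how the exact and best-effort
lookups combine, using the same fbarray calls the patch relies on. The helper
name best_effort_find and its signature are illustrative only and are not part
of the patch, which folds this logic directly into alloc_seg_walk().

#include <stdbool.h>

#include <rte_common.h>		/* RTE_MIN */
#include <rte_fbarray.h>

/* Return the start index of a free run in 'arr' and store its usable length
 * in *len, or return -1 if no suitable run exists. With 'exact' set, the run
 * must hold at least 'need' entries; otherwise take the biggest free run,
 * capped at 'need'.
 */
static int
best_effort_find(struct rte_fbarray *arr, unsigned int need, bool exact,
		unsigned int *len)
{
	int idx, run;

	if (exact) {
		/* need a run of at least 'need' contiguous free entries */
		idx = rte_fbarray_find_next_n_free(arr, 0, need);
		if (idx < 0)
			return -1;
		*len = need;
		return idx;
	}

	/* best effort: locate the biggest free run... */
	idx = rte_fbarray_find_biggest_free(arr, 0);
	if (idx < 0)
		return -1;

	/* ...then measure it, and never hand back more than was requested */
	run = rte_fbarray_find_contig_free(arr, idx);
	*len = RTE_MIN(need, (unsigned int)run);
	return idx;
}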