mem: attempt multiple hugepage allocations at init
author    Anatoly Burakov <anatoly.burakov@intel.com>
          Fri, 22 Feb 2019 16:14:03 +0000 (16:14 +0000)
committer Thomas Monjalon <thomas@monjalon.net>
          Thu, 28 Mar 2019 22:28:58 +0000 (23:28 +0100)
When requesting memory with the ``-m`` or ``--socket-mem`` flags,
init will currently fail if the requested amount of memory is
bigger than what any single memseg list can hold, even if the
total amount of available memory is sufficient.

Fix this by making EAL attempt to allocate pages multiple times,
until we either fulfill the memory requirements or run out of
hugepages to allocate.

Bugzilla ID: 95

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
diff --git a/lib/librte_eal/linux/eal/eal_memory.c b/lib/librte_eal/linux/eal/eal_memory.c
index f6ee403..23fe441 100644
--- a/lib/librte_eal/linux/eal/eal_memory.c
+++ b/lib/librte_eal/linux/eal/eal_memory.c
@@ -1784,30 +1784,49 @@ eal_hugepage_init(void)
                        struct rte_memseg **pages;
                        struct hugepage_info *hpi = &used_hp[hp_sz_idx];
                        unsigned int num_pages = hpi->num_pages[socket_id];
-                       int num_pages_alloc, i;
+                       unsigned int num_pages_alloc;
 
                        if (num_pages == 0)
                                continue;
 
-                       pages = malloc(sizeof(*pages) * num_pages);
-
                        RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
                                num_pages, hpi->hugepage_sz >> 20, socket_id);
 
-                       num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
-                                       num_pages, hpi->hugepage_sz,
-                                       socket_id, true);
-                       if (num_pages_alloc < 0) {
+                       /* we may not be able to allocate all pages in one go,
+                        * because we break up our memory map into multiple
+                        * memseg lists. therefore, keep attempting the
+                        * allocation until we either reach the desired number
+                        * of pages, or run out of hugepages to allocate.
+                        */
+
+                       num_pages_alloc = 0;
+                       do {
+                               int i, cur_pages, needed;
+
+                               needed = num_pages - num_pages_alloc;
+
+                               pages = malloc(sizeof(*pages) * needed);
+                               if (pages == NULL)
+                                       return -1;
+
+                               /* do not request exact number of pages */
+                               cur_pages = eal_memalloc_alloc_seg_bulk(pages,
+                                               needed, hpi->hugepage_sz,
+                                               socket_id, false);
+                               if (cur_pages <= 0) {
+                                       free(pages);
+                                       return -1;
+                               }
+
+                               /* mark preallocated pages as unfreeable */
+                               for (i = 0; i < cur_pages; i++) {
+                                       struct rte_memseg *ms = pages[i];
+                                       ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
+                               }
                                free(pages);
-                               return -1;
-                       }
 
-                       /* mark preallocated pages as unfreeable */
-                       for (i = 0; i < num_pages_alloc; i++) {
-                               struct rte_memseg *ms = pages[i];
-                               ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
-                       }
-                       free(pages);
+                               num_pages_alloc += cur_pages;
+                       } while (num_pages_alloc != num_pages);
                }
        }
        /* if socket limits were specified, set them */
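
For illustration, below is a minimal standalone sketch of the same
allocate-until-satisfied loop. ``alloc_pages_bulk()`` is a hypothetical
stand-in for ``eal_memalloc_alloc_seg_bulk()``: like the real function,
it may satisfy only part of a request (here each attempt is capped at 4
pages to mimic one memseg list running out of space).

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for eal_memalloc_alloc_seg_bulk(): returns how
 * many pages were actually allocated, or -1 on failure. each attempt
 * is capped at 4 pages to mimic one memseg list running out of space.
 */
static int
alloc_pages_bulk(int needed)
{
        return needed > 4 ? 4 : needed;
}

int
main(void)
{
        unsigned int num_pages = 10;      /* total requirement */
        unsigned int num_pages_alloc = 0; /* allocated so far */

        do {
                int needed = num_pages - num_pages_alloc;
                int cur_pages;

                /* request the remainder, but accept a partial result */
                cur_pages = alloc_pages_bulk(needed);
                if (cur_pages <= 0) {
                        fprintf(stderr, "ran out of hugepages\n");
                        return EXIT_FAILURE;
                }
                num_pages_alloc += cur_pages;
                printf("allocated %d page(s), %u/%u fulfilled\n",
                                cur_pages, num_pages_alloc, num_pages);
        } while (num_pages_alloc != num_pages);

        return EXIT_SUCCESS;
}

Because each iteration requests only the remainder and accepts a
partial result, the loop terminates either when the requirement is
fulfilled or when an attempt yields nothing, mirroring the behaviour
of the patched eal_hugepage_init().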