eal: add option to limit memory allocation on sockets
[dpdk.git] / lib / librte_eal / linuxapp / eal / eal_memory.c
index eb430a0..a48a350 100644 (file)
@@ -259,7 +259,6 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
        int fd;
        unsigned i;
        void *virtaddr;
-       struct flock lck = {0};
 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
        int node_id = -1;
        int essential_prev = 0;
@@ -378,13 +377,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
                }
                *(int *)virtaddr = 0;
 
-
                /* set shared lock on the file. */
-               lck.l_type = F_RDLCK;
-               lck.l_whence = SEEK_SET;
-               lck.l_start = 0;
-               lck.l_len = hugepage_sz;
-               if (fcntl(fd, F_SETLK, &lck) == -1) {
+               if (flock(fd, LOCK_SH) < 0) {
                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
                                __func__, strerror(errno));
                        close(fd);
@@ -708,7 +702,6 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
 #endif
                struct hugepage_file *hfile = &hugepages[cur_page];
                struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
-               struct flock lck;
                void *addr;
                int fd;
 
@@ -719,11 +712,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
                        return -1;
                }
                /* set shared lock on the file. */
-               lck.l_type = F_RDLCK;
-               lck.l_whence = SEEK_SET;
-               lck.l_start = 0;
-               lck.l_len = page_sz;
-               if (fcntl(fd, F_SETLK, &lck) == -1) {
+               if (flock(fd, LOCK_SH) < 0) {
                        RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
                                        hfile->filepath, strerror(errno));
                        close(fd);
@@ -778,6 +767,40 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
        return 0;
 }
 
+/*
+ * Compute how much memory a single memseg list of the given page size
+ * should cover: capped at RTE_MAX_MEMSEG_PER_LIST pages and at
+ * RTE_MAX_MEM_MB_PER_LIST (and the caller-supplied max_mem), never less
+ * than one page, and aligned up to a multiple of the page size.
+ */
+static uint64_t
+get_mem_amount(uint64_t page_sz, uint64_t max_mem)
+{
+       uint64_t area_sz, max_pages;
+
+       /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
+       max_pages = RTE_MAX_MEMSEG_PER_LIST;
+       max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
+
+       area_sz = RTE_MIN(page_sz * max_pages, max_mem);
+
+       /* make sure the list isn't smaller than the page size */
+       area_sz = RTE_MAX(area_sz, page_sz);
+
+       return RTE_ALIGN(area_sz, page_sz);
+}
+
+/* Tear down a memseg list: destroy its fbarray and zero the structure. */
+static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+       if (rte_fbarray_destroy(&msl->memseg_arr)) {
+               RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+               return -1;
+       }
+       memset(msl, 0, sizeof(*msl));
+       return 0;
+}
+
 #define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
 static int
 alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
@@ -1160,8 +1177,8 @@ calc_num_pages_per_socket(uint64_t * memory,
        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
                /* skips if the memory on specific socket wasn't requested */
                for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
-                       snprintf(hp_used[i].hugedir, sizeof(hp_used[i].hugedir),
-                                       "%s", hp_info[i].hugedir);
+                       strlcpy(hp_used[i].hugedir, hp_info[i].hugedir,
+                               sizeof(hp_used[i].hugedir));
                        hp_used[i].num_pages[socket] = RTE_MIN(
                                        memory[socket] / hp_info[i].hugepage_sz,
                                        hp_info[i].num_pages[socket]);
@@ -1329,7 +1346,7 @@ eal_legacy_hugepage_init(void)
                }
 
                addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
-                               MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (addr == MAP_FAILED) {
                        RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
                                        strerror(errno));
@@ -1510,7 +1527,7 @@ eal_legacy_hugepage_init(void)
        }
 
        /* create shared memory */
-       hugepage = create_shared_memory(eal_hugepage_file_path(),
+       hugepage = create_shared_memory(eal_hugepage_data_path(),
                        nr_hugefiles * sizeof(struct hugepage_file));
 
        if (hugepage == NULL) {
@@ -1603,6 +1620,35 @@ fail:
        return -1;
 }
 
+/*
+ * rte_memseg_list_walk() callback: for each list whose page size matches
+ * hpi->hugepage_sz, add its segment-array length to hpi's per-socket totals.
+ */
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+       struct hugepage_info *hpi = arg;
+
+       if (msl->page_sz != hpi->hugepage_sz)
+               return 0;
+
+       hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+       return 0;
+}
+
+/*
+ * Allocation validator for --socket-limit: always returns -1 (deny).
+ * Registered with each socket's configured limit, so presumably it is only
+ * consulted for allocations that would go past that limit — see
+ * rte_mem_alloc_validator_register().
+ */
+static int
+limits_callback(int socket_id, size_t cur_limit, size_t new_len)
+{
+       RTE_SET_USED(socket_id);
+       RTE_SET_USED(cur_limit);
+       RTE_SET_USED(new_len);
+       return -1;
+}
+
 static int
 eal_hugepage_init(void)
 {
@@ -1617,10 +1655,29 @@ eal_hugepage_init(void)
        for (hp_sz_idx = 0;
                        hp_sz_idx < (int) internal_config.num_hugepage_sizes;
                        hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+               struct hugepage_info dummy;
+               unsigned int i;
+#endif
                /* also initialize used_hp hugepage sizes in used_hp */
                struct hugepage_info *hpi;
                hpi = &internal_config.hugepage_info[hp_sz_idx];
                used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+               /* for 32-bit, limit number of pages on socket to whatever we've
+                * preallocated, as we cannot allocate more.
+                */
+               memset(&dummy, 0, sizeof(dummy));
+               dummy.hugepage_sz = hpi->hugepage_sz;
+               if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+                       return -1;
+
+               for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+                       hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+                                       dummy.num_pages[i]);
+               }
+#endif
        }
 
        /* make a copy of socket_mem, needed for balanced allocation. */
@@ -1638,21 +1695,49 @@ eal_hugepage_init(void)
                        hp_sz_idx++) {
                for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
                                socket_id++) {
+                       struct rte_memseg **pages;
                        struct hugepage_info *hpi = &used_hp[hp_sz_idx];
                        unsigned int num_pages = hpi->num_pages[socket_id];
-                       int num_pages_alloc;
+                       int num_pages_alloc, i;
 
                        if (num_pages == 0)
                                continue;
 
+                       /* temporary table of allocated segments, so that we
+                        * can mark them unfreeable below; malloc may fail.
+                        */
+                       pages = malloc(sizeof(*pages) * num_pages);
+                       if (pages == NULL) {
+                               RTE_LOG(ERR, EAL, "Failed to allocate page table\n");
+                               return -1;
+                       }
+
                        RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
                                num_pages, hpi->hugepage_sz >> 20, socket_id);
 
-                       num_pages_alloc = eal_memalloc_alloc_seg_bulk(NULL,
+                       num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
                                        num_pages, hpi->hugepage_sz,
                                        socket_id, true);
-                       if (num_pages_alloc < 0)
+                       if (num_pages_alloc < 0) {
+                               free(pages);
                                return -1;
+                       }
+
+                       /* mark preallocated pages as unfreeable */
+                       for (i = 0; i < num_pages_alloc; i++) {
+                               struct rte_memseg *ms = pages[i];
+                               ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
+                       }
+                       free(pages);
+               }
+       }
+       /* if socket limits were specified, set them */
+       if (internal_config.force_socket_limits) {
+               unsigned int i;
+               for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+                       uint64_t limit = internal_config.socket_limit[i];
+                       if (limit == 0)
+                               continue;
+                       if (rte_mem_alloc_validator_register("socket-limit",
+                                       limits_callback, i, limit))
+                               RTE_LOG(ERR, EAL, "Failed to register socket limits validator callback\n");
                }
        }
        return 0;
@@ -1695,16 +1776,18 @@ eal_legacy_hugepage_attach(void)
 
        test_phys_addrs_available();
 
-       fd_hugepage = open(eal_hugepage_file_path(), O_RDONLY);
+       fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
        if (fd_hugepage < 0) {
-               RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_file_path());
+               RTE_LOG(ERR, EAL, "Could not open %s\n",
+                               eal_hugepage_data_path());
                goto error;
        }
 
        size = getFileSize(fd_hugepage);
        hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
        if (hp == MAP_FAILED) {
-               RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_file_path());
+               RTE_LOG(ERR, EAL, "Could not mmap %s\n",
+                               eal_hugepage_data_path());
                goto error;
        }
 
@@ -1720,7 +1803,6 @@ eal_legacy_hugepage_attach(void)
                struct hugepage_file *hf = &hp[i];
                size_t map_sz = hf->size;
                void *map_addr = hf->final_va;
-               struct flock lck;
 
                /* if size is zero, no more pages left */
                if (map_sz == 0)
@@ -1738,15 +1820,12 @@ eal_legacy_hugepage_attach(void)
                if (map_addr == MAP_FAILED) {
                        RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
                                hf->filepath, strerror(errno));
+                       close(fd);
                        goto error;
                }
 
                /* set shared lock on the file. */
-               lck.l_type = F_RDLCK;
-               lck.l_whence = SEEK_SET;
-               lck.l_start = 0;
-               lck.l_len = map_sz;
-               if (fcntl(fd, F_SETLK, &lck) == -1) {
+               if (flock(fd, LOCK_SH) < 0) {
                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
                                __func__, strerror(errno));
                        close(fd);
@@ -1810,3 +1889,328 @@ rte_eal_using_phys_addrs(void)
 {
        return phys_addrs_available;
 }
+
+/*
+ * 32-bit primary: pre-create memseg lists using heuristics, since VA space
+ * is scarce. Memory is spread across the sockets the user asked for via
+ * --socket-mem (plus any leftover shared between them), or placed on the
+ * master lcore's socket when no per-socket amounts were given.
+ */
+static int __rte_unused
+memseg_primary_init_32(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int active_sockets, hpi_idx, msl_idx = 0;
+       unsigned int socket_id, i;
+       struct rte_memseg_list *msl;
+       uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
+       uint64_t max_mem;
+
+       /* no-huge does not need this at all */
+       if (internal_config.no_hugetlbfs)
+               return 0;
+
+       /* this is a giant hack, but desperate times call for desperate
+        * measures. in legacy 32-bit mode, we cannot preallocate VA space,
+        * because having upwards of 2 gigabytes of VA space already mapped will
+        * interfere with our ability to map and sort hugepages.
+        *
+        * therefore, in legacy 32-bit mode, we will be initializing memseg
+        * lists much later - in eal_memory.c, right after we unmap all the
+        * unneeded pages. this will not affect secondary processes, as those
+        * should be able to mmap the space without (too many) problems.
+        */
+       if (internal_config.legacy_mem)
+               return 0;
+
+       /* 32-bit mode is a very special case. we cannot know in advance where
+        * the user will want to allocate their memory, so we have to do some
+        * heuristics.
+        */
+       active_sockets = 0;
+       total_requested_mem = 0;
+       if (internal_config.force_sockets)
+               for (i = 0; i < rte_socket_count(); i++) {
+                       uint64_t mem;
+
+                       socket_id = rte_socket_id_by_idx(i);
+                       mem = internal_config.socket_mem[socket_id];
+
+                       if (mem == 0)
+                               continue;
+
+                       active_sockets++;
+                       total_requested_mem += mem;
+               }
+       else
+               total_requested_mem = internal_config.memory;
+
+       max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
+       if (total_requested_mem > max_mem) {
+               RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
+                               (unsigned int)(max_mem >> 20));
+               return -1;
+       }
+       total_extra_mem = max_mem - total_requested_mem;
+       extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
+                       total_extra_mem / active_sockets;
+
+       /* the allocation logic is a little bit convoluted, but here's how it
+        * works, in a nutshell:
+        *  - if user hasn't specified on which sockets to allocate memory via
+        *    --socket-mem, we allocate all of our memory on master core socket.
+        *  - if user has specified sockets to allocate memory on, there may be
+        *    some "unused" memory left (e.g. if user has specified --socket-mem
+        *    such that not all memory adds up to 2 gigabytes), so add it to all
+        *    sockets that are in use equally.
+        *
+        * page sizes are sorted by size in descending order, so we can safely
+        * assume that we dispense with bigger page sizes first.
+        */
+
+       /* create memseg lists */
+       for (i = 0; i < rte_socket_count(); i++) {
+               int hp_sizes = (int) internal_config.num_hugepage_sizes;
+               uint64_t max_socket_mem, cur_socket_mem;
+               unsigned int master_lcore_socket;
+               struct rte_config *cfg = rte_eal_get_configuration();
+               bool skip;
+
+               socket_id = rte_socket_id_by_idx(i);
+
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+               if (socket_id > 0)
+                       break;
+#endif
+
+               /* if we didn't specifically request memory on this socket */
+               skip = active_sockets != 0 &&
+                               internal_config.socket_mem[socket_id] == 0;
+               /* ...or if we didn't specifically request memory on *any*
+                * socket, and this is not master lcore
+                */
+               master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
+               skip |= active_sockets == 0 && socket_id != master_lcore_socket;
+
+               if (skip) {
+                       RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
+                                       socket_id);
+                       continue;
+               }
+
+               /* max amount of memory on this socket */
+               max_socket_mem = (active_sockets != 0 ?
+                                       internal_config.socket_mem[socket_id] :
+                                       internal_config.memory) +
+                                       extra_mem_per_socket;
+               cur_socket_mem = 0;
+
+               for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
+                       uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
+                       uint64_t hugepage_sz;
+                       struct hugepage_info *hpi;
+                       int type_msl_idx, max_segs, total_segs = 0;
+
+                       hpi = &internal_config.hugepage_info[hpi_idx];
+                       hugepage_sz = hpi->hugepage_sz;
+
+                       /* check if pages are actually available */
+                       if (hpi->num_pages[socket_id] == 0)
+                               continue;
+
+                       max_segs = RTE_MAX_MEMSEG_PER_TYPE;
+                       max_pagesz_mem = max_socket_mem - cur_socket_mem;
+
+                       /* make it multiple of page size */
+                       max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
+                                       hugepage_sz);
+
+                       RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
+                                       "%" PRIu64 "M on socket %i\n",
+                                       max_pagesz_mem >> 20, socket_id);
+
+                       type_msl_idx = 0;
+                       while (cur_pagesz_mem < max_pagesz_mem &&
+                                       total_segs < max_segs) {
+                               uint64_t cur_mem;
+                               unsigned int n_segs;
+
+                               if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
+                                       RTE_LOG(ERR, EAL,
+                                               "No more space in memseg lists, please increase %s\n",
+                                               RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
+                                       return -1;
+                               }
+
+                               msl = &mcfg->memsegs[msl_idx];
+
+                               cur_mem = get_mem_amount(hugepage_sz,
+                                               max_pagesz_mem);
+                               n_segs = cur_mem / hugepage_sz;
+
+                               if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+                                               socket_id, type_msl_idx)) {
+                                       /* failing to allocate a memseg list is
+                                        * a serious error.
+                                        */
+                                       RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+                                       return -1;
+                               }
+
+                               if (alloc_va_space(msl)) {
+                                       /* if we couldn't allocate VA space, we
+                                        * can try with smaller page sizes.
+                                        */
+                                       RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+                                       /* deallocate memseg list */
+                                       if (free_memseg_list(msl))
+                                               return -1;
+                                       break;
+                               }
+
+                               total_segs += msl->memseg_arr.len;
+                               cur_pagesz_mem = total_segs * hugepage_sz;
+                               type_msl_idx++;
+                               msl_idx++;
+                       }
+                       cur_socket_mem += cur_pagesz_mem;
+               }
+               if (cur_socket_mem == 0) {
+                       RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+                               socket_id);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * 64-bit primary: pre-create memseg lists and reserve VA space for every
+ * hugepage size on every detected socket, subject to the global and
+ * per-type memory/segment limits.
+ */
+static int __rte_unused
+memseg_primary_init(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int i, socket_id, hpi_idx, msl_idx = 0;
+       struct rte_memseg_list *msl;
+       uint64_t max_mem, total_mem;
+
+       /* no-huge does not need this at all */
+       if (internal_config.no_hugetlbfs)
+               return 0;
+
+       max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
+       total_mem = 0;
+
+       /* create memseg lists */
+       for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+                       hpi_idx++) {
+               struct hugepage_info *hpi;
+               uint64_t hugepage_sz;
+
+               hpi = &internal_config.hugepage_info[hpi_idx];
+               hugepage_sz = hpi->hugepage_sz;
+
+               for (i = 0; i < (int) rte_socket_count(); i++) {
+                       uint64_t max_type_mem, total_type_mem = 0;
+                       int type_msl_idx, max_segs, total_segs = 0;
+
+                       socket_id = rte_socket_id_by_idx(i);
+
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+                       if (socket_id > 0)
+                               break;
+#endif
+
+                       if (total_mem >= max_mem)
+                               break;
+
+                       max_type_mem = RTE_MIN(max_mem - total_mem,
+                               (uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
+                       max_segs = RTE_MAX_MEMSEG_PER_TYPE;
+
+                       type_msl_idx = 0;
+                       while (total_type_mem < max_type_mem &&
+                                       total_segs < max_segs) {
+                               uint64_t cur_max_mem, cur_mem;
+                               unsigned int n_segs;
+
+                               if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
+                                       RTE_LOG(ERR, EAL,
+                                               "No more space in memseg lists, please increase %s\n",
+                                               RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
+                                       return -1;
+                               }
+
+                               msl = &mcfg->memsegs[msl_idx++];
+
+                               cur_max_mem = max_type_mem - total_type_mem;
+
+                               cur_mem = get_mem_amount(hugepage_sz,
+                                               cur_max_mem);
+                               n_segs = cur_mem / hugepage_sz;
+
+                               if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+                                               socket_id, type_msl_idx))
+                                       return -1;
+
+                               total_segs += msl->memseg_arr.len;
+                               total_type_mem = total_segs * hugepage_sz;
+                               type_msl_idx++;
+
+                               if (alloc_va_space(msl)) {
+                                       RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
+                                       return -1;
+                               }
+                       }
+                       total_mem += total_type_mem;
+               }
+       }
+       return 0;
+}
+
+/* Secondary: attach to primary's memseg lists and mirror their VA space. */
+static int
+memseg_secondary_init(void)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int msl_idx = 0;
+       struct rte_memseg_list *msl;
+
+       for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+
+               msl = &mcfg->memsegs[msl_idx];
+
+               /* skip empty memseg lists */
+               if (msl->memseg_arr.len == 0)
+                       continue;
+
+               if (rte_fbarray_attach(&msl->memseg_arr)) {
+                       RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
+                       return -1;
+               }
+
+               /* preallocate VA space */
+               if (alloc_va_space(msl)) {
+                       RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+int
+rte_eal_memseg_init(void)
+{
+       return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+#ifndef RTE_ARCH_64
+                       memseg_primary_init_32() :
+#else
+                       memseg_primary_init() :
+#endif
+                       memseg_secondary_init();
+}