crypto/qat: add device files
[dpdk.git] / lib / librte_eal / linuxapp / eal / eal_memory.c
index daab364..c917de1 100644 (file)
@@ -40,6 +40,7 @@
 #include <rte_string_fns.h>
 
 #include "eal_private.h"
+#include "eal_memalloc.h"
 #include "eal_internal_cfg.h"
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
@@ -258,7 +259,6 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
        int fd;
        unsigned i;
        void *virtaddr;
-       struct flock lck = {0};
 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
        int node_id = -1;
        int essential_prev = 0;
@@ -377,13 +377,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
                }
                *(int *)virtaddr = 0;
 
-
                /* set shared lock on the file. */
-               lck.l_type = F_RDLCK;
-               lck.l_whence = SEEK_SET;
-               lck.l_start = 0;
-               lck.l_len = hugepage_sz;
-               if (fcntl(fd, F_SETLK, &lck) == -1) {
+               if (flock(fd, LOCK_SH) < 0) {
                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
                                __func__, strerror(errno));
                        close(fd);
@@ -707,7 +702,6 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
 #endif
                struct hugepage_file *hfile = &hugepages[cur_page];
                struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
-               struct flock lck;
                void *addr;
                int fd;
 
@@ -718,11 +712,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
                        return -1;
                }
                /* set shared lock on the file. */
-               lck.l_type = F_RDLCK;
-               lck.l_whence = SEEK_SET;
-               lck.l_start = 0;
-               lck.l_len = page_sz;
-               if (fcntl(fd, F_SETLK, &lck) == -1) {
+               if (flock(fd, LOCK_SH) < 0) {
                        RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
                                        hfile->filepath, strerror(errno));
                        close(fd);
@@ -1059,7 +1049,7 @@ get_socket_mem_size(int socket)
 
        for (i = 0; i < internal_config.num_hugepage_sizes; i++){
                struct hugepage_info *hpi = &internal_config.hugepage_info[i];
-               if (hpi->hugedir != NULL)
+               if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0)
                        size += hpi->hugepage_sz * hpi->num_pages[socket];
        }
 
@@ -1159,7 +1149,8 @@ calc_num_pages_per_socket(uint64_t * memory,
        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
                /* skips if the memory on specific socket wasn't requested */
                for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
-                       hp_used[i].hugedir = hp_info[i].hugedir;
+                       strlcpy(hp_used[i].hugedir, hp_info[i].hugedir,
+                               sizeof(hp_used[i].hugedir));
                        hp_used[i].num_pages[socket] = RTE_MIN(
                                        memory[socket] / hp_info[i].hugepage_sz,
                                        hp_info[i].num_pages[socket]);
@@ -1234,7 +1225,7 @@ eal_get_hugepage_mem_size(void)
 
        for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
                struct hugepage_info *hpi = &internal_config.hugepage_info[i];
-               if (hpi->hugedir != NULL) {
+               if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
                        for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
                                size += hpi->hugepage_sz * hpi->num_pages[j];
                        }
@@ -1327,7 +1318,7 @@ eal_legacy_hugepage_init(void)
                }
 
                addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
-                               MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (addr == MAP_FAILED) {
                        RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
                                        strerror(errno));
@@ -1508,7 +1499,7 @@ eal_legacy_hugepage_init(void)
        }
 
        /* create shared memory */
-       hugepage = create_shared_memory(eal_hugepage_info_path(),
+       hugepage = create_shared_memory(eal_hugepage_data_path(),
                        nr_hugefiles * sizeof(struct hugepage_file));
 
        if (hugepage == NULL) {
@@ -1601,6 +1592,113 @@ fail:
        return -1;
 }
 
+/* rte_memseg_list_walk() callback: tally, per socket, how many pages of
+ * hpi->hugepage_sz the preallocated memseg lists can hold (only used on
+ * 32-bit builds, hence __rte_unused).
+ */
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+       struct hugepage_info *hpi = arg;
+
+       if (msl->page_sz != hpi->hugepage_sz)
+               return 0;
+
+       hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+       return 0;
+}
+
+/* non-legacy init: compute per-socket page counts from socket_mem and
+ * preallocate them via eal_memalloc, marking the pages as unfreeable.
+ */
+static int
+eal_hugepage_init(void)
+{
+       struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+       uint64_t memory[RTE_MAX_NUMA_NODES];
+       int hp_sz_idx, socket_id;
+
+       test_phys_addrs_available();
+
+       memset(used_hp, 0, sizeof(used_hp));
+
+       for (hp_sz_idx = 0;
+                       hp_sz_idx < (int) internal_config.num_hugepage_sizes;
+                       hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+               struct hugepage_info dummy;
+               unsigned int i;
+#endif
+               /* also initialize used_hp hugepage sizes in used_hp */
+               struct hugepage_info *hpi;
+               hpi = &internal_config.hugepage_info[hp_sz_idx];
+               used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+               /* for 32-bit, limit number of pages on socket to whatever we've
+                * preallocated, as we cannot allocate more.
+                */
+               memset(&dummy, 0, sizeof(dummy));
+               dummy.hugepage_sz = hpi->hugepage_sz;
+               if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+                       return -1;
+
+               for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+                       hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+                                       dummy.num_pages[i]);
+               }
+#endif
+       }
+
+       /* make a copy of socket_mem, needed for balanced allocation. */
+       for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
+               memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];
+
+       /* calculate final number of pages */
+       if (calc_num_pages_per_socket(memory,
+                       internal_config.hugepage_info, used_hp,
+                       internal_config.num_hugepage_sizes) < 0)
+               return -1;
+
+       for (hp_sz_idx = 0;
+                       hp_sz_idx < (int)internal_config.num_hugepage_sizes;
+                       hp_sz_idx++) {
+               for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
+                               socket_id++) {
+                       struct rte_memseg **pages;
+                       struct hugepage_info *hpi = &used_hp[hp_sz_idx];
+                       unsigned int num_pages = hpi->num_pages[socket_id];
+                       int num_pages_alloc, i;
+
+                       if (num_pages == 0)
+                               continue;
+
+                       pages = malloc(sizeof(*pages) * num_pages);
+                       if (pages == NULL)
+                               return -1;
+
+                       RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
+                               num_pages, hpi->hugepage_sz >> 20, socket_id);
+
+                       num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
+                                       num_pages, hpi->hugepage_sz,
+                                       socket_id, true);
+                       if (num_pages_alloc < 0) {
+                               free(pages);
+                               return -1;
+                       }
+
+                       /* mark preallocated pages as unfreeable */
+                       for (i = 0; i < num_pages_alloc; i++) {
+                               struct rte_memseg *ms = pages[i];
+                               ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
+                       }
+                       free(pages);
+               }
+       }
+       return 0;
+}
+
 /*
  * uses fstat to report the size of a file on disk
  */
@@ -1638,16 +1727,18 @@ eal_legacy_hugepage_attach(void)
 
        test_phys_addrs_available();
 
-       fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
+       fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
        if (fd_hugepage < 0) {
-               RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
+               RTE_LOG(ERR, EAL, "Could not open %s\n",
+                               eal_hugepage_data_path());
                goto error;
        }
 
        size = getFileSize(fd_hugepage);
        hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
        if (hp == MAP_FAILED) {
-               RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
+               RTE_LOG(ERR, EAL, "Could not mmap %s\n",
+                               eal_hugepage_data_path());
                goto error;
        }
 
@@ -1663,7 +1754,6 @@ eal_legacy_hugepage_attach(void)
                struct hugepage_file *hf = &hp[i];
                size_t map_sz = hf->size;
                void *map_addr = hf->final_va;
-               struct flock lck;
 
                /* if size is zero, no more pages left */
                if (map_sz == 0)
@@ -1681,15 +1771,12 @@ eal_legacy_hugepage_attach(void)
                if (map_addr == MAP_FAILED) {
                        RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
                                hf->filepath, strerror(errno));
+                       close(fd);
                        goto error;
                }
 
                /* set shared lock on the file. */
-               lck.l_type = F_RDLCK;
-               lck.l_whence = SEEK_SET;
-               lck.l_start = 0;
-               lck.l_len = map_sz;
-               if (fcntl(fd, F_SETLK, &lck) == -1) {
+               if (flock(fd, LOCK_SH) < 0) {
                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
                                __func__, strerror(errno));
                        close(fd);
@@ -1720,20 +1807,32 @@ error:
        return -1;
 }
 
+static int
+eal_hugepage_attach(void)
+{
+       if (eal_memalloc_sync_with_primary()) {
+               RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
+               if (aslr_enabled() > 0)
+                       RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
+               return -1;
+       }
+       return 0;
+}
+
 int
 rte_eal_hugepage_init(void)
 {
-       if (internal_config.legacy_mem)
-               return eal_legacy_hugepage_init();
-       return -1;
+       return internal_config.legacy_mem ?
+                       eal_legacy_hugepage_init() :
+                       eal_hugepage_init();
 }
 
 int
 rte_eal_hugepage_attach(void)
 {
-       if (internal_config.legacy_mem)
-               return eal_legacy_hugepage_attach();
-       return -1;
+       return internal_config.legacy_mem ?
+                       eal_legacy_hugepage_attach() :
+                       eal_hugepage_attach();
 }
 
 int