eal/freebsd: use EAL internal configuration accessor and common memseg list helpers in eal_memory.c
[dpdk.git] / lib / librte_eal / freebsd / eal_memory.c
index 5bc2da1..b8b337a 100644 (file)
@@ -57,77 +57,58 @@ rte_eal_hugepage_init(void)
        uint64_t total_mem = 0;
        void *addr;
        unsigned int i, j, seg_idx = 0;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;
 
        /* for debug purposes, hugetlbfs can be disabled */
-       if (internal_config.no_hugetlbfs) {
+       if (internal_conf->no_hugetlbfs) {
                struct rte_memseg_list *msl;
-               struct rte_fbarray *arr;
-               struct rte_memseg *ms;
-               uint64_t page_sz;
-               int n_segs, cur_seg;
+               uint64_t mem_sz, page_sz;
+               int n_segs;
 
                /* create a memseg list */
                msl = &mcfg->memsegs[0];
 
+               mem_sz = internal_conf->memory;
                page_sz = RTE_PGSIZE_4K;
-               n_segs = internal_config.memory / page_sz;
+               n_segs = mem_sz / page_sz;
 
-               if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
-                               sizeof(struct rte_memseg))) {
-                       RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+               if (eal_memseg_list_init_named(
+                               msl, "nohugemem", page_sz, n_segs, 0, true)) {
                        return -1;
                }
 
-               addr = mmap(NULL, internal_config.memory,
-                               PROT_READ | PROT_WRITE,
+               addr = mmap(NULL, mem_sz, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (addr == MAP_FAILED) {
                        RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
                                        strerror(errno));
                        return -1;
                }
-               msl->base_va = addr;
-               msl->page_sz = page_sz;
-               msl->len = internal_config.memory;
-               msl->socket_id = 0;
-               msl->heap = 1;
-
-               /* populate memsegs. each memseg is 1 page long */
-               for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
-                       arr = &msl->memseg_arr;
 
-                       ms = rte_fbarray_get(arr, cur_seg);
-                       if (rte_eal_iova_mode() == RTE_IOVA_VA)
-                               ms->iova = (uintptr_t)addr;
-                       else
-                               ms->iova = RTE_BAD_IOVA;
-                       ms->addr = addr;
-                       ms->hugepage_sz = page_sz;
-                       ms->len = page_sz;
-                       ms->socket_id = 0;
+               msl->base_va = addr;
+               msl->len = mem_sz;
 
-                       rte_fbarray_set_used(arr, cur_seg);
+               eal_memseg_list_populate(msl, addr, n_segs);
 
-                       addr = RTE_PTR_ADD(addr, page_sz);
-               }
                return 0;
        }
 
        /* map all hugepages and sort them */
-       for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
                struct hugepage_info *hpi;
                rte_iova_t prev_end = 0;
                int prev_ms_idx = -1;
                uint64_t page_sz, mem_needed;
                unsigned int n_pages, max_pages;
 
-               hpi = &internal_config.hugepage_info[i];
+               hpi = &internal_conf->hugepage_info[i];
                page_sz = hpi->hugepage_sz;
                max_pages = hpi->num_pages[0];
-               mem_needed = RTE_ALIGN_CEIL(internal_config.memory - total_mem,
+               mem_needed = RTE_ALIGN_CEIL(internal_conf->memory - total_mem,
                                page_sz);
 
                n_pages = RTE_MIN(mem_needed / page_sz, max_pages);
@@ -231,14 +212,14 @@ rte_eal_hugepage_init(void)
 
                        total_mem += seg->len;
                }
-               if (total_mem >= internal_config.memory)
+               if (total_mem >= internal_conf->memory)
                        break;
        }
-       if (total_mem < internal_config.memory) {
+       if (total_mem < internal_conf->memory) {
                RTE_LOG(ERR, EAL, "Couldn't reserve requested memory, "
                                "requested: %" PRIu64 "M "
                                "available: %" PRIu64 "M\n",
-                               internal_config.memory >> 20, total_mem >> 20);
+                               internal_conf->memory >> 20, total_mem >> 20);
                return -1;
        }
        return 0;
@@ -271,13 +252,15 @@ attach_segment(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 int
 rte_eal_hugepage_attach(void)
 {
-       const struct hugepage_info *hpi;
+       struct hugepage_info *hpi;
        int fd_hugepage = -1;
        unsigned int i;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       hpi = &internal_config.hugepage_info[0];
+       hpi = &internal_conf->hugepage_info[0];
 
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
                const struct hugepage_info *cur_hpi = &hpi[i];
                struct attach_walk_args wa;
 
@@ -336,64 +319,17 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
        return RTE_ALIGN(area_sz, page_sz);
 }
 
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
-static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
-               int n_segs, int socket_id, int type_msl_idx)
-{
-       char name[RTE_FBARRAY_NAME_LEN];
-
-       snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
-                type_msl_idx);
-       if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
-                       sizeof(struct rte_memseg))) {
-               RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
-                       rte_strerror(rte_errno));
-               return -1;
-       }
-
-       msl->page_sz = page_sz;
-       msl->socket_id = socket_id;
-       msl->base_va = NULL;
-
-       RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
-                       (size_t)page_sz >> 10, socket_id);
-
-       return 0;
-}
-
 static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_alloc(struct rte_memseg_list *msl)
 {
-       uint64_t page_sz;
-       size_t mem_sz;
-       void *addr;
        int flags = 0;
 
 #ifdef RTE_ARCH_PPC_64
-       flags |= MAP_HUGETLB;
+       flags |= EAL_RESERVE_HUGEPAGES;
 #endif
-
-       page_sz = msl->page_sz;
-       mem_sz = page_sz * msl->memseg_arr.len;
-
-       addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
-       if (addr == NULL) {
-               if (rte_errno == EADDRNOTAVAIL)
-                       RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
-                               "please use '--" OPT_BASE_VIRTADDR "' option\n",
-                               (unsigned long long)mem_sz, msl->base_va);
-               else
-                       RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
-               return -1;
-       }
-       msl->base_va = addr;
-       msl->len = mem_sz;
-
-       return 0;
+       return eal_memseg_list_alloc(msl, flags);
 }
 
-
 static int
 memseg_primary_init(void)
 {
@@ -401,9 +337,11 @@ memseg_primary_init(void)
        int hpi_idx, msl_idx = 0;
        struct rte_memseg_list *msl;
        uint64_t max_mem, total_mem;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* no-huge does not need this at all */
-       if (internal_config.no_hugetlbfs)
+       if (internal_conf->no_hugetlbfs)
                return 0;
 
        /* FreeBSD has an issue where core dump will dump the entire memory
@@ -420,7 +358,7 @@ memseg_primary_init(void)
        total_mem = 0;
 
        /* create memseg lists */
-       for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+       for (hpi_idx = 0; hpi_idx < (int) internal_conf->num_hugepage_sizes;
                        hpi_idx++) {
                uint64_t max_type_mem, total_type_mem = 0;
                uint64_t avail_mem;
@@ -428,7 +366,7 @@ memseg_primary_init(void)
                struct hugepage_info *hpi;
                uint64_t hugepage_sz;
 
-               hpi = &internal_config.hugepage_info[hpi_idx];
+               hpi = &internal_conf->hugepage_info[hpi_idx];
                hugepage_sz = hpi->hugepage_sz;
 
                /* no NUMA support on FreeBSD */
@@ -479,15 +417,15 @@ memseg_primary_init(void)
                                        cur_max_mem);
                        n_segs = cur_mem / hugepage_sz;
 
-                       if (alloc_memseg_list(msl, hugepage_sz, n_segs,
-                                       0, type_msl_idx))
+                       if (eal_memseg_list_init(msl, hugepage_sz, n_segs,
+                                       0, type_msl_idx, false))
                                return -1;
 
                        total_segs += msl->memseg_arr.len;
                        total_type_mem = total_segs * hugepage_sz;
                        type_msl_idx++;
 
-                       if (alloc_va_space(msl)) {
+                       if (memseg_list_alloc(msl)) {
                                RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
                                return -1;
                        }
@@ -518,7 +456,7 @@ memseg_secondary_init(void)
                }
 
                /* preallocate VA space */
-               if (alloc_va_space(msl)) {
+               if (memseg_list_alloc(msl)) {
                        RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
                        return -1;
                }