/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdarg.h>
#include <stdlib.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
static uint64_t baseaddr_offset;
+static unsigned proc_pagemap_readable;
+
#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
-static uint64_t
-get_physaddr(void * virtaddr)
+static void
+test_proc_pagemap_readable(void)
+{
+ int fd = open("/proc/self/pagemap", O_RDONLY);
+
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Cannot open /proc/self/pagemap: %s. "
+ "virt2phys address translation will not work\n",
+ strerror(errno));
+ return;
+ }
+
+ /* Is readable */
+ close(fd);
+ proc_pagemap_readable = 1;
+}
+
+/* Lock a page in physical memory and prevent it from being swapped. */
+int
+rte_mem_lock_page(const void *virt)
+{
+ unsigned long virtual = (unsigned long)virt;
+ int page_size = getpagesize();
+ unsigned long aligned = (virtual & ~ (page_size - 1));
+ return mlock((void*)aligned, page_size);
+}
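/*
 * Worked example of the round-down above, assuming a 4 KiB page size:
 * virt = 0x7f3a12345678 gives aligned = virt & ~0xfffUL = 0x7f3a12345000,
 * so mlock() pins the whole page containing `virt`, not just the byte at
 * that address.
 */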
+
+/*
+ * Get physical address of any mapped virtual address in the current process.
+ */
+phys_addr_t
+rte_mem_virt2phy(const void *virtaddr)
{
int fd;
uint64_t page, physaddr;
unsigned long virt_pfn;
int page_size;
+ off_t offset;
+
+ /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
+ if (!proc_pagemap_readable)
+ return RTE_BAD_PHYS_ADDR;
/* standard page size */
page_size = getpagesize();
if (fd < 0) {
RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
__func__, strerror(errno));
- return (uint64_t) -1;
+ return RTE_BAD_PHYS_ADDR;
}
- off_t offset;
virt_pfn = (unsigned long)virtaddr / page_size;
offset = sizeof(uint64_t) * virt_pfn;
if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
__func__, strerror(errno));
close(fd);
- return (uint64_t) -1;
+ return RTE_BAD_PHYS_ADDR;
}
if (read(fd, &page, sizeof(uint64_t)) < 0) {
RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
__func__, strerror(errno));
close(fd);
- return (uint64_t) -1;
+ return RTE_BAD_PHYS_ADDR;
}
/*
* the pfn (page frame number) are bits 0-54 (see
* pagemap.txt in linux Documentation)
*/
- physaddr = ((page & 0x7fffffffffffffULL) * page_size);
+ physaddr = ((page & 0x7fffffffffffffULL) * page_size)
+ + ((unsigned long)virtaddr % page_size);
close(fd);
return physaddr;
}
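/*
 * Hedged usage sketch (not part of this patch): a caller would typically
 * lock the page first, since /proc/self/pagemap only reports a page frame
 * number once the page is resident; `buf` is a hypothetical application
 * buffer.
 */
static inline phys_addr_t
example_virt2phy_locked(void *buf)
{
	if (rte_mem_lock_page(buf) < 0)
		return RTE_BAD_PHYS_ADDR; /* could not fault in and pin the page */
	return rte_mem_virt2phy(buf);     /* RTE_BAD_PHYS_ADDR on failure */
}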
phys_addr_t addr;
for (i = 0; i < hpi->num_pages[0]; i++) {
- addr = get_physaddr(hugepg_tbl[i].orig_va);
- if (addr == (phys_addr_t) -1)
+ addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+ if (addr == RTE_BAD_PHYS_ADDR)
return -1;
hugepg_tbl[i].physaddr = addr;
}
}
/*
- * Try to mmap *size bytes in /dev/zero. If it is succesful, return the
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
* pointer to the mmap'd area and keep *size unmodified. Else, retry
* with a smaller zone: decrease *size by hugepage_sz until it reaches
* 0. In this case, return NULL. Note: this function returns an address
}
else addr = NULL;
- RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
+ RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
fd = open("/dev/zero", O_RDONLY);
if (fd < 0){
if (addr == MAP_FAILED) {
close(fd);
- RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
+ RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
+ strerror(errno));
return NULL;
}
aligned_addr &= (~(hugepage_sz - 1));
addr = (void *)(aligned_addr);
- RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
+ RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
addr, *size);
/* increment offset */
#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
- size_t hugepage_sz = hpi->hugepage_sz;
+ uint64_t hugepage_sz = hpi->hugepage_sz;
if (orig) {
hugepg_tbl[i].file_id = i;
#endif
hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
}
-#ifndef RTE_ARCH_X86_64
- /* for 32-bit systems, don't remap 1G pages, just reuse original
- * map address as final map address.
+#ifndef RTE_ARCH_64
+ /* for 32-bit systems, don't remap 1G and 16G pages, just reuse
+ * original map address as final map address.
*/
- else if (hugepage_sz == RTE_PGSIZE_1G){
+ else if ((hugepage_sz == RTE_PGSIZE_1G)
+ || (hugepage_sz == RTE_PGSIZE_16G)) {
hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
hugepg_tbl[i].orig_va = NULL;
continue;
* physical block: count the number of
* contiguous physical pages. */
for (j = i+1; j < hpi->num_pages[0] ; j++) {
+#ifdef RTE_ARCH_PPC_64
+ /* The physical addresses are sorted in
+ * descending order on PPC64 */
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr - hugepage_sz)
+ break;
+#else
if (hugepg_tbl[j].physaddr !=
hugepg_tbl[j-1].physaddr + hugepage_sz)
break;
+#endif
}
num_pages = j - i;
vma_len = num_pages * hugepage_sz;
while (i < hpi->num_pages[0]) {
-#ifndef RTE_ARCH_X86_64
- /* for 32-bit systems, don't remap 1G pages, just reuse original
- * map address as final map address.
+#ifndef RTE_ARCH_64
+ /* for 32-bit systems, don't remap 1G pages and 16G pages,
+ * just reuse original map address as final map address.
*/
- if (hugepage_sz == RTE_PGSIZE_1G){
+ if ((hugepage_sz == RTE_PGSIZE_1G)
+ || (hugepage_sz == RTE_PGSIZE_16G)) {
hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
hugepg_tbl[i].orig_va = NULL;
i++;
* physical block: count the number of
* contiguous physical pages. */
for (j = i+1; j < hpi->num_pages[0] ; j++) {
- if (hugepg_tbl[j].physaddr != hugepg_tbl[j-1].physaddr + hugepage_sz)
+#ifdef RTE_ARCH_PPC_64
+ /* The physical addresses are sorted in descending
+ * order on PPC64 */
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr - hugepage_sz)
+ break;
+#else
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr + hugepage_sz)
break;
+#endif
}
num_pages = j - i;
vma_len = num_pages * hugepage_sz;
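/*
 * Hedged sketch (not part of this patch): the two #ifdef branches above
 * could be folded into a single helper; `prev` and `cur` stand for the
 * physical addresses of hugepg_tbl[j-1] and hugepg_tbl[j].
 */
static inline int
phys_is_contig(phys_addr_t prev, phys_addr_t cur, uint64_t hugepage_sz)
{
#ifdef RTE_ARCH_PPC_64
	/* physical addresses are sorted in descending order on PPC64 */
	return cur == prev - hugepage_sz;
#else
	return cur == prev + hugepage_sz;
#endif
}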
return -1;
}
- rte_snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
+ snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
filepath);
- physaddr = get_physaddr(vma_addr);
+ physaddr = rte_mem_virt2phy(vma_addr);
- if (physaddr == (phys_addr_t) -1)
+ if (physaddr == RTE_BAD_PHYS_ADDR)
return -1;
hugepg_tbl[page_idx].final_va = vma_addr;
expected_physaddr = hugepg_tbl[page_idx].physaddr + offset;
page_addr = RTE_PTR_ADD(vma_addr, offset);
- physaddr = get_physaddr(page_addr);
+ physaddr = rte_mem_virt2phy(page_addr);
if (physaddr != expected_physaddr) {
RTE_LOG(ERR, EAL, "Segment sanity check failed: wrong physaddr "
f = fopen("/proc/self/numa_maps", "r");
if (f == NULL) {
- RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps,"
+ RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
" consider that all memory is in socket_id 0\n");
return 0;
}
- rte_snprintf(hugedir_str, sizeof(hugedir_str),
- "%s/", hpi->hugedir);
+ snprintf(hugedir_str, sizeof(hugedir_str),
+ "%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
/* parse numa map */
while (fgets(buf, sizeof(buf), f) != NULL) {
}
/*
- * Sort the hugepg_tbl by physical address (lower addresses first). We
- * use a slow algorithm, but we won't have millions of pages, and this
- * is only done at init time.
+ * Sort the hugepg_tbl by physical address (lower addresses first on x86,
+ * higher address first on powerpc). We use a slow algorithm, but we won't
+ * have millions of pages, and this is only done at init time.
*/
static int
sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
unsigned i, j;
- int smallest_idx;
- uint64_t smallest_addr;
+ int compare_idx;
+ uint64_t compare_addr;
struct hugepage_file tmp;
for (i = 0; i < hpi->num_pages[0]; i++) {
- smallest_addr = 0;
- smallest_idx = -1;
+ compare_addr = 0;
+ compare_idx = -1;
/*
* browse all entries starting at 'i', and find the
*/
for (j=i; j< hpi->num_pages[0]; j++) {
- if (smallest_addr == 0 ||
- hugepg_tbl[j].physaddr < smallest_addr) {
- smallest_addr = hugepg_tbl[j].physaddr;
- smallest_idx = j;
+ if (compare_addr == 0 ||
+#ifdef RTE_ARCH_PPC_64
+ hugepg_tbl[j].physaddr > compare_addr) {
+#else
+ hugepg_tbl[j].physaddr < compare_addr) {
+#endif
+ compare_addr = hugepg_tbl[j].physaddr;
+ compare_idx = j;
}
}
/* should not happen */
- if (smallest_idx == -1) {
+ if (compare_idx == -1) {
RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
return -1;
}
/* swap the 2 entries in the table */
- memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage_file));
- memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i],
- sizeof(struct hugepage_file));
+ memcpy(&tmp, &hugepg_tbl[compare_idx],
+ sizeof(struct hugepage_file));
+ memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
+ sizeof(struct hugepage_file));
memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
}
return 0;
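/*
 * Hedged alternative (not part of this patch): the selection sort above
 * could equally be written with qsort(3) and a comparator whose direction
 * flips per architecture; `cmp_physaddr` is a hypothetical name.
 */
static int
cmp_physaddr(const void *a, const void *b)
{
	const struct hugepage_file *p1 = a;
	const struct hugepage_file *p2 = b;

#ifdef RTE_ARCH_PPC_64
	/* descending order on PPC64 */
	return (p1->physaddr < p2->physaddr) - (p1->physaddr > p2->physaddr);
#else
	return (p1->physaddr > p2->physaddr) - (p1->physaddr < p2->physaddr);
#endif
}
/* usage: qsort(hugepg_tbl, hpi->num_pages[0], sizeof(*hugepg_tbl), cmp_physaddr); */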
size += hpi->hugepage_sz * hpi->num_pages[socket];
}
- return (size);
+ return size;
}
/*
if (num_hp_info == 0)
return -1;
- for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
- /* if specific memory amounts per socket weren't requested */
- if (internal_config.force_sockets == 0) {
+ /* if specific memory amounts per socket weren't requested */
+ if (internal_config.force_sockets == 0) {
+ int cpu_per_socket[RTE_MAX_NUMA_NODES];
+ size_t default_size, total_size;
+ unsigned lcore_id;
+
+ /* Compute number of cores per socket */
+ memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
+ RTE_LCORE_FOREACH(lcore_id) {
+ cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
+ }
+
+ /*
+ * Automatically spread the requested memory amongst the detected sockets,
+ * according to the number of cores from the cpu mask present on each socket
+ */
+ total_size = internal_config.memory;
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
+
+ /* Set memory amount per socket */
+ default_size = (internal_config.memory * cpu_per_socket[socket])
+ / rte_lcore_count();
+
+ /* Limit to maximum available memory on socket */
+ default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
+
+ /* Update sizes */
+ memory[socket] = default_size;
+ total_size -= default_size;
+ }
+
+ /*
+ * If some memory remains, try to allocate it by taking all the available
+ * memory from the sockets, one after the other
+ */
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
/* take whatever is available */
- memory[socket] = RTE_MIN(get_socket_mem_size(socket),
- total_mem);
+ default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
+ total_size);
+
+ /* Update sizes */
+ memory[socket] += default_size;
+ total_size -= default_size;
}
+ }
+
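/*
 * Worked example of the split above, with hypothetical numbers: for
 * internal_config.memory = 3G and a core mask giving 4 lcores on socket 0
 * and 2 lcores on socket 1, the first loop requests 3G * 4/6 = 2G from
 * socket 0 and 3G * 2/6 = 1G from socket 1, each clamped to the hugepage
 * memory actually available on that socket; the second loop then places
 * any clamped remainder on whichever sockets still have room.
 */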
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
/* skips if the memory on specific socket wasn't requested */
for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
hp_used[i].hugedir = hp_info[i].hugedir;
0x100000);
available = requested -
((unsigned) (memory[socket] / 0x100000));
- RTE_LOG(INFO, EAL, "Not enough memory available on socket %u! "
+ RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
"Requested: %uMB, available: %uMB\n", socket,
requested, available);
return -1;
if (total_mem > 0) {
requested = (unsigned) (internal_config.memory / 0x100000);
available = requested - (unsigned) (total_mem / 0x100000);
- RTE_LOG(INFO, EAL, "Not enough memory available! Requested: %uMB,"
+ RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
" available: %uMB\n", requested, available);
return -1;
}
* 6. unmap the first mapping
* 7. fill memsegs in configuration with contiguous zones
*/
-static int
+int
rte_eal_hugepage_init(void)
{
struct rte_mem_config *mcfg;
int new_pages_count[MAX_HUGEPAGE_SIZES];
#endif
+ test_proc_pagemap_readable();
+
memset(used_hp, 0, sizeof(used_hp));
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
- /* for debug purposes, hugetlbfs can be disabled */
+ /* hugetlbfs can be disabled */
if (internal_config.no_hugetlbfs) {
- addr = malloc(internal_config.memory);
+ addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
+ strerror(errno));
+ return -1;
+ }
mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
mcfg->memseg[0].addr = addr;
+ mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
mcfg->memseg[0].len = internal_config.memory;
mcfg->memseg[0].socket_id = 0;
return 0;
}
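/*
 * Note on the mmap() above: unlike the malloc() it replaces, an anonymous
 * private mapping is page-aligned and zero-filled, so memseg[0].addr can
 * be handed out directly. A sanity check a caller could add (sketch,
 * assuming a POSIX environment):
 *
 *	assert(((uintptr_t)addr % (uintptr_t)getpagesize()) == 0);
 */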
+ /* check if app runs on Xen Dom0 */
+ if (internal_config.xen_dom0_support) {
+#ifdef RTE_LIBRTE_XEN_DOM0
+ /* use dom0_mm kernel driver to init memory */
+ if (rte_xen_dom0_memory_init() < 0)
+ return -1;
+ else
+ return 0;
+#endif
+ }
/* calculate total number of hugepages available. at this point we haven't
* yet started sorting them so they all are on socket 0 */
int socket = tmp_hp[i].socket_id;
/* find a hugepage info with right size and increment num_pages */
- for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
+ const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
+ (int)internal_config.num_hugepage_sizes);
+ for (j = 0; j < nb_hpsizes; j++) {
if (tmp_hp[i].size ==
internal_config.hugepage_info[j].hugepage_sz) {
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
if (used_hp[i].num_pages[j] > 0) {
- RTE_LOG(INFO, EAL,
- "Requesting %u pages of size %uMB"
- " from socket %i\n",
- used_hp[i].num_pages[j],
- (unsigned)
- (used_hp[i].hugepage_sz / 0x100000),
- j);
+ RTE_LOG(DEBUG, EAL,
+ "Requesting %u pages of size %uMB"
+ " from socket %i\n",
+ used_hp[i].num_pages[j],
+ (unsigned)
+ (used_hp[i].hugepage_sz / 0x100000),
+ j);
}
}
}
new_memseg = 1;
else if (hugepage[i].size != hugepage[i-1].size)
new_memseg = 1;
+
+#ifdef RTE_ARCH_PPC_64
+ /* On the PPC64 architecture, mmap always starts from a higher
+ * virtual address and works toward lower addresses. Here, both the
+ * physical and the virtual addresses are in descending order */
+ else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
+ hugepage[i].size)
+ new_memseg = 1;
+ else if (((unsigned long)hugepage[i-1].final_va -
+ (unsigned long)hugepage[i].final_va) != hugepage[i].size)
+ new_memseg = 1;
+#else
else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
hugepage[i].size)
new_memseg = 1;
else if (((unsigned long)hugepage[i].final_va -
(unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
new_memseg = 1;
+#endif
if (new_memseg) {
j += 1;
}
/* continuation of previous memseg */
else {
+#ifdef RTE_ARCH_PPC_64
+ /* Use the physical and virtual address of the last page as the
+ * segment address on the IBM Power architecture */
+ mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+ mcfg->memseg[j].addr = hugepage[i].final_va;
+#endif
mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
}
hugepage[i].memseg_id = j;
"of memory.\n",
i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
RTE_MAX_MEMSEG);
- return (-ENOMEM);
+ return -ENOMEM;
}
return 0;
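/*
 * Worked example of the merge rule above, with hypothetical addresses:
 * two 2 MB pages whose physaddr values are 0x240000000 and 0x240200000,
 * and whose final_va values are also exactly 2 MB apart, extend the
 * current memseg by hugepage_sz; a gap on either the physical or the
 * virtual axis starts a new memseg (on PPC64 the same deltas are checked
 * in descending order).
 */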
* configuration and finds the hugepages which form that segment, mapping them
* in order to form a contiguous block in the virtual memory space
*/
-static int
+int
rte_eal_hugepage_attach(void)
{
const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
"into secondary processes\n");
}
+ test_proc_pagemap_readable();
+
+ if (internal_config.xen_dom0_support) {
+#ifdef RTE_LIBRTE_XEN_DOM0
+ if (rte_xen_dom0_memory_attach() < 0) {
+ RTE_LOG(ERR, EAL,"Failed to attach memory setments of primay "
+ "process\n");
+ return -1;
+ }
+ return 0;
+#endif
+ }
+
fd_zero = open("/dev/zero", O_RDONLY);
if (fd_zero < 0) {
RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
close(fd_hugepage);
return -1;
}
-
-static int
-rte_eal_memdevice_init(void)
-{
- struct rte_config *config;
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return 0;
-
- config = rte_eal_get_configuration();
- config->mem_config->nchannel = internal_config.force_nchannel;
- config->mem_config->nrank = internal_config.force_nrank;
-
- return 0;
-}
-
-
-/* init memory subsystem */
-int
-rte_eal_memory_init(void)
-{
- RTE_LOG(INFO, EAL, "Setting up memory...\n");
- const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
- rte_eal_hugepage_init() :
- rte_eal_hugepage_attach();
- if (retval < 0)
- return -1;
-
- if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
- return -1;
-
- return 0;
-}