#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#include "eal_memcfg.h"
#include "eal_options.h"
#include "eal_vfio.h"
#include "hotplug_mp.h"
#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
+#define KERNEL_IOMMU_GROUPS_PATH "/sys/kernel/iommu_groups"
+
/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook = NULL;
return ptype;
}
-/* copies data from internal config to shared config */
-static void
-eal_update_mem_config(void)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- mcfg->legacy_mem = internal_config.legacy_mem;
- mcfg->single_file_segments = internal_config.single_file_segments;
-}
-
-/* copies data from shared config to internal config */
-static void
-eal_update_internal_config(void)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- internal_config.legacy_mem = mcfg->legacy_mem;
- internal_config.single_file_segments = mcfg->single_file_segments;
-}
-
/* Sets up rte_config structure with the pointer to shared memory config.*/
static int
rte_config_init(void)
case RTE_PROC_PRIMARY:
if (rte_eal_config_create() < 0)
return -1;
- eal_update_mem_config();
+ eal_mcfg_update_from_internal();
break;
case RTE_PROC_SECONDARY:
if (rte_eal_config_attach() < 0)
return -1;
- rte_eal_mcfg_wait_complete(rte_config.mem_config);
+ eal_mcfg_wait_complete();
+ if (eal_mcfg_check_version() < 0) {
+ RTE_LOG(ERR, EAL, "Primary and secondary process DPDK version mismatch\n");
+ return -1;
+ }
if (rte_eal_config_reattach() < 0)
return -1;
- eal_update_internal_config();
+ eal_mcfg_update_internal();
break;
case RTE_PROC_AUTO:
case RTE_PROC_INVALID:
return 0;
}
-inline static void
-rte_eal_mcfg_complete(void)
-{
- /* ALL shared mem_config related INIT DONE */
- if (rte_config.process_type == RTE_PROC_PRIMARY)
- rte_config.mem_config->magic = RTE_MAGIC;
-
- internal_config.init_complete = 1;
-}
-
/*
* Request iopl privilege for all RPL, returns 0 on success
* iopl() call is mostly for the i386 architecture. For other architectures,
RTE_LOG(ERR, EAL, "%s\n", msg);
}
+/*
+ * On Linux 3.6+, even if VFIO is not loaded, whenever IOMMU is enabled in the
+ * BIOS and in the kernel, /sys/kernel/iommu_groups path will contain kernel
+ * IOMMU groups. If IOMMU is not enabled, that path would be empty.
+ * Therefore, checking if the path is empty will tell us if IOMMU is enabled.
+ */
+static bool
+is_iommu_enabled(void)
+{
+	DIR *dir = opendir(KERNEL_IOMMU_GROUPS_PATH);
+	struct dirent *d;
+	int n = 0;
+
+	/* if directory doesn't exist, assume IOMMU is not enabled */
+	if (dir == NULL)
+		return false;
+
+	while ((d = readdir(dir)) != NULL) {
+		/* skip dot and dot-dot */
+		if (++n > 2)
+			break;
+	}
+	closedir(dir);
+
+	/*
+	 * every directory contains at least "." and "..", so a third entry
+	 * means at least one kernel IOMMU group is present.
+	 */
+	return n > 2;
+}
+
/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
static char logid[PATH_MAX];
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
char thread_name[RTE_MAX_THREAD_NAME_LEN];
+ bool phys_addrs;
/* checks if the machine is adequate */
if (!rte_cpu_is_supported()) {
return -1;
}
+ phys_addrs = rte_eal_using_phys_addrs() != 0;
+
/* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
if (internal_config.iova_mode == RTE_IOVA_DC) {
- /* autodetect the IOVA mapping mode (default is RTE_IOVA_PA) */
- rte_eal_get_configuration()->iova_mode =
- rte_bus_get_iommu_class();
-
+ /* autodetect the IOVA mapping mode */
+ enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
+
+ if (iova_mode == RTE_IOVA_DC) {
+ RTE_LOG(DEBUG, EAL, "Buses did not request a specific IOVA mode.\n");
+
+ if (!phys_addrs) {
+ /* if we have no access to physical addresses,
+ * pick IOVA as VA mode.
+ */
+ iova_mode = RTE_IOVA_VA;
+ RTE_LOG(DEBUG, EAL, "Physical addresses are unavailable, selecting IOVA as VA mode.\n");
+ } else if (is_iommu_enabled()) {
+ /* we have an IOMMU, pick IOVA as VA mode */
+ iova_mode = RTE_IOVA_VA;
+ RTE_LOG(DEBUG, EAL, "IOMMU is available, selecting IOVA as VA mode.\n");
+ } else {
+ /* physical addresses available, and no IOMMU
+ * found, so pick IOVA as PA.
+ */
+ iova_mode = RTE_IOVA_PA;
+ RTE_LOG(DEBUG, EAL, "IOMMU is not available, selecting IOVA as PA mode.\n");
+ }
+ }
+#ifdef RTE_LIBRTE_KNI
/* Workaround for KNI which requires physical address to work */
- if (rte_eal_get_configuration()->iova_mode == RTE_IOVA_VA &&
+ if (iova_mode == RTE_IOVA_VA &&
rte_eal_check_module("rte_kni") == 1) {
- rte_eal_get_configuration()->iova_mode = RTE_IOVA_PA;
- RTE_LOG(WARNING, EAL,
- "Some devices want IOVA as VA but PA will be used because.. "
- "KNI module inserted\n");
+ if (phys_addrs) {
+ iova_mode = RTE_IOVA_PA;
+ RTE_LOG(WARNING, EAL, "Forcing IOVA as 'PA' because KNI module is loaded\n");
+ } else {
+ RTE_LOG(DEBUG, EAL, "KNI can not work since physical addresses are unavailable\n");
+ }
}
+#endif
+ rte_eal_get_configuration()->iova_mode = iova_mode;
} else {
rte_eal_get_configuration()->iova_mode =
internal_config.iova_mode;
}
+ if (rte_eal_iova_mode() == RTE_IOVA_PA && !phys_addrs) {
+ rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
+ rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
+
if (internal_config.no_hugetlbfs == 0) {
/* rte_config isn't initialized yet */
ret = internal_config.process_type == RTE_PROC_PRIMARY ?
return -1;
}
- rte_eal_mcfg_complete();
+ eal_mcfg_complete();
/* Call each registered callback, if enabled */
rte_option_init();