#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#include "eal_memcfg.h"
#include "eal_options.h"
#include "eal_vfio.h"
#include "hotplug_mp.h"
#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
+#define KERNEL_IOMMU_GROUPS_PATH "/sys/kernel/iommu_groups"
+
/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook = NULL;
return ptype;
}
-/* copies data from internal config to shared config */
-static void
-eal_update_mem_config(void)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- mcfg->legacy_mem = internal_config.legacy_mem;
- mcfg->single_file_segments = internal_config.single_file_segments;
-}
-
-/* copies data from shared config to internal config */
-static void
-eal_update_internal_config(void)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- internal_config.legacy_mem = mcfg->legacy_mem;
- internal_config.single_file_segments = mcfg->single_file_segments;
-}
-
/* Sets up rte_config structure with the pointer to shared memory config.*/
static int
rte_config_init(void)
case RTE_PROC_PRIMARY:
if (rte_eal_config_create() < 0)
return -1;
- eal_update_mem_config();
+ eal_mcfg_update_from_internal();
break;
case RTE_PROC_SECONDARY:
if (rte_eal_config_attach() < 0)
return -1;
- rte_eal_mcfg_wait_complete(rte_config.mem_config);
+ eal_mcfg_wait_complete();
+ if (eal_mcfg_check_version() < 0) {
+ RTE_LOG(ERR, EAL, "Primary and secondary process DPDK version mismatch\n");
+ return -1;
+ }
if (rte_eal_config_reattach() < 0)
return -1;
- eal_update_internal_config();
+ eal_mcfg_update_internal();
break;
case RTE_PROC_AUTO:
case RTE_PROC_INVALID:
return 0;
}
-inline static void
-rte_eal_mcfg_complete(void)
-{
- /* ALL shared mem_config related INIT DONE */
- if (rte_config.process_type == RTE_PROC_PRIMARY)
- rte_config.mem_config->magic = RTE_MAGIC;
-
- internal_config.init_complete = 1;
-}
-
/*
* Request iopl privilege for all RPL, returns 0 on success
* iopl() call is mostly for the i386 architecture. For other architectures,
RTE_LOG(ERR, EAL, "%s\n", msg);
}
+/*
+ * On Linux 3.6+, even if VFIO is not loaded, whenever IOMMU is enabled in the
+ * BIOS and in the kernel, /sys/kernel/iommu_groups path will contain kernel
+ * IOMMU groups. If IOMMU is not enabled, that path would be empty.
+ * Therefore, checking if the path is empty will tell us if IOMMU is enabled.
+ */
+static bool
+is_iommu_enabled(void)
+{
+	DIR *dir = opendir(KERNEL_IOMMU_GROUPS_PATH);
+	struct dirent *d;
+	int n = 0;
+
+	/* if directory doesn't exist, assume IOMMU is not enabled */
+	if (dir == NULL)
+		return false;
+
+	while ((d = readdir(dir)) != NULL) {
+		/* "." and ".." always exist; a third entry means a real group */
+		if (++n > 2)
+			break;
+	}
+	closedir(dir);
+
+	return n > 2;
+}
+
/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
if (iova_mode == RTE_IOVA_DC) {
- iova_mode = phys_addrs ? RTE_IOVA_PA : RTE_IOVA_VA;
- RTE_LOG(DEBUG, EAL,
- "Buses did not request a specific IOVA mode, using '%s' based on physical addresses availability.\n",
- phys_addrs ? "PA" : "VA");
+ RTE_LOG(DEBUG, EAL, "Buses did not request a specific IOVA mode.\n");
+
+ if (!phys_addrs) {
+ /* if we have no access to physical addresses,
+ * pick IOVA as VA mode.
+ */
+ iova_mode = RTE_IOVA_VA;
+ RTE_LOG(DEBUG, EAL, "Physical addresses are unavailable, selecting IOVA as VA mode.\n");
+ } else if (is_iommu_enabled()) {
+ /* we have an IOMMU, pick IOVA as VA mode */
+ iova_mode = RTE_IOVA_VA;
+ RTE_LOG(DEBUG, EAL, "IOMMU is available, selecting IOVA as VA mode.\n");
+ } else {
+ /* physical addresses available, and no IOMMU
+ * found, so pick IOVA as PA.
+ */
+ iova_mode = RTE_IOVA_PA;
+ RTE_LOG(DEBUG, EAL, "IOMMU is not available, selecting IOVA as PA mode.\n");
+ }
}
#ifdef RTE_LIBRTE_KNI
/* Workaround for KNI which requires physical address to work */
return -1;
}
- rte_eal_mcfg_complete();
+ eal_mcfg_complete();
/* Call each registered callback, if enabled */
rte_option_init();