X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinux%2Feal%2Feal.c;h=c6ad231e0df1eedfe2f1f56783bc783b54178470;hb=8ac3591694e105d47968f5f29b8c19511f21e41c;hp=dfbbeddc4e4a46cfe382abe9d86c5368315791a3;hpb=4142b06e2d60734eb7f6dda5edc212993c461f0a;p=dpdk.git diff --git a/lib/librte_eal/linux/eal/eal.c b/lib/librte_eal/linux/eal/eal.c index dfbbeddc4e..c6ad231e0d 100644 --- a/lib/librte_eal/linux/eal/eal.c +++ b/lib/librte_eal/linux/eal/eal.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -57,13 +56,17 @@ #include "eal_internal_cfg.h" #include "eal_filesystem.h" #include "eal_hugepages.h" +#include "eal_memcfg.h" #include "eal_options.h" #include "eal_vfio.h" +#include "hotplug_mp.h" #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL) #define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10) +#define KERNEL_IOMMU_GROUPS_PATH "/sys/kernel/iommu_groups" + /* Allow the application to print its usage message too if set */ static rte_usage_hook_t rte_application_usage_hook = NULL; @@ -471,24 +474,6 @@ eal_proc_type_detect(void) return ptype; } -/* copies data from internal config to shared config */ -static void -eal_update_mem_config(void) -{ - struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; - mcfg->legacy_mem = internal_config.legacy_mem; - mcfg->single_file_segments = internal_config.single_file_segments; -} - -/* copies data from shared config to internal config */ -static void -eal_update_internal_config(void) -{ - struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; - internal_config.legacy_mem = mcfg->legacy_mem; - internal_config.single_file_segments = mcfg->single_file_segments; -} - /* Sets up rte_config structure with the pointer to shared memory config.*/ static int rte_config_init(void) @@ -499,15 +484,19 @@ rte_config_init(void) case RTE_PROC_PRIMARY: if (rte_eal_config_create() < 0) return -1; - eal_update_mem_config(); + eal_mcfg_update_from_internal(); break; case RTE_PROC_SECONDARY: if (rte_eal_config_attach() < 0) return -1; - rte_eal_mcfg_wait_complete(rte_config.mem_config); + eal_mcfg_wait_complete(); + if (eal_mcfg_check_version() < 0) { + RTE_LOG(ERR, EAL, "Primary and secondary process DPDK version mismatch\n"); + return -1; + } if (rte_eal_config_reattach() < 0) return -1; - eal_update_internal_config(); + eal_mcfg_update_internal(); break; case RTE_PROC_AUTO: case RTE_PROC_INVALID: @@ -932,16 +921,6 @@ sync_func(__attribute__((unused)) void *arg) return 0; } -inline static void -rte_eal_mcfg_complete(void) -{ - /* ALL shared mem_config related INIT DONE */ - if (rte_config.process_type == RTE_PROC_PRIMARY) - rte_config.mem_config->magic = RTE_MAGIC; - - internal_config.init_complete = 1; -} - /* * Request iopl privilege for all RPL, returns 0 on success * iopl() call is mostly for the i386 architecture. For other architectures, @@ -973,6 +952,33 @@ static void rte_eal_init_alert(const char *msg) RTE_LOG(ERR, EAL, "%s\n", msg); } +/* + * On Linux 3.6+, even if VFIO is not loaded, whenever IOMMU is enabled in the + * BIOS and in the kernel, /sys/kernel/iommu_groups path will contain kernel + * IOMMU groups. If IOMMU is not enabled, that path would be empty. + * Therefore, checking if the path is empty will tell us if IOMMU is enabled. 
+ */ +static bool +is_iommu_enabled(void) +{ + DIR *dir = opendir(KERNEL_IOMMU_GROUPS_PATH); + struct dirent *d; + int n = 0; + + /* if directory doesn't exist, assume IOMMU is not enabled */ + if (dir == NULL) + return false; + + while ((d = readdir(dir)) != NULL) { + /* skip dot and dot-dot */ + if (++n > 2) + break; + } + closedir(dir); + + return n > 2; +} + /* Launch threads, called at application init(). */ int rte_eal_init(int argc, char **argv) @@ -984,6 +990,7 @@ rte_eal_init(int argc, char **argv) static char logid[PATH_MAX]; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; char thread_name[RTE_MAX_THREAD_NAME_LEN]; + bool phys_addrs; /* checks if the machine is adequate */ if (!rte_cpu_is_supported()) { @@ -1062,7 +1069,7 @@ rte_eal_init(int argc, char **argv) } /* register multi-process action callbacks for hotplug */ - if (rte_mp_dev_hotplug_init() < 0) { + if (eal_mp_dev_hotplug_init() < 0) { rte_eal_init_alert("failed to register mp callback for hotplug"); return -1; } @@ -1074,25 +1081,61 @@ rte_eal_init(int argc, char **argv) return -1; } + phys_addrs = rte_eal_using_phys_addrs() != 0; + /* if no EAL option "--iova-mode=", use bus IOVA scheme */ if (internal_config.iova_mode == RTE_IOVA_DC) { - /* autodetect the IOVA mapping mode (default is RTE_IOVA_PA) */ - rte_eal_get_configuration()->iova_mode = - rte_bus_get_iommu_class(); - + /* autodetect the IOVA mapping mode */ + enum rte_iova_mode iova_mode = rte_bus_get_iommu_class(); + + if (iova_mode == RTE_IOVA_DC) { + RTE_LOG(DEBUG, EAL, "Buses did not request a specific IOVA mode.\n"); + + if (!phys_addrs) { + /* if we have no access to physical addresses, + * pick IOVA as VA mode. + */ + iova_mode = RTE_IOVA_VA; + RTE_LOG(DEBUG, EAL, "Physical addresses are unavailable, selecting IOVA as VA mode.\n"); + } else if (is_iommu_enabled()) { + /* we have an IOMMU, pick IOVA as VA mode */ + iova_mode = RTE_IOVA_VA; + RTE_LOG(DEBUG, EAL, "IOMMU is available, selecting IOVA as VA mode.\n"); + } else { + /* physical addresses available, and no IOMMU + * found, so pick IOVA as PA. + */ + iova_mode = RTE_IOVA_PA; + RTE_LOG(DEBUG, EAL, "IOMMU is not available, selecting IOVA as PA mode.\n"); + } + } +#ifdef RTE_LIBRTE_KNI /* Workaround for KNI which requires physical address to work */ - if (rte_eal_get_configuration()->iova_mode == RTE_IOVA_VA && + if (iova_mode == RTE_IOVA_VA && rte_eal_check_module("rte_kni") == 1) { - rte_eal_get_configuration()->iova_mode = RTE_IOVA_PA; - RTE_LOG(WARNING, EAL, - "Some devices want IOVA as VA but PA will be used because.. " - "KNI module inserted\n"); + if (phys_addrs) { + iova_mode = RTE_IOVA_PA; + RTE_LOG(WARNING, EAL, "Forcing IOVA as 'PA' because KNI module is loaded\n"); + } else { + RTE_LOG(DEBUG, EAL, "KNI can not work since physical addresses are unavailable\n"); + } } +#endif + rte_eal_get_configuration()->iova_mode = iova_mode; } else { rte_eal_get_configuration()->iova_mode = internal_config.iova_mode; } + if (rte_eal_iova_mode() == RTE_IOVA_PA && !phys_addrs) { + rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available"); + rte_errno = EINVAL; + return -1; + } + + RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n", + rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA"); + if (internal_config.no_hugetlbfs == 0) { /* rte_config isn't initialized yet */ ret = internal_config.process_type == RTE_PROC_PRIMARY ? 
@@ -1122,8 +1165,6 @@ rte_eal_init(int argc, char **argv) #endif } - rte_srand(rte_rdtsc()); - if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0) { rte_eal_init_alert("Cannot init logging."); rte_errno = ENOMEM; @@ -1267,7 +1308,7 @@ rte_eal_init(int argc, char **argv) return -1; } - rte_eal_mcfg_complete(); + eal_mcfg_complete(); /* Call each registered callback, if enabled */ rte_option_init();
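
Note (not part of the patch): the core of this diff is the new default-case IOVA mode selection, which keys off two facts — whether physical addresses are accessible (rte_eal_using_phys_addrs()) and whether /sys/kernel/iommu_groups contains any kernel IOMMU groups (the new is_iommu_enabled() helper). Below is a minimal standalone C sketch of that same directory check, useful for predicting on a given host which mode the default case would pick. It assumes a plain POSIX environment; the program, its output strings, and the iommu_groups_present() name are illustrative only, though the sysfs path and the "more than '.' and '..' entries" rule are taken directly from the patch above.

/* Standalone sketch of the IOMMU-groups probe added by this patch. */
#include <dirent.h>
#include <stdbool.h>
#include <stdio.h>

#define KERNEL_IOMMU_GROUPS_PATH "/sys/kernel/iommu_groups"

/* Same logic as the patched is_iommu_enabled(): the directory counts as
 * non-empty once it holds anything beyond "." and "..".
 */
static bool
iommu_groups_present(void)
{
	DIR *dir = opendir(KERNEL_IOMMU_GROUPS_PATH);
	struct dirent *d;
	int n = 0;

	/* if the directory doesn't exist, assume IOMMU is not enabled */
	if (dir == NULL)
		return false;

	while ((d = readdir(dir)) != NULL) {
		/* count entries; stop as soon as we see more than . and .. */
		if (++n > 2)
			break;
	}
	closedir(dir);

	return n > 2;
}

int
main(void)
{
	if (iommu_groups_present())
		printf("IOMMU groups present: default case would select IOVA as VA\n");
	else
		printf("no IOMMU groups: default case would select IOVA as PA "
		       "(provided physical addresses are accessible)\n");
	return 0;
}

With the patch applied, the chosen mode is also reported at startup by the new "Selected IOVA mode '%s'" log line, so the sketch is only a convenience for checking a host before running a DPDK application.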