#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
#include <rte_bus.h>
-#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_version.h>
#include <rte_atomic.h>
#include <malloc_heap.h>
+#include <rte_vfio.h>
#include "eal_private.h"
#include "eal_thread.h"
+/*
+ * Return the mempool ops name to use for mbuf pools.
+ * NOTE(review): field renamed mbuf_pool_ops_name -> user_mbuf_pool_ops_name;
+ * value is set from the OPT_MBUF_POOL_OPS_NAME_NUM command-line option
+ * (presumably --mbuf-pool-ops-name — confirm against eal_options).
+ */
const char *
rte_eal_mbuf_default_mempool_ops(void)
{
- return internal_config.mbuf_pool_ops_name;
+ return internal_config.user_mbuf_pool_ops_name;
}
/* Return a pointer to the configuration structure */
break;
case OPT_MBUF_POOL_OPS_NAME_NUM:
- internal_config.mbuf_pool_ops_name = optarg;
+ internal_config.user_mbuf_pool_ops_name = optarg;
break;
default:
{
int vfio_enabled = 0;
- if (!internal_config.no_pci) {
- pci_vfio_enable();
- vfio_enabled |= pci_vfio_is_enabled();
- }
+ if (rte_vfio_enable("vfio"))
+ return -1;
+ vfio_enabled = rte_vfio_is_enabled("vfio");
if (vfio_enabled) {
return -1;
}
+ if (eal_plugins_init() < 0) {
+ rte_eal_init_alert("Cannot init plugins\n");
+ rte_errno = EINVAL;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
if (eal_option_device_parse()) {
rte_errno = ENODEV;
rte_atomic32_clear(&run_once);
/* autodetect the iova mapping mode (default is iova_pa) */
rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
+ /* Workaround for KNI which requires physical address to work */
+ if (rte_eal_get_configuration()->iova_mode == RTE_IOVA_VA &&
+ rte_eal_check_module("rte_kni") == 1) {
+ rte_eal_get_configuration()->iova_mode = RTE_IOVA_PA;
+ RTE_LOG(WARNING, EAL,
+ "Some devices want IOVA as VA but PA will be used because.. "
+ "KNI module inserted\n");
+ }
+
if (internal_config.no_hugetlbfs == 0 &&
internal_config.process_type != RTE_PROC_SECONDARY &&
eal_hugepage_info_init() < 0) {
eal_check_mem_on_local_socket();
- if (eal_plugins_init() < 0)
- rte_eal_init_alert("Cannot init plugins\n");
-
eal_thread_init_master(rte_config.master_lcore);
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
return fctret;
}
+/*
+ * Release EAL-held resources before application exit.
+ * Currently only finalizes the service cores subsystem.
+ *
+ * @return 0 on success (no failure paths at present).
+ */
+int rte_eal_cleanup(void)
+{
+ rte_service_finalize();
+ return 0;
+}
+
/* get core role */
enum rte_lcore_role_t
rte_eal_lcore_role(unsigned lcore_id)
return ! internal_config.no_hugetlbfs;
}
+/*
+ * Return nonzero when the PCI bus is enabled, i.e. internal_config.no_pci
+ * is not set (presumably cleared unless --no-pci was passed — confirm
+ * against option parsing).
+ */
+int rte_eal_has_pci(void)
+{
+ return !internal_config.no_pci;
+}
+
+/*
+ * Return nonzero when UIO device files should be created
+ * (internal_config.create_uio_dev; presumably set by the
+ * --create-uio-dev option — confirm against option parsing).
+ */
+int rte_eal_create_uio_dev(void)
+{
+ return internal_config.create_uio_dev;
+}
+
+/*
+ * Return the VFIO interrupt mode recorded in the internal EAL
+ * configuration (internal_config.vfio_intr_mode).
+ */
+enum rte_intr_mode
+rte_eal_vfio_intr_mode(void)
+{
+ return internal_config.vfio_intr_mode;
+}
+
int
rte_eal_check_module(const char *module_name)
{