/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
-static const char *default_runtime_dir = "/var/run";
-
-int
-eal_create_runtime_dir(void)
-{
- const char *directory = default_runtime_dir;
- const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
- const char *fallback = "/tmp";
- char run_dir[PATH_MAX];
- char tmp[PATH_MAX];
- int ret;
-
- if (getuid() != 0) {
- /* try XDG path first, fall back to /tmp */
- if (xdg_runtime_dir != NULL)
- directory = xdg_runtime_dir;
- else
- directory = fallback;
- }
- /* create DPDK subdirectory under runtime dir */
- ret = snprintf(tmp, sizeof(tmp), "%s/dpdk", directory);
- if (ret < 0 || ret == sizeof(tmp)) {
- RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
- return -1;
- }
-
- /* create prefix-specific subdirectory under DPDK runtime dir */
- ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
- tmp, eal_get_hugefile_prefix());
- if (ret < 0 || ret == sizeof(run_dir)) {
- RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
- return -1;
- }
-
- /* create the path if it doesn't exist. no "mkdir -p" here, so do it
- * step by step.
- */
- ret = mkdir(tmp, 0700);
- if (ret < 0 && errno != EEXIST) {
- RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
- tmp, strerror(errno));
- return -1;
- }
-
- ret = mkdir(run_dir, 0700);
- if (ret < 0 && errno != EEXIST) {
- RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
- run_dir, strerror(errno));
- return -1;
- }
-
- if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
- return -1;
-
- return 0;
-}
int
eal_clean_runtime_dir(void)
return 0;
}
-/* parse a sysfs (or other) file containing one integer value */
-int
-eal_parse_sysfs_value(const char *filename, unsigned long *val)
-{
- FILE *f;
- char buf[BUFSIZ];
- char *end = NULL;
-
- if ((f = fopen(filename, "r")) == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
- __func__, filename);
- return -1;
- }
-
- if (fgets(buf, sizeof(buf), f) == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
- __func__, filename);
- fclose(f);
- return -1;
- }
- *val = strtoul(buf, &end, 0);
- if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
- RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
- __func__, filename);
- fclose(f);
- return -1;
- }
- fclose(f);
- return 0;
-}
-
-
/* create memory configuration in shared/mmap memory. Take out
* a write lock on the memsegs, so we can auto-detect primary/secondary.
* This means we never close the file while running (auto-close on exit).
RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
else {
/* free old ops name */
- if (internal_conf->user_mbuf_pool_ops_name !=
- NULL)
- free(internal_conf->user_mbuf_pool_ops_name);
+ free(internal_conf->user_mbuf_pool_ops_name);
internal_conf->user_mbuf_pool_ops_name =
ops_name;
const struct rte_config *config = rte_eal_get_configuration();
struct internal_config *internal_conf =
eal_get_internal_configuration();
+ bool has_phys_addr;
+ enum rte_iova_mode iova_mode;
/* checks if the machine is adequate */
if (!rte_cpu_is_supported()) {
return -1;
}
- /* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
- if (internal_conf->iova_mode == RTE_IOVA_DC) {
- /* autodetect the IOVA mapping mode (default is RTE_IOVA_PA) */
- enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
-
- if (iova_mode == RTE_IOVA_DC)
- iova_mode = RTE_IOVA_PA;
- rte_eal_get_configuration()->iova_mode = iova_mode;
- } else {
- rte_eal_get_configuration()->iova_mode =
- internal_conf->iova_mode;
+ /*
+ * PA are only available for hugepages via contigmem.
+ * If contigmem is inaccessible, rte_eal_hugepage_init() will fail
+ * with a message describing the cause.
+ */
+ has_phys_addr = internal_conf->no_hugetlbfs == 0;
+ iova_mode = internal_conf->iova_mode;
+ if (iova_mode == RTE_IOVA_PA && !has_phys_addr) {
+ rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
+ rte_errno = EINVAL;
+ return -1;
}
-
+ if (iova_mode == RTE_IOVA_DC) {
+ RTE_LOG(DEBUG, EAL, "Specific IOVA mode is not requested, autodetecting\n");
+ if (has_phys_addr) {
+ RTE_LOG(DEBUG, EAL, "Selecting IOVA mode according to bus requests\n");
+ iova_mode = rte_bus_get_iommu_class();
+ if (iova_mode == RTE_IOVA_DC)
+ iova_mode = RTE_IOVA_PA;
+ } else {
+ iova_mode = RTE_IOVA_VA;
+ }
+ }
+ rte_eal_get_configuration()->iova_mode = iova_mode;
RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
rte_mp_channel_cleanup();
/* after this point, any DPDK pointers will become dangling */
rte_eal_memory_detach();
+ rte_eal_alarm_cleanup();
rte_trace_save();
eal_trace_fini();
eal_cleanup_config(internal_conf);
__rte_unused int *vfio_dev_fd,
__rte_unused struct vfio_device_info *device_info)
{
+ rte_errno = ENOTSUP;
return -1;
}
__rte_unused const char *dev_addr,
__rte_unused int fd)
{
+ rte_errno = ENOTSUP;
return -1;
}
/* VFIO is Linux-only; on this platform the call always fails with
 * rte_errno set to ENOTSUP.
 */
int rte_vfio_enable(__rte_unused const char *modname)
{
	rte_errno = ENOTSUP;
	return -1;
}
/* Stub: VFIO groups do not exist on this platform; fail with ENOTSUP
 * (previously this stub returned success).
 */
int rte_vfio_clear_group(__rte_unused int vfio_group_fd)
{
	rte_errno = ENOTSUP;
	return -1;
}
int
__rte_unused const char *dev_addr,
__rte_unused int *iommu_group_num)
{
+ rte_errno = ENOTSUP;
return -1;
}
/* Stub: no VFIO container support on this platform; fail with ENOTSUP. */
int
rte_vfio_get_container_fd(void)
{
	rte_errno = ENOTSUP;
	return -1;
}
/* Stub: no VFIO group support on this platform; fail with ENOTSUP. */
int
rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
{
	rte_errno = ENOTSUP;
	return -1;
}
/* Stub: VFIO containers cannot be created on this platform; ENOTSUP. */
int
rte_vfio_container_create(void)
{
	rte_errno = ENOTSUP;
	return -1;
}
/* Stub: VFIO containers do not exist on this platform; ENOTSUP. */
int
rte_vfio_container_destroy(__rte_unused int container_fd)
{
	rte_errno = ENOTSUP;
	return -1;
}
rte_vfio_container_group_bind(__rte_unused int container_fd,
__rte_unused int iommu_group_num)
{
+ rte_errno = ENOTSUP;
return -1;
}
rte_vfio_container_group_unbind(__rte_unused int container_fd,
__rte_unused int iommu_group_num)
{
+ rte_errno = ENOTSUP;
return -1;
}
__rte_unused uint64_t iova,
__rte_unused uint64_t len)
{
+ rte_errno = ENOTSUP;
return -1;
}
__rte_unused uint64_t iova,
__rte_unused uint64_t len)
{
+ rte_errno = ENOTSUP;
return -1;
}