/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
5 #ifndef _EAL_PRIVATE_H_
6 #define _EAL_PRIVATE_H_
13 #include <rte_lcore.h>
14 #include <rte_memory.h>
16 #include "eal_internal_cfg.h"
19 * Structure storing internal configuration (per-lcore)
22 pthread_t thread_id; /**< pthread identifier */
23 int pipe_main2worker[2]; /**< communication pipe with main */
24 int pipe_worker2main[2]; /**< communication pipe with main */
26 lcore_function_t * volatile f; /**< function to call */
27 void * volatile arg; /**< argument of function */
28 volatile int ret; /**< return value of function */
30 volatile enum rte_lcore_state_t state; /**< lcore state */
31 unsigned int socket_id; /**< physical socket id for this lcore */
32 unsigned int core_id; /**< core number on socket for this lcore */
33 int core_index; /**< relative index, starting from 0 */
34 uint8_t core_role; /**< role of core eg: OFF, RTE, SERVICE */
36 rte_cpuset_t cpuset; /**< cpu set which the lcore affinity to */
39 extern struct lcore_config lcore_config[RTE_MAX_LCORE];
42 * The global RTE configuration structure.
45 uint32_t main_lcore; /**< Id of the main lcore */
46 uint32_t lcore_count; /**< Number of available logical cores. */
47 uint32_t numa_node_count; /**< Number of detected NUMA nodes. */
48 uint32_t numa_nodes[RTE_MAX_NUMA_NODES]; /**< List of detected NUMA nodes. */
49 uint32_t service_lcore_count;/**< Number of available service cores. */
50 enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */
52 /** Primary or secondary configuration */
53 enum rte_proc_type_t process_type;
55 /** PA or VA mapping mode */
56 enum rte_iova_mode iova_mode;
59 * Pointer to memory configuration, which may be shared across multiple
62 struct rte_mem_config *mem_config;
/**
 * Get the global configuration structure.
 *
 * @return
 *   A pointer to the global configuration structure.
 */
struct rte_config *rte_eal_get_configuration(void);
/**
 * Initialize the memzone subsystem (private to eal).
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_memzone_init(void);

/**
 * Fill configuration with number of physical and logical processors
 *
 * This function is private to EAL.
 *
 * Parse /proc/cpuinfo to get the number of physical and logical
 * processors on the machine.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_cpu_init(void);
/**
 * Initialize the memory segment subsystem.
 *
 * This function is private to EAL.
 *
 * Preallocate virtual memory.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_memseg_init(void);

/**
 * Initialize EAL memory.
 *
 * This function is private to EAL.
 *
 * Fill configuration structure with these infos, and return 0 on success.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_memory_init(void);
/**
 * Initialize the timer subsystem.
 *
 * This function is private to EAL.
 *
 * Mmap memory areas used by HPET (high precision event timer) that will
 * provide our time reference, and configure the TSC frequency also for it
 * to be used as a reference.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_timer_init(void);

/**
 * Init tail queues for non-EAL library structures. This is to allow
 * the rings, mempools, etc. lists to be shared among multiple processes.
 *
 * This function is private to EAL
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_tailqs_init(void);
/**
 * Init interrupt handling.
 *
 * This function is private to EAL.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_intr_init(void);

/**
 * Init alarm mechanism. This is to allow a callback be called after
 * specified time.
 *
 * This function is private to EAL.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_eal_alarm_init(void);
/**
 * Function is to check if the kernel module(like, vfio, vfio_iommu_type1,
 * etc.) loaded in kernel or not.
 *
 * @param module_name
 *   The module's name which need to be checked
 *
 * @return
 *   -1 means some error happens(NULL pointer or open failure)
 *   0 means the module not loaded
 *   1 means the module loaded
 */
int rte_eal_check_module(const char *module_name);
/**
 * Memory reservation flags.
 */
enum eal_mem_reserve_flags {
	/**
	 * Reserve hugepages. May be unsupported by some platforms.
	 */
	EAL_RESERVE_HUGEPAGES = 1 << 0,
	/**
	 * Force reserving memory at the requested address.
	 * This can be a destructive action depending on the implementation.
	 *
	 * @see RTE_MAP_FORCE_ADDRESS for description of possible consequences
	 *      (although implementations are not required to use it).
	 */
	EAL_RESERVE_FORCE_ADDRESS = 1 << 1
};
/**
 * Get virtual area of specified size from the OS.
 *
 * This function is private to the EAL.
 *
 * @param requested_addr
 *   Address where to request address space.
 * @param size
 *   Size of requested area.
 * @param page_sz
 *   Page size on which to align requested virtual area.
 * @param flags
 *   EAL_VIRTUAL_AREA_* flags.
 * @param reserve_flags
 *   Extra flags passed directly to eal_mem_reserve().
 *
 * @return
 *   Virtual area address if successful.
 *   NULL if unsuccessful.
 */

#define EAL_VIRTUAL_AREA_ADDR_IS_HINT (1 << 0)
/**< don't fail if cannot get exact requested address. */
#define EAL_VIRTUAL_AREA_ALLOW_SHRINK (1 << 1)
/**< try getting smaller sized (decrement by page size) virtual areas if cannot
 * get area of requested size.
 */
#define EAL_VIRTUAL_AREA_UNMAP (1 << 2)
/**< immediately unmap reserved virtual area. */

void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int reserve_flags);
/**
 * Initialize a memory segment list and create its backing storage.
 *
 * @param msl
 *   Memory segment list to be filled.
 * @param name
 *   Name for the backing storage.
 * @param page_sz
 *   Size of segment pages in the MSL.
 * @param n_segs
 *   Number of segments.
 * @param socket_id
 *   Socket ID. Must not be SOCKET_ID_ANY.
 * @param heap
 *   Mark MSL as pointing to a heap.
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
int
eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
		uint64_t page_sz, int n_segs, int socket_id, bool heap);
/**
 * Initialize memory segment list and create its backing storage
 * with a name corresponding to MSL parameters.
 *
 * @param type_msl_idx
 *   Index of the MSL among other MSLs of the same socket and page size.
 *
 * @see eal_memseg_list_init_named for remaining parameters description.
 */
int
eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
		int n_segs, int socket_id, int type_msl_idx, bool heap);
/**
 * Reserve VA space for a memory segment list
 * previously initialized with eal_memseg_list_init().
 *
 * @param msl
 *   Initialized memory segment list with page size defined.
 * @param reserve_flags
 *   Extra memory reservation flags. Can be 0 if unnecessary.
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
int
eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
/**
 * Populate MSL, each segment is one page long.
 *
 * @param msl
 *   Initialized memory segment list with page size defined.
 * @param addr
 *   Starting address of list segments.
 * @param n_segs
 *   Number of segments to populate.
 */
void
eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
/**
 * Distribute available memory between MSLs.
 *
 * @return
 *   0 on success, (-1) on failure.
 */
int
eal_dynmem_memseg_lists_init(void);

/**
 * Preallocate hugepages for dynamic allocation.
 *
 * @return
 *   0 on success, (-1) on failure.
 */
int
eal_dynmem_hugepage_init(void);
/**
 * Given the list of hugepage sizes and the number of pages thereof,
 * calculate the best number of pages of each size to fulfill the request
 * for RAM on each NUMA node.
 *
 * @param memory
 *   Amounts of memory requested for each NUMA node of RTE_MAX_NUMA_NODES.
 * @param hp_info
 *   Information about hugepages of different size.
 * @param hp_used
 *   Receives information about used hugepages of each size.
 * @param num_hp_info
 *   Number of elements in hp_info and hp_used.
 * @return
 *   0 on success, (-1) on failure.
 */
int
eal_dynmem_calc_num_pages_per_socket(
	uint64_t *memory, struct hugepage_info *hp_info,
	struct hugepage_info *hp_used, unsigned int num_hp_info);
/**
 * Get cpu core_id of the given lcore.
 *
 * This function is private to the EAL.
 */
unsigned eal_cpu_core_id(unsigned lcore_id);

/**
 * Check if cpu is present.
 *
 * This function is private to the EAL.
 */
int eal_cpu_detected(unsigned lcore_id);
/**
 * Set TSC frequency from precise value or estimation
 *
 * This function is private to the EAL.
 */
void set_tsc_freq(void);

/**
 * Get precise TSC frequency from system
 *
 * This function is private to the EAL.
 */
uint64_t get_tsc_freq(void);

/**
 * Get TSC frequency if the architecture supports.
 *
 * This function is private to the EAL.
 *
 * @return
 *   The number of TSC cycles in one second.
 *   Returns zero if the architecture support is not available.
 */
uint64_t get_tsc_freq_arch(void);
/**
 * Allocate a free lcore to associate to a non-EAL thread.
 *
 * @return
 *   - the id of a lcore with role ROLE_NON_EAL on success.
 *   - RTE_MAX_LCORE if none was available or initializing was refused (see
 *     rte_lcore_callback_register).
 */
unsigned int eal_lcore_non_eal_allocate(void);

/**
 * Release the lcore used by a non-EAL thread.
 * Counterpart of eal_lcore_non_eal_allocate().
 *
 * @param lcore_id
 *   The lcore with role ROLE_NON_EAL to release.
 */
void eal_lcore_non_eal_release(unsigned int lcore_id);
/**
 * Prepare physical memory mapping
 * i.e. hugepages on Linux and
 * contigmem on BSD.
 *
 * This function is private to the EAL.
 */
int rte_eal_hugepage_init(void);

/**
 * Creates memory mapping in secondary process
 * i.e. hugepages on Linux and
 * contigmem on BSD.
 *
 * This function is private to the EAL.
 */
int rte_eal_hugepage_attach(void);

/**
 * Detaches all memory mappings from a process.
 *
 * This function is private to the EAL.
 */
int rte_eal_memory_detach(void);
/**
 * Find a bus capable of identifying a device.
 *
 * @param str
 *   A device identifier (PCI address, virtual PMD name, ...).
 *
 * @return
 *   A valid bus handle if found.
 *   NULL if no bus is able to parse this device.
 */
struct rte_bus *rte_bus_find_by_device_name(const char *str);

/**
 * Create the unix channel for primary/secondary communication.
 *
 * @return
 *   0 on success, negative on error
 */
int rte_mp_channel_init(void);

/**
 * Primary/secondary communication cleanup.
 */
void rte_mp_channel_cleanup(void);
/**
 * Parse a device string and store its information in an
 * rte_devargs structure.
 *
 * A device description is split by layers of abstraction of the device:
 * bus, class and driver. Each layer will offer a set of properties that
 * can be applied either to configure or recognize a device.
 *
 * This function will parse those properties and prepare the rte_devargs
 * to be given to each layers for processing.
 *
 * Note: if the "data" field of the devargs points to devstr,
 * then no dynamic allocation is performed and the rte_devargs
 * can be safely discarded.
 *
 * Otherwise ``data`` will hold a workable copy of devstr, that will be
 * used by layers descriptors within rte_devargs. In this case,
 * any rte_devargs should be cleaned-up before being freed.
 *
 * @param devargs
 *   rte_devargs structure to fill.
 * @param devstr
 *   Device string to parse.
 *
 * @return
 *   0 on success.
 *   Negative errno values on error (rte_errno is set).
 */
int
rte_devargs_layers_parse(struct rte_devargs *devargs,
		const char *devstr);
/**
 * Probe a device at local process.
 *
 * @param devargs
 *   Device arguments including bus, class and driver properties.
 * @param new_dev
 *   new device be probed as output.
 * @return
 *   0 on success, negative on error.
 */
int local_dev_probe(const char *devargs, struct rte_device **new_dev);

/**
 * Hotplug remove a given device from a specific bus at local process.
 *
 * @param dev
 *   Data structure of the device to remove.
 * @return
 *   0 on success, negative on error.
 */
int local_dev_remove(struct rte_device *dev);
/**
 * Iterate over all buses to find the corresponding bus to handle the sigbus
 * error.
 *
 * @param failure_addr
 *   Pointer of the fault address of the sigbus error.
 *
 * @return
 *   0 success to handle the sigbus.
 *   -1 failed to handle the sigbus
 *   1 no bus can handler the sigbus
 */
int rte_bus_sigbus_handler(const void *failure_addr);
/**
 * Register the sigbus handler.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
int
dev_sigbus_handler_register(void);

/**
 * Unregister the sigbus handler.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
int
dev_sigbus_handler_unregister(void);
/**
 * Get OS-specific EAL mapping base address.
 */
uint64_t
eal_get_baseaddr(void);

/**
 * Allocate memory without being traced.
 * NOTE(review): "no_trace" semantics inferred from the name — confirm
 * against the implementation before relying on it.
 */
void *
eal_malloc_no_trace(const char *type, size_t size, unsigned int align);

/** Free memory obtained from eal_malloc_no_trace(). */
void eal_free_no_trace(void *addr);
/** Options for eal_file_open(). */
enum eal_open_flags {
	/** Open file for reading. */
	EAL_OPEN_READONLY = 0x00,
	/** Open file for reading and writing. */
	EAL_OPEN_READWRITE = 0x02,
	/**
	 * Create the file if it doesn't exist.
	 * New files are only accessible to the owner (0600 equivalent).
	 */
	EAL_OPEN_CREATE = 0x04
};
/**
 * Open or create a file.
 *
 * @param path
 *   Path to the file.
 * @param flags
 *   A combination of eal_open_flags controlling operation and FD behavior.
 * @return
 *   Open file descriptor on success, (-1) on failure and rte_errno is set.
 */
int
eal_file_open(const char *path, int flags);
/** File locking operation. */
enum eal_flock_op {
	EAL_FLOCK_SHARED,    /**< Acquire a shared lock. */
	EAL_FLOCK_EXCLUSIVE, /**< Acquire an exclusive lock. */
	EAL_FLOCK_UNLOCK     /**< Release a previously taken lock. */
};

/** Behavior on file locking conflict. */
enum eal_flock_mode {
	EAL_FLOCK_WAIT,  /**< Wait until the file gets unlocked to lock it. */
	EAL_FLOCK_RETURN /**< Return immediately if the file is locked. */
};
/**
 * Lock or unlock the file.
 *
 * On failure @code rte_errno @endcode is set to the error code
 * specified by POSIX flock(3) description.
 *
 * @param fd
 *   Opened file descriptor.
 * @param op
 *   Operation to perform.
 * @param mode
 *   Behavior on conflict.
 * @return
 *   0 on success, (-1) on failure.
 */
int
eal_file_lock(int fd, enum eal_flock_op op, enum eal_flock_mode mode);
/**
 * Truncate or extend the file to the specified size.
 *
 * On failure @code rte_errno @endcode is set to the error code
 * specified by POSIX ftruncate(3) description.
 *
 * @param fd
 *   Opened file descriptor.
 * @param size
 *   Desired file size.
 * @return
 *   0 on success, (-1) on failure.
 */
int
eal_file_truncate(int fd, ssize_t size);
/**
 * Reserve a region of virtual memory.
 *
 * Use eal_mem_free() to free reserved memory.
 *
 * @param requested_addr
 *   A desired reservation address which must be page-aligned.
 *   The system might not respect it.
 *   NULL means the address will be chosen by the system.
 * @param size
 *   Reservation size. Must be a multiple of system page size.
 * @param flags
 *   Reservation options, a combination of eal_mem_reserve_flags.
 * @return
 *   Starting address of the reserved area on success, NULL on failure.
 *   Callers must not access this memory until remapping it.
 */
void *
eal_mem_reserve(void *requested_addr, size_t size, int flags);
/**
 * Free memory obtained by eal_mem_reserve() and possibly allocated.
 *
 * If *virt* and *size* describe a part of the reserved region,
 * only this part of the region is freed (accurately up to the system
 * page size). If *virt* points to allocated memory, *size* must match
 * the one specified on allocation. The behavior is undefined
 * if the memory pointed by *virt* is obtained from another source.
 *
 * @param virt
 *   A virtual address in a region previously reserved.
 * @param size
 *   Number of bytes to unreserve.
 */
void
eal_mem_free(void *virt, size_t size);
/**
 * Configure memory region inclusion into dumps.
 *
 * @param virt
 *   Starting address of the region.
 * @param size
 *   Size of the region.
 * @param dump
 *   True to include memory into dumps, false to exclude.
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
int
eal_mem_set_dump(void *virt, size_t size, bool dump);
/**
 * Sets the runtime directory of DPDK
 *
 * @param run_dir
 *   The new runtime directory path of DPDK
 * @param size
 *   The size of the new runtime directory path in bytes.
 * @return
 *   0 on success, (-1) on failure.
 */
int
eal_set_runtime_dir(char *run_dir, size_t size);
/**
 * Get the internal configuration structure.
 *
 * @return
 *   A pointer to the internal configuration structure.
 */
struct internal_config *
eal_get_internal_configuration(void);
691 * Get the current value of the rte_application_usage pointer
694 * Pointer to the current value of rte_application_usage .
697 eal_get_application_usage_hook(void);
/**
 * Instruct primary process that a secondary process wants to attach.
 */
bool __rte_mp_enable(void);

/**
 * Init per-lcore info in current thread.
 *
 * @param lcore_id
 *   identifier of lcore.
 * @param cpuset
 *   CPU affinity for this thread.
 */
void __rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset);

/**
 * Uninitialize per-lcore info for current thread.
 */
void __rte_thread_uninit(void);
720 * asprintf(3) replacement for Windows.
722 #ifdef RTE_EXEC_ENV_WINDOWS
723 __rte_format_printf(2, 3)
724 int eal_asprintf(char **buffer, const char *format, ...);
726 #define asprintf(buffer, format, ...) \
727 eal_asprintf(buffer, format, ##__VA_ARGS__)
730 #endif /* _EAL_PRIVATE_H_ */