/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_MEMORY_H_
#define _RTE_MEMORY_H_

/**
 * @file
 * Memory-related RTE API.
 */

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_compat.h>
#include <rte_config.h>

__extension__
enum rte_page_sizes {
	RTE_PGSIZE_4K   = 1ULL << 12,
	RTE_PGSIZE_64K  = 1ULL << 16,
	RTE_PGSIZE_256K = 1ULL << 18,
	RTE_PGSIZE_2M   = 1ULL << 21,
	RTE_PGSIZE_16M  = 1ULL << 24,
	RTE_PGSIZE_256M = 1ULL << 28,
	RTE_PGSIZE_512M = 1ULL << 29,
	RTE_PGSIZE_1G   = 1ULL << 30,
	RTE_PGSIZE_4G   = 1ULL << 32,
	RTE_PGSIZE_16G  = 1ULL << 34,
};
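
/*
 * Illustrative sketch (not part of this header): the RTE_PGSIZE_* constants
 * can be compared against a segment's hugepage_sz to branch on page size.
 * The "addr" pointer below is a hypothetical mapped address; the segment is
 * looked up with rte_mem_virt2memseg(), declared later in this file.
 *
 *   const struct rte_memseg *ms = rte_mem_virt2memseg(addr);
 *
 *   if (ms != NULL && ms->hugepage_sz == RTE_PGSIZE_2M)
 *       printf("backed by 2 MB hugepages\n");
 *   else if (ms != NULL && ms->hugepage_sz == RTE_PGSIZE_1G)
 *       printf("backed by 1 GB hugepages\n");
 */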

#define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */

#define RTE_CACHE_LINE_ROUNDUP(size) \
	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
/**< Return the first cache-aligned value greater than or equal to size. */
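
/*
 * Illustrative sketch: rounding an arbitrary byte count up to a whole number
 * of cache lines before allocating, so adjacent objects never share a line.
 * The values assume a 64-byte cache line.
 *
 *   size_t request = 100;
 *   size_t padded = RTE_CACHE_LINE_ROUNDUP(request);
 *   // padded == 128 when RTE_CACHE_LINE_SIZE == 64
 */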

/**< Cache line size in terms of log2. */
#if RTE_CACHE_LINE_SIZE == 64
#define RTE_CACHE_LINE_SIZE_LOG2 6
#elif RTE_CACHE_LINE_SIZE == 128
#define RTE_CACHE_LINE_SIZE_LOG2 7
#else
#error "Unsupported cache line size"
#endif

#define RTE_CACHE_LINE_MIN_SIZE 64	/**< Minimum cache line size. */

/**
 * Force alignment to cache line.
 */
#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)

/**
 * Force minimum cache line alignment.
 */
#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)
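
/*
 * Illustrative sketch: a per-lcore counter structure aligned to a full cache
 * line so counters owned by different cores never share a line (avoiding
 * false sharing). The structure name is hypothetical.
 *
 *   struct worker_stats {
 *       uint64_t rx_pkts;
 *       uint64_t tx_pkts;
 *   } __rte_cache_aligned;
 *
 *   static struct worker_stats stats[RTE_MAX_LCORE];
 */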

typedef uint64_t phys_addr_t; /**< Physical address. */
#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)

/**
 * IO virtual address type.
 * When the physical addressing mode (IOVA as PA) is in use,
 * the translation from an IO virtual address (IOVA) to a physical address
 * is a direct mapping, i.e. the same value.
 * Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation.
 */
typedef uint64_t rte_iova_t;
#define RTE_BAD_IOVA ((rte_iova_t)-1)

/**
 * Physical memory segment descriptor.
 */
struct rte_memseg {
	RTE_STD_C11
	union {
		phys_addr_t phys_addr;  /**< deprecated - Start physical address. */
		rte_iova_t iova;        /**< Start IO address. */
	};
	RTE_STD_C11
	union {
		void *addr;             /**< Start virtual address. */
		uint64_t addr_64;       /**< Makes sure addr is always 64 bits. */
	};
	size_t len;                     /**< Length of the segment. */
	uint64_t hugepage_sz;           /**< Page size of the underlying memory. */
	int32_t socket_id;              /**< NUMA socket ID. */
	uint32_t nchannel;              /**< Number of channels. */
	uint32_t nrank;                 /**< Number of ranks. */
};

/**
 * Lock a page in physical memory and prevent it from being swapped out.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   0 on success, negative on error.
 */
int rte_mem_lock_page(const void *virt);

/**
 * Get the physical address of any mapped virtual address in the current
 * process. The address is found by reading the /proc/self/pagemap special
 * file. The page must be locked.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   The physical address, or RTE_BAD_PHYS_ADDR on error.
 */
phys_addr_t rte_mem_virt2phy(const void *virt);

/**
 * Get the IO virtual address of any mapped virtual address in the current
 * process.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   The IO address, or RTE_BAD_IOVA on error.
 */
rte_iova_t rte_mem_virt2iova(const void *virt);
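
/*
 * Illustrative sketch: pinning a page with rte_mem_lock_page() and then
 * querying the IO address a device would use to reach it. The buffer and the
 * setup_dma_descriptor() helper are hypothetical; DMA-able memory normally
 * comes from rte_malloc()/rte_memzone rather than an arbitrary pointer.
 *
 *   static uint8_t buf[4096];
 *
 *   if (rte_mem_lock_page(buf) == 0) {
 *       rte_iova_t iova = rte_mem_virt2iova(buf);
 *
 *       if (iova != RTE_BAD_IOVA)
 *           setup_dma_descriptor(iova);  // hypothetical device-specific helper
 *   }
 */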

/**
 * Get the virtual memory address corresponding to an IOVA address.
 *
 * @param iova
 *   The IOVA address.
 * @return
 *   Virtual address corresponding to the IOVA address, or NULL if the address
 *   does not exist within the DPDK memory map.
 */
__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova);

/**
 * Get the memseg to which a particular virtual address belongs.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   Memseg pointer on success, or NULL on error.
 */
__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *virt);

/**
 * Memseg walk function prototype.
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_walk_t)(const struct rte_memseg *ms, void *arg);

/**
 * Memseg contig walk function prototype. This will trigger a callback on every
 * VA-contiguous area starting at memseg ``ms``, so the total valid VA space at
 * each callback call will be [``ms->addr``, ``ms->addr + len``).
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg *ms,
		size_t len, void *arg);

/**
 * Walk the list of all memsegs.
 *
 * @param func
 *   Iterator function.
 * @param arg
 *   Argument passed to the iterator.
 * @return
 *   0 if walked over the entire list.
 *   1 if stopped by the user.
 *   -1 if the user function reported an error.
 */
int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg);
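
/*
 * Illustrative sketch: using rte_memseg_walk() to count segments and sum
 * their lengths. The callback and the totals structure are hypothetical
 * names, not part of the DPDK API.
 *
 *   struct seg_totals { uint64_t bytes; unsigned int segs; };
 *
 *   static int
 *   count_seg(const struct rte_memseg *ms, void *arg)
 *   {
 *       struct seg_totals *t = arg;
 *
 *       t->bytes += ms->len;
 *       t->segs++;
 *       return 0;  // keep walking
 *   }
 *
 *   struct seg_totals t = { 0, 0 };
 *
 *   if (rte_memseg_walk(count_seg, &t) == 0)
 *       printf("%u segments totalling %llu bytes\n",
 *              t.segs, (unsigned long long)t.bytes);
 */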

/**
 * Walk each VA-contiguous area.
 *
 * @param func
 *   Iterator function.
 * @param arg
 *   Argument passed to the iterator.
 * @return
 *   0 if walked over the entire list.
 *   1 if stopped by the user.
 *   -1 if the user function reported an error.
 */
int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg);
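
/*
 * Illustrative sketch: using rte_memseg_contig_walk() to find the largest
 * VA-contiguous chunk of DPDK memory, e.g. to size one large allocation.
 * The callback name is hypothetical.
 *
 *   static int
 *   find_max_chunk(const struct rte_memseg *ms __rte_unused,
 *           size_t len, void *arg)
 *   {
 *       size_t *max_len = arg;
 *
 *       if (len > *max_len)
 *           *max_len = len;
 *       return 0;  // visit every contiguous area
 *   }
 *
 *   size_t max_len = 0;
 *   rte_memseg_contig_walk(find_max_chunk, &max_len);
 */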

/**
 * Get the layout of the available physical memory.
 *
 * It can be useful for an application to have the full physical
 * memory layout to decide the size of a memory zone to reserve. This
 * table is stored in rte_config (see rte_eal_get_configuration()).
 *
 * @return
 *  - On success, return a pointer to a read-only table of struct
 *    rte_memseg elements, containing the layout of all
 *    addressable physical memory. The last element of the table
 *    contains a NULL address.
 *  - On error, return NULL. This should not happen since it is a fatal
 *    error that will probably cause the entire system to panic.
 */
const struct rte_memseg *rte_eal_get_physmem_layout(void);
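
/*
 * Illustrative sketch: iterating over the layout table returned by
 * rte_eal_get_physmem_layout() until the NULL-address terminator, printing
 * each segment.
 *
 *   const struct rte_memseg *layout = rte_eal_get_physmem_layout();
 *   unsigned int i;
 *
 *   for (i = 0; layout != NULL && layout[i].addr != NULL; i++)
 *       printf("seg %u: va=%p len=%zu socket=%d\n",
 *              i, layout[i].addr, layout[i].len, (int)layout[i].socket_id);
 */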

/**
 * Dump the physical memory layout to a file.
 *
 * @param f
 *   A pointer to a file for output.
 */
void rte_dump_physmem_layout(FILE *f);

/**
 * Get the total amount of available physical memory.
 *
 * @return
 *   The total amount of available physical memory in bytes.
 */
uint64_t rte_eal_get_physmem_size(void);

/**
 * Get the number of memory channels.
 *
 * @return
 *   The number of memory channels on the system. The value is 0 if unknown
 *   or not the same on all devices.
 */
unsigned rte_memory_get_nchannel(void);

/**
 * Get the number of memory ranks.
 *
 * @return
 *   The number of memory ranks on the system. The value is 0 if unknown or
 *   not the same on all devices.
 */
unsigned rte_memory_get_nrank(void);

/**
 * Drivers based on uio will not load unless physical
 * addresses are obtainable. It is only possible to get
 * physical addresses when running as a privileged user.
 *
 * @return
 *   1 if the system is able to obtain physical addresses.
 *   0 if using DMA addresses through an IOMMU.
 */
int rte_eal_using_phys_addrs(void);
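
/*
 * Illustrative sketch: branching on the EAL addressing mode, e.g. to decide
 * whether a driver can program a device with raw physical addresses or must
 * rely on IOVAs remapped by an IOMMU.
 *
 *   if (rte_eal_using_phys_addrs())
 *       printf("IOVA as PA: physical addresses are available\n");
 *   else
 *       printf("IOVA as VA: DMA goes through an IOMMU\n");
 */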

#endif /* _RTE_MEMORY_H_ */