1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
9 #include <rte_fbarray.h>
10 #include <rte_memzone.h>
11 #include <rte_memory.h>
12 #include <rte_string_fns.h>
13 #include <rte_rwlock.h>
15 #include "eal_private.h"
16 #include "eal_internal_cfg.h"
17 #include "eal_memalloc.h"
/*
 * Registry entry for a memory event callback, keyed by (name, arg).
 * NOTE(review): this chunk is elided — the 'arg' member referenced by
 * find_mem_event_callback() and the closing brace are not visible here.
 */
19 struct mem_event_callback_entry {
20 TAILQ_ENTRY(mem_event_callback_entry) next;
/* human-readable identifier, fixed-size NUL-terminated buffer */
21 char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
/* user-supplied callback invoked on alloc/free events */
22 rte_mem_event_callback_t clb;
/*
 * Registry entry for a memory allocation validator, keyed by
 * (name, socket_id).
 * NOTE(review): elided chunk — the 'socket_id' and 'limit' members used
 * elsewhere in this file, and the closing brace, are not visible here.
 */
26 struct mem_alloc_validator_entry {
27 TAILQ_ENTRY(mem_alloc_validator_entry) next;
/* human-readable identifier, fixed-size NUL-terminated buffer */
28 char name[RTE_MEM_ALLOC_VALIDATOR_NAME_LEN];
/* user-supplied validator consulted before growing memory on a socket */
29 rte_mem_alloc_validator_t clb;
34 /** Double linked list of actions. */
35 TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);
36 TAILQ_HEAD(mem_alloc_validator_entry_list, mem_alloc_validator_entry);
/* global registry of mem event callbacks, guarded by mem_event_rwlock */
38 static struct mem_event_callback_entry_list mem_event_callback_list =
39 TAILQ_HEAD_INITIALIZER(mem_event_callback_list);
40 static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
/* global registry of alloc validators, guarded by its own rwlock */
42 static struct mem_alloc_validator_entry_list mem_alloc_validator_list =
43 TAILQ_HEAD_INITIALIZER(mem_alloc_validator_list);
44 static rte_rwlock_t mem_alloc_validator_rwlock = RTE_RWLOCK_INITIALIZER;
/*
 * Linear lookup of a registered mem event callback matching both 'name'
 * and user argument 'arg'. Callers in this file take mem_event_rwlock
 * (write) before calling.
 * NOTE(review): the loop body's break/return and the function's closing
 * brace are elided from this chunk.
 */
46 static struct mem_event_callback_entry *
47 find_mem_event_callback(const char *name, void *arg)
49 struct mem_event_callback_entry *r;
51 TAILQ_FOREACH(r, &mem_event_callback_list, next) {
/* an entry matches only if both the name and the argument match */
52 if (!strcmp(r->name, name) && r->arg == arg)
/*
 * Linear lookup of a registered alloc validator matching both 'name'
 * and 'socket_id'. Callers in this file take mem_alloc_validator_rwlock
 * (write) before calling.
 * NOTE(review): the loop body's break/return and the function's closing
 * brace are elided from this chunk.
 */
58 static struct mem_alloc_validator_entry *
59 find_mem_alloc_validator(const char *name, int socket_id)
61 struct mem_alloc_validator_entry *r;
63 TAILQ_FOREACH(r, &mem_alloc_validator_list, next) {
/* an entry matches only if both the name and the socket match */
64 if (!strcmp(r->name, name) && r->socket_id == socket_id)
/*
 * Check whether the virtual range [start, start + len) is IOVA-contiguous
 * within memseg list 'msl'.
 * NOTE(review): this chunk is elided — the return statements, the 'len'
 * parameter line and some assignments are not visible; comments below
 * describe only the visible code.
 */
71 eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
74 void *end, *aligned_start, *aligned_end;
/* page size of this memseg list, used for all alignment below */
75 size_t pgsz = (size_t)msl->page_sz;
76 const struct rte_memseg *ms;
78 /* for IOVA_VA, it's always contiguous */
79 if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
82 /* for legacy memory, it's always contiguous */
83 if (internal_config.legacy_mem)
86 end = RTE_PTR_ADD(start, len);
88 /* for nohuge, we check pagemap, otherwise check memseg */
89 if (!rte_eal_has_hugepages()) {
90 rte_iova_t cur, expected;
/* widen the range to whole pages before walking it */
92 aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
93 aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);
95 /* if start and end are on the same page, bail out early */
96 if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
99 /* skip first iteration */
100 cur = rte_mem_virt2iova(aligned_start);
/* each subsequent page must sit exactly one page after the last */
101 expected = cur + pgsz;
102 aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
/* walk page by page, resolving each VA to IOVA; the comparison
 * against 'expected' is elided from this chunk */
104 while (aligned_start < aligned_end) {
105 cur = rte_mem_virt2iova(aligned_start);
108 aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
112 int start_seg, end_seg, cur_seg;
113 rte_iova_t cur, expected;
115 aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
116 aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);
/* translate the aligned range into memseg array indices
 * (divisor lines elided — presumably page_sz) */
118 start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) /
120 end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) /
123 /* if start and end are on the same page, bail out early */
124 if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
127 /* skip first iteration */
128 ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
/* NOTE(review): assignment of 'cur' (presumably ms->iova) is
 * elided between these two lines */
130 expected = cur + pgsz;
132 /* if we can't access IOVA addresses, assume non-contiguous */
133 if (cur == RTE_BAD_IOVA)
/* each following memseg's IOVA must equal the running 'expected' */
136 for (cur_seg = start_seg + 1; cur_seg < end_seg;
137 cur_seg++, expected += pgsz) {
138 ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);
140 if (ms->iova != expected)
/*
 * Register a memory event callback under 'name' with user argument 'arg'.
 * Duplicate (name, arg) pairs are looked up before insertion; on bad
 * parameters rte_errno is set (ENAMETOOLONG visible below; other error
 * paths and return statements are elided from this chunk).
 */
148 eal_memalloc_mem_event_callback_register(const char *name,
149 rte_mem_event_callback_t clb, void *arg)
151 struct mem_event_callback_entry *entry;
/* reject NULL name or NULL callback */
153 if (name == NULL || clb == NULL) {
157 len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
/* name must fit, including NUL, in the fixed-size buffer */
161 } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
162 rte_errno = ENAMETOOLONG;
/* write lock: we may insert into the callback list */
165 rte_rwlock_write_lock(&mem_event_rwlock);
/* check for an existing (name, arg) entry — the branch on the
 * result is elided from this chunk */
167 entry = find_mem_event_callback(name, arg);
174 entry = malloc(sizeof(*entry));
181 /* callback successfully created and is valid, add it to the list */
184 strlcpy(entry->name, name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
185 TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);
189 RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' registered\n",
193 rte_rwlock_write_unlock(&mem_event_rwlock);
/*
 * Unregister the memory event callback matching (name, arg).
 * Mirrors the register path: validate the name, look the entry up under
 * the write lock, then remove it from the list (NULL-check branches,
 * free of the entry and return statements are elided from this chunk).
 */
198 eal_memalloc_mem_event_callback_unregister(const char *name, void *arg)
200 struct mem_event_callback_entry *entry;
207 len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
/* same name-length validation as in the register path */
211 } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
212 rte_errno = ENAMETOOLONG;
/* write lock: we may remove from the callback list */
215 rte_rwlock_write_lock(&mem_event_rwlock);
217 entry = find_mem_event_callback(name, arg);
223 TAILQ_REMOVE(&mem_event_callback_list, entry, next);
228 RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' unregistered\n",
232 rte_rwlock_write_unlock(&mem_event_rwlock);
/*
 * Notify every registered callback of a memory event (alloc/free) on
 * the range [start, start + len). Runs under the read lock so multiple
 * notifications may proceed concurrently, but never concurrently with
 * (un)registration.
 */
237 eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
240 struct mem_event_callback_entry *entry;
242 rte_rwlock_read_lock(&mem_event_rwlock);
/* invoke callbacks in registration order, each with its own arg */
244 TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
245 RTE_LOG(DEBUG, EAL, "Calling mem event callback '%s:%p'\n",
246 entry->name, entry->arg);
247 entry->clb(event, start, len, entry->arg);
250 rte_rwlock_read_unlock(&mem_event_rwlock);
/*
 * Register an allocation validator for 'socket_id' that fires once total
 * memory on that socket would exceed 'limit'. Duplicate (name, socket_id)
 * pairs are looked up before insertion (the branch on the lookup result,
 * malloc failure handling and return statements are elided from this
 * chunk).
 */
254 eal_memalloc_mem_alloc_validator_register(const char *name,
255 rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
257 struct mem_alloc_validator_entry *entry;
/* reject NULL name/callback and negative socket ids */
259 if (name == NULL || clb == NULL || socket_id < 0) {
263 len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
/* name must fit, including NUL, in the fixed-size buffer */
267 } else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
268 rte_errno = ENAMETOOLONG;
/* write lock: we may insert into the validator list */
271 rte_rwlock_write_lock(&mem_alloc_validator_rwlock);
273 entry = find_mem_alloc_validator(name, socket_id);
280 entry = malloc(sizeof(*entry));
287 /* callback successfully created and is valid, add it to the list */
289 entry->socket_id = socket_id;
290 entry->limit = limit;
291 strlcpy(entry->name, name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
292 TAILQ_INSERT_TAIL(&mem_alloc_validator_list, entry, next);
296 RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i with limit %zu registered\n",
297 name, socket_id, limit);
300 rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
/*
 * Unregister the allocation validator matching (name, socket_id).
 * Mirrors the register path: validate arguments, look the entry up under
 * the write lock, then remove it (not-found branch, free of the entry
 * and return statements are elided from this chunk).
 */
305 eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
307 struct mem_alloc_validator_entry *entry;
/* reject NULL name and negative socket ids */
310 if (name == NULL || socket_id < 0) {
314 len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
/* same name-length validation as in the register path */
318 } else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
319 rte_errno = ENAMETOOLONG;
/* write lock: we may remove from the validator list */
322 rte_rwlock_write_lock(&mem_alloc_validator_rwlock);
324 entry = find_mem_alloc_validator(name, socket_id);
330 TAILQ_REMOVE(&mem_alloc_validator_list, entry, next);
335 RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i unregistered\n",
339 rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
/*
 * Consult all registered validators for 'socket_id' about a proposed new
 * total allocation of 'new_len' bytes. Runs under the read lock; a
 * validator is only invoked once new_len reaches its configured limit
 * (the handling of a negative validator return is elided from this
 * chunk).
 */
344 eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len)
346 struct mem_alloc_validator_entry *entry;
349 rte_rwlock_read_lock(&mem_alloc_validator_rwlock);
351 TAILQ_FOREACH(entry, &mem_alloc_validator_list, next) {
/* skip validators for other sockets, and those whose limit has
 * not yet been reached by the proposed total */
352 if (entry->socket_id != socket_id || entry->limit > new_len)
354 RTE_LOG(DEBUG, EAL, "Calling mem alloc validator '%s' on socket %i\n",
355 entry->name, entry->socket_id);
/* a negative return from the validator vetoes the allocation */
356 if (entry->clb(socket_id, entry->limit, new_len) < 0)
360 rte_rwlock_read_unlock(&mem_alloc_validator_rwlock);