1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
9 #include <rte_fbarray.h>
10 #include <rte_memzone.h>
11 #include <rte_memory.h>
12 #include <rte_eal_memconfig.h>
13 #include <rte_rwlock.h>
15 #include "eal_private.h"
16 #include "eal_internal_cfg.h"
17 #include "eal_memalloc.h"
/*
 * Registry entry for one memory-event callback: list linkage, the
 * unique name it was registered under, and the callback function.
 * NOTE(review): this chunk is a truncated extract — the struct's
 * remaining members (if any) and its closing brace are not visible.
 */
19 struct mem_event_callback_entry {
20 TAILQ_ENTRY(mem_event_callback_entry) next;
21 char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
22 rte_mem_event_callback_t clb;
25 /** Double linked list of actions. */
26 TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);
/* Global registry of memory-event callbacks, empty at startup. */
28 static struct mem_event_callback_entry_list mem_event_callback_list =
29 TAILQ_HEAD_INITIALIZER(mem_event_callback_list);
/*
 * Guards mem_event_callback_list: register/unregister take the write
 * lock, event notification takes the read lock (see functions below).
 */
31 static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
/*
 * Look up a registered callback entry by exact name (linear scan of
 * the global list).  The callers visible below acquire
 * mem_event_rwlock before calling this helper.
 * NOTE(review): truncated extract — the match/miss return statements
 * and the closing brace are not visible in this chunk.
 */
33 static struct mem_event_callback_entry *
34 find_mem_event_callback(const char *name)
36 struct mem_event_callback_entry *r;
38 TAILQ_FOREACH(r, &mem_event_callback_list, next) {
/* strcmp == 0 means names match exactly */
39 if (!strcmp(r->name, name))
/*
 * Check whether the VA range [start, start + len) is IOVA-contiguous
 * within memseg list 'msl'.
 * NOTE(review): truncated extract — the opening brace, several early
 * returns, loop bodies, the division denominators and the function
 * tail are not visible; comments below cover only the visible lines.
 */
46 eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
49 void *end, *aligned_start, *aligned_end;
50 size_t pgsz = (size_t)msl->page_sz;
51 const struct rte_memseg *ms;
53 /* for IOVA_VA, it's always contiguous */
54 if (rte_eal_iova_mode() == RTE_IOVA_VA)
57 /* for legacy memory, it's always contiguous */
58 if (internal_config.legacy_mem)
61 end = RTE_PTR_ADD(start, len);
63 /* for nohuge, we check pagemap, otherwise check memseg */
64 if (!rte_eal_has_hugepages()) {
65 rte_iova_t cur, expected;
/* widen the range to whole-page boundaries before walking it */
67 aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
68 aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);
70 /* if start and end are on the same page, bail out early */
71 if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
74 /* skip first iteration */
75 cur = rte_mem_virt2iova(aligned_start);
/* each subsequent page must sit exactly one page after the previous */
76 expected = cur + pgsz;
77 aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
79 while (aligned_start < aligned_end) {
80 cur = rte_mem_virt2iova(aligned_start);
/* NOTE(review): comparison of cur vs expected and the 'expected'
 * advance occur on lines not visible in this chunk. */
83 aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
/* hugepage path: walk the memseg array instead of the pagemap */
87 int start_seg, end_seg, cur_seg;
88 rte_iova_t cur, expected;
90 aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
91 aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);
/* translate VA offsets from base_va into memseg array indices
 * (divisor is on a line not visible in this chunk, presumably pgsz) */
93 start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) /
95 end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) /
98 /* if start and end are on the same page, bail out early */
99 if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
102 /* skip first iteration */
103 ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
/* NOTE(review): 'cur' is assigned from the first memseg on a line
 * not visible here (original line 104) — confirm against full file */
105 expected = cur + pgsz;
107 /* if we can't access IOVA addresses, assume non-contiguous */
108 if (cur == RTE_BAD_IOVA)
/* remaining segments must each start exactly one page further */
111 for (cur_seg = start_seg + 1; cur_seg < end_seg;
112 cur_seg++, expected += pgsz) {
113 ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);
115 if (ms->iova != expected)
/*
 * Register a memory-event callback under a unique name.
 * NOTE(review): truncated extract — the 'len' declaration, the
 * EINVAL/EEXIST/ENOMEM error paths and the return statements are not
 * visible in this chunk; comments below cover only the visible lines.
 */
123 eal_memalloc_mem_event_callback_register(const char *name,
124 rte_mem_event_callback_t clb)
126 struct mem_event_callback_entry *entry;
/* both the name and the callback are mandatory */
128 if (name == NULL || clb == NULL) {
/* bounded scan: never reads past the maximum name length */
132 len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
/* len == max means no NUL within the buffer-sized prefix */
136 } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
137 rte_errno = ENAMETOOLONG;
/* write lock: we are about to mutate the global callback list */
140 rte_rwlock_write_lock(&mem_event_rwlock);
/* duplicate-name check (error handling on non-visible lines) */
142 entry = find_mem_event_callback(name);
149 entry = malloc(sizeof(*entry));
156 /* callback successfully created and is valid, add it to the list */
/* snprintf always NUL-terminates, unlike strncpy */
158 snprintf(entry->name, RTE_MEM_EVENT_CALLBACK_NAME_LEN, "%s", name);
159 TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);
163 RTE_LOG(DEBUG, EAL, "Mem event callback '%s' registered\n", name);
166 rte_rwlock_write_unlock(&mem_event_rwlock);
/*
 * Unregister a previously registered memory-event callback by name.
 * NOTE(review): truncated extract — the 'len' declaration, NULL-name
 * check, not-found (ENOENT?) path, free of the entry and the return
 * statements are not visible in this chunk.
 */
171 eal_memalloc_mem_event_callback_unregister(const char *name)
173 struct mem_event_callback_entry *entry;
/* same bounded name validation as in the register path */
180 len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
184 } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
185 rte_errno = ENAMETOOLONG;
/* write lock: we are about to mutate the global callback list */
188 rte_rwlock_write_lock(&mem_event_rwlock);
190 entry = find_mem_event_callback(name);
/* NOTE(review): entry == NULL handling occurs on non-visible lines */
196 TAILQ_REMOVE(&mem_event_callback_list, entry, next);
201 RTE_LOG(DEBUG, EAL, "Mem event callback '%s' unregistered\n", name);
204 rte_rwlock_write_unlock(&mem_event_rwlock);
/*
 * Invoke every registered callback for a memory event (alloc/free)
 * on the range [start, start + len).  Takes the read lock, so
 * notifications may run concurrently with each other but are
 * serialized against register/unregister.
 * NOTE(review): truncated extract — the RTE_LOG argument list
 * continues on a non-visible line (original line 218); the visible
 * format string has no trailing '\n', unlike the other RTE_LOG calls
 * in this file — confirm against the full source before changing.
 */
209 eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
212 struct mem_event_callback_entry *entry;
214 rte_rwlock_read_lock(&mem_event_rwlock);
216 TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
217 RTE_LOG(DEBUG, EAL, "Calling mem event callback %s",
219 entry->clb(event, start, len);
222 rte_rwlock_read_unlock(&mem_event_rwlock);