malloc: support callbacks on memory events
lib/librte_eal/common/eal_common_memalloc.c
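
Adds an EAL-internal registry of named memory event callbacks: callbacks are
registered and unregistered by name under a read-write lock, and
eal_memalloc_mem_event_notify() invokes each of them whenever memory is
allocated or freed. A helper, eal_memalloc_is_contig(), is also added for
checking whether a virtual address range is IOVA-contiguous.
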
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_fbarray.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_rwlock.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"

struct mem_event_callback_entry {
	TAILQ_ENTRY(mem_event_callback_entry) next;
	char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
	rte_mem_event_callback_t clb;
};

/** Doubly-linked list of registered memory event callbacks. */
TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);

static struct mem_event_callback_entry_list mem_event_callback_list =
	TAILQ_HEAD_INITIALIZER(mem_event_callback_list);

static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;

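/*
 * Look up a callback entry by name; returns NULL if no entry matches.
 * Callers are expected to hold mem_event_rwlock (register and unregister
 * below take it for writing).
 */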
static struct mem_event_callback_entry *
find_mem_event_callback(const char *name)
{
	struct mem_event_callback_entry *r;

	TAILQ_FOREACH(r, &mem_event_callback_list, next) {
		if (!strcmp(r->name, name))
			break;
	}
	return r;
}

bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
		size_t len)
{
	void *end, *aligned_start, *aligned_end;
	size_t pgsz = (size_t)msl->page_sz;
	const struct rte_memseg *ms;

	/* for IOVA_VA, it's always contiguous */
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return true;

	/* for legacy memory, it's always contiguous */
	if (internal_config.legacy_mem)
		return true;

	end = RTE_PTR_ADD(start, len);

	/* for nohuge, we check pagemap, otherwise check memseg */
	if (!rte_eal_has_hugepages()) {
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* skip first iteration */
		cur = rte_mem_virt2iova(aligned_start);
		expected = cur + pgsz;
		aligned_start = RTE_PTR_ADD(aligned_start, pgsz);

		while (aligned_start < aligned_end) {
			cur = rte_mem_virt2iova(aligned_start);
			if (cur != expected)
				return false;
			aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
			expected += pgsz;
		}
	} else {
		int start_seg, end_seg, cur_seg;
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) / pgsz;
		end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) / pgsz;

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* skip first iteration */
		ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
		cur = ms->iova;
		expected = cur + pgsz;

		/* if we can't access IOVA addresses, assume non-contiguous */
		if (cur == RTE_BAD_IOVA)
			return false;

		for (cur_seg = start_seg + 1; cur_seg < end_seg;
				cur_seg++, expected += pgsz) {
			ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);

			if (ms->iova != expected)
				return false;
		}
	}
	return true;
}
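
/*
 * Illustrative sketch, not part of the original file: checking contiguity
 * of an arbitrary VA range by first resolving its memseg list. The helper
 * name is hypothetical, and it assumes rte_mem_virt2memseg_list() is
 * available and that the range does not span multiple memseg lists.
 */
static bool __rte_unused
va_range_is_iova_contig(void *start, size_t len)
{
	const struct rte_memseg_list *msl;

	/* find the memseg list this address belongs to */
	msl = rte_mem_virt2memseg_list(start);
	if (msl == NULL)
		return false;
	return eal_memalloc_is_contig(msl, start, len);
}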

int
eal_memalloc_mem_event_callback_register(const char *name,
		rte_mem_event_callback_t clb)
{
	struct mem_event_callback_entry *entry;
	int ret, len;

	if (name == NULL || clb == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_event_rwlock);

	entry = find_mem_event_callback(name);
	if (entry != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		ret = -1;
		goto unlock;
	}

	/* callback successfully created and is valid, add it to the list */
	entry->clb = clb;
	snprintf(entry->name, RTE_MEM_EVENT_CALLBACK_NAME_LEN, "%s", name);
	TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);

	ret = 0;

	RTE_LOG(DEBUG, EAL, "Mem event callback '%s' registered\n", name);

unlock:
	rte_rwlock_write_unlock(&mem_event_rwlock);
	return ret;
}
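
/*
 * Illustrative sketch, not part of the original file: a minimal callback
 * with the signature this registry expects, and how it would be hooked up.
 * The function and the "example" name are hypothetical; external code would
 * normally go through the public rte_mem_event_callback_register() wrapper
 * rather than this internal API.
 */
static void __rte_unused
example_mem_event_cb(enum rte_mem_event event, const void *addr, size_t len)
{
	/* event is RTE_MEM_EVENT_ALLOC or RTE_MEM_EVENT_FREE */
	RTE_LOG(DEBUG, EAL, "Mem event %s: %zu bytes at %p\n",
		event == RTE_MEM_EVENT_ALLOC ? "alloc" : "free", len, addr);
}
/* eal_memalloc_mem_event_callback_register("example", example_mem_event_cb); */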

int
eal_memalloc_mem_event_callback_unregister(const char *name)
{
	struct mem_event_callback_entry *entry;
	int ret, len;

	if (name == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_event_rwlock);

	entry = find_mem_event_callback(name);
	if (entry == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	TAILQ_REMOVE(&mem_event_callback_list, entry, next);
	free(entry);

	ret = 0;

	RTE_LOG(DEBUG, EAL, "Mem event callback '%s' unregistered\n", name);

unlock:
	rte_rwlock_write_unlock(&mem_event_rwlock);
	return ret;
}

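/*
 * Invoke every registered callback for a memory event. The read lock is
 * held while callbacks run, so a callback must not try to register or
 * unregister callbacks from within: doing so takes the write lock and
 * would deadlock.
 */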
void
eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
		size_t len)
{
	struct mem_event_callback_entry *entry;

	rte_rwlock_read_lock(&mem_event_rwlock);

	TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
		RTE_LOG(DEBUG, EAL, "Calling mem event callback '%s'\n",
			entry->name);
		entry->clb(event, start, len);
	}

	rte_rwlock_read_unlock(&mem_event_rwlock);
}