d88b2cd42c3b289fd6e938c1f1bcdc50808486c5
[dpdk.git] / lib / librte_eal / common / eal_common_trace.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4
5 #include <fnmatch.h>
6 #include <inttypes.h>
7 #include <sys/queue.h>
8 #include <regex.h>
9
10 #include <rte_common.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_per_lcore.h>
14 #include <rte_string_fns.h>
15
16 #include "eal_trace.h"
17
18 RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
19 static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
20 static RTE_DEFINE_PER_LCORE(int, ctf_count);
21
22 static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
23 static struct trace trace;
24
25 struct trace *
26 trace_obj_get(void)
27 {
28         return &trace;
29 }
30
31 struct trace_point_head *
32 trace_list_head_get(void)
33 {
34         return &tp_list;
35 }
36
/*
 * Initialize the EAL trace subsystem.
 *
 * Returns 0 on success (including the no-op case where tracing is disabled),
 * or -rte_errno on failure. The failure paths rely on the helper that failed
 * (trace_has_duplicate_entry/trace_mkdir/trace_epoch_time_save) having set
 * rte_errno -- NOTE(review): assumed from the error log below; confirm each
 * helper honours that contract.
 */
int
eal_trace_init(void)
{
	/* One of the trace point registration failed */
	if (trace.register_errno) {
		rte_errno = trace.register_errno;
		goto fail;
	}

	/* Nothing more to set up when tracing was not requested. */
	if (!rte_trace_is_enabled())
		return 0;

	rte_spinlock_init(&trace.lock);

	/* Is duplicate trace name registered */
	if (trace_has_duplicate_entry())
		goto fail;

	/* Generate UUID ver 4 with total size of events and number of
	 * events
	 */
	trace_uuid_generate();

	/* Create trace directory */
	if (trace_mkdir())
		goto fail;

	/* Save current epoch timestamp for future use */
	if (trace_epoch_time_save() < 0)
		goto fail;

	/* Propagate the configured mode to every registered trace point. */
	rte_trace_mode_set(trace.mode);

	return 0;

fail:
	trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
	return -rte_errno;
}
76
/*
 * Tear down the EAL trace subsystem. Currently a placeholder: nothing is
 * released yet, even when tracing is enabled.
 */
void
eal_trace_fini(void)
{
	if (!rte_trace_is_enabled())
		return;
}
83
84 bool
85 rte_trace_is_enabled(void)
86 {
87         return trace.status;
88 }
89
90 static void
91 trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
92 {
93         if (mode == RTE_TRACE_MODE_OVERWRITE)
94                 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
95                         __ATOMIC_RELEASE);
96         else
97                 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
98                         __ATOMIC_RELEASE);
99 }
100
101 void
102 rte_trace_mode_set(enum rte_trace_mode mode)
103 {
104         struct trace_point *tp;
105
106         if (!rte_trace_is_enabled())
107                 return;
108
109         STAILQ_FOREACH(tp, &tp_list, next)
110                 trace_mode_set(tp->handle, mode);
111
112         trace.mode = mode;
113 }
114
115 enum
116 rte_trace_mode rte_trace_mode_get(void)
117 {
118         return trace.mode;
119 }
120
121 static bool
122 trace_point_is_invalid(rte_trace_point_t *t)
123 {
124         return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
125 }
126
127 bool
128 rte_trace_point_is_enabled(rte_trace_point_t *trace)
129 {
130         uint64_t val;
131
132         if (trace_point_is_invalid(trace))
133                 return false;
134
135         val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
136         return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
137 }
138
139 int
140 rte_trace_point_enable(rte_trace_point_t *trace)
141 {
142         if (trace_point_is_invalid(trace))
143                 return -ERANGE;
144
145         __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
146                 __ATOMIC_RELEASE);
147         return 0;
148 }
149
150 int
151 rte_trace_point_disable(rte_trace_point_t *trace)
152 {
153         if (trace_point_is_invalid(trace))
154                 return -ERANGE;
155
156         __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
157                 __ATOMIC_RELEASE);
158         return 0;
159 }
160
161 int
162 rte_trace_pattern(const char *pattern, bool enable)
163 {
164         struct trace_point *tp;
165         int rc = 0, found = 0;
166
167         STAILQ_FOREACH(tp, &tp_list, next) {
168                 if (fnmatch(pattern, tp->name, 0) == 0) {
169                         if (enable)
170                                 rc = rte_trace_point_enable(tp->handle);
171                         else
172                                 rc = rte_trace_point_disable(tp->handle);
173                         found = 1;
174                 }
175                 if (rc < 0)
176                         return rc;
177         }
178
179         return rc | found;
180 }
181
182 int
183 rte_trace_regexp(const char *regex, bool enable)
184 {
185         struct trace_point *tp;
186         int rc = 0, found = 0;
187         regex_t r;
188
189         if (regcomp(&r, regex, 0) != 0)
190                 return -EINVAL;
191
192         STAILQ_FOREACH(tp, &tp_list, next) {
193                 if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
194                         if (enable)
195                                 rc = rte_trace_point_enable(tp->handle);
196                         else
197                                 rc = rte_trace_point_disable(tp->handle);
198                         found = 1;
199                 }
200                 if (rc < 0)
201                         return rc;
202         }
203         regfree(&r);
204
205         return rc | found;
206 }
207
208 rte_trace_point_t *
209 rte_trace_point_lookup(const char *name)
210 {
211         struct trace_point *tp;
212
213         if (name == NULL)
214                 return NULL;
215
216         STAILQ_FOREACH(tp, &tp_list, next)
217                 if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
218                         return tp->handle;
219
220         return NULL;
221 }
222
/*
 * Register a trace point at constructor time.
 *
 * The register_fn callback is expected to emit the trace point's header and
 * field layout; it communicates back through the per-lcore variables
 * trace_point_sz (total payload size) and ctf_field (CTF field string).
 * On success the handle is populated with the size in the low bits and the
 * trace point id above __RTE_TRACE_FIELD_ID_SHIFT, and the entry is queued
 * on tp_list. On failure, rte_errno is set and -rte_errno is returned; the
 * first registration errno is latched in trace.register_errno so that
 * eal_trace_init() can report it later.
 */
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
		void (*register_fn)(void))
{
	char *field = RTE_PER_LCORE(ctf_field);
	struct trace_point *tp;
	uint16_t sz;

	/* Sanity checks of arguments */
	if (name == NULL || register_fn == NULL || handle == NULL) {
		trace_err("invalid arguments");
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check the size of the trace point object */
	/* register_fn() must call rte_trace_emit_header(), which records the
	 * object size into the per-lcore trace_point_sz; zero means it never
	 * did.
	 */
	RTE_PER_LCORE(trace_point_sz) = 0;
	RTE_PER_LCORE(ctf_count) = 0;
	register_fn();
	if (RTE_PER_LCORE(trace_point_sz) == 0) {
		trace_err("missing rte_trace_emit_header() in register fn");
		rte_errno = EBADF;
		goto fail;
	}

	/* Is size overflowed */
	/* The size must fit in the 16-bit field of the handle (sz below). */
	if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
		trace_err("trace point size overflowed");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Are we running out of space to store trace points? */
	if (trace.nb_trace_points > UINT16_MAX) {
		trace_err("trace point exceeds the max count");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Get the size of the trace point */
	sz = RTE_PER_LCORE(trace_point_sz);
	tp = calloc(1, sizeof(struct trace_point));
	if (tp == NULL) {
		trace_err("fail to allocate trace point memory");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the trace point */
	if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
		trace_err("name is too long");
		rte_errno = E2BIG;
		goto free;
	}

	/* Copy the field data for future use */
	if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
		trace_err("CTF field size is too long");
		rte_errno = E2BIG;
		goto free;
	}

	/* Clear field memory for the next event */
	memset(field, 0, TRACE_CTF_FIELD_SIZE);

	/* Form the trace handle */
	/* NOTE(review): if nb_trace_points is a 32-bit integer and
	 * __RTE_TRACE_FIELD_ID_SHIFT is >= 32, this shift is performed at
	 * 32-bit width before widening -- confirm the operand types, a
	 * (uint64_t) cast may be needed here.
	 */
	*handle = sz;
	*handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

	trace.nb_trace_points++;
	tp->handle = handle;

	/* Add the trace point at tail */
	STAILQ_INSERT_TAIL(&tp_list, tp, next);
	/* Publish the fully-initialized entry before readers can see it. */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* All Good !!! */
	return 0;
free:
	free(tp);
fail:
	/* Remember only the first registration failure for eal_trace_init(). */
	if (trace.register_errno == 0)
		trace.register_errno = rte_errno;

	return -rte_errno;
}