923b1dc125387f0371ad0f53f4080f9cdb0721e0
[dpdk.git] / lib / librte_eal / common / eal_common_trace.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4
5 #include <fnmatch.h>
6 #include <inttypes.h>
7 #include <sys/queue.h>
8 #include <regex.h>
9
10 #include <rte_common.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_per_lcore.h>
14 #include <rte_string_fns.h>
15
16 #include "eal_trace.h"
17
18 RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
19 static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
20 static RTE_DEFINE_PER_LCORE(int, ctf_count);
21
22 static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
23 static struct trace trace;
24
25 struct trace *
26 trace_obj_get(void)
27 {
28         return &trace;
29 }
30
31 struct trace_point_head *
32 trace_list_head_get(void)
33 {
34         return &tp_list;
35 }
36
37 int
38 eal_trace_init(void)
39 {
40         /* One of the trace point registration failed */
41         if (trace.register_errno) {
42                 rte_errno = trace.register_errno;
43                 goto fail;
44         }
45
46         if (!rte_trace_is_enabled())
47                 return 0;
48
49         rte_spinlock_init(&trace.lock);
50
51         /* Is duplicate trace name registered */
52         if (trace_has_duplicate_entry())
53                 goto fail;
54
55         /* Generate UUID ver 4 with total size of events and number of
56          * events
57          */
58         trace_uuid_generate();
59
60         /* Generate CTF TDSL metadata */
61         if (trace_metadata_create() < 0)
62                 goto fail;
63
64         /* Create trace directory */
65         if (trace_mkdir())
66                 goto free_meta;
67
68         /* Save current epoch timestamp for future use */
69         if (trace_epoch_time_save() < 0)
70                 goto fail;
71
72         rte_trace_mode_set(trace.mode);
73
74         return 0;
75
76 free_meta:
77         trace_metadata_destroy();
78 fail:
79         trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
80         return -rte_errno;
81 }
82
/* Tear down the trace subsystem; releases the CTF metadata if tracing ran. */
void
eal_trace_fini(void)
{
	if (rte_trace_is_enabled())
		trace_metadata_destroy();
}
90
91 bool
92 rte_trace_is_enabled(void)
93 {
94         return trace.status;
95 }
96
97 static void
98 trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
99 {
100         if (mode == RTE_TRACE_MODE_OVERWRITE)
101                 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
102                         __ATOMIC_RELEASE);
103         else
104                 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
105                         __ATOMIC_RELEASE);
106 }
107
108 void
109 rte_trace_mode_set(enum rte_trace_mode mode)
110 {
111         struct trace_point *tp;
112
113         if (!rte_trace_is_enabled())
114                 return;
115
116         STAILQ_FOREACH(tp, &tp_list, next)
117                 trace_mode_set(tp->handle, mode);
118
119         trace.mode = mode;
120 }
121
122 enum
123 rte_trace_mode rte_trace_mode_get(void)
124 {
125         return trace.mode;
126 }
127
128 static bool
129 trace_point_is_invalid(rte_trace_point_t *t)
130 {
131         return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
132 }
133
134 bool
135 rte_trace_point_is_enabled(rte_trace_point_t *trace)
136 {
137         uint64_t val;
138
139         if (trace_point_is_invalid(trace))
140                 return false;
141
142         val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
143         return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
144 }
145
146 int
147 rte_trace_point_enable(rte_trace_point_t *trace)
148 {
149         if (trace_point_is_invalid(trace))
150                 return -ERANGE;
151
152         __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
153                 __ATOMIC_RELEASE);
154         return 0;
155 }
156
157 int
158 rte_trace_point_disable(rte_trace_point_t *trace)
159 {
160         if (trace_point_is_invalid(trace))
161                 return -ERANGE;
162
163         __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
164                 __ATOMIC_RELEASE);
165         return 0;
166 }
167
168 int
169 rte_trace_pattern(const char *pattern, bool enable)
170 {
171         struct trace_point *tp;
172         int rc = 0, found = 0;
173
174         STAILQ_FOREACH(tp, &tp_list, next) {
175                 if (fnmatch(pattern, tp->name, 0) == 0) {
176                         if (enable)
177                                 rc = rte_trace_point_enable(tp->handle);
178                         else
179                                 rc = rte_trace_point_disable(tp->handle);
180                         found = 1;
181                 }
182                 if (rc < 0)
183                         return rc;
184         }
185
186         return rc | found;
187 }
188
189 int
190 rte_trace_regexp(const char *regex, bool enable)
191 {
192         struct trace_point *tp;
193         int rc = 0, found = 0;
194         regex_t r;
195
196         if (regcomp(&r, regex, 0) != 0)
197                 return -EINVAL;
198
199         STAILQ_FOREACH(tp, &tp_list, next) {
200                 if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
201                         if (enable)
202                                 rc = rte_trace_point_enable(tp->handle);
203                         else
204                                 rc = rte_trace_point_disable(tp->handle);
205                         found = 1;
206                 }
207                 if (rc < 0)
208                         return rc;
209         }
210         regfree(&r);
211
212         return rc | found;
213 }
214
215 rte_trace_point_t *
216 rte_trace_point_lookup(const char *name)
217 {
218         struct trace_point *tp;
219
220         if (name == NULL)
221                 return NULL;
222
223         STAILQ_FOREACH(tp, &tp_list, next)
224                 if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
225                         return tp->handle;
226
227         return NULL;
228 }
229
/*
 * Register a trace point with the trace library.
 *
 * Invokes @register_fn, which is expected to call rte_trace_emit_header()
 * and populate the per-lcore trace_point_sz / ctf_field state, then
 * allocates a trace_point descriptor, encodes size + id into *handle and
 * appends the descriptor to the global list.
 *
 * Returns 0 on success, -rte_errno on failure. On failure, the first
 * error is also latched in trace.register_errno so eal_trace_init() can
 * report it later.
 */
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
		void (*register_fn)(void))
{
	char *field = RTE_PER_LCORE(ctf_field);
	struct trace_point *tp;
	uint16_t sz;

	/* Sanity checks of arguments */
	if (name == NULL || register_fn == NULL || handle == NULL) {
		trace_err("invalid arguments");
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check the size of the trace point object; register_fn is expected
	 * to update the per-lcore trace_point_sz as a side effect.
	 */
	RTE_PER_LCORE(trace_point_sz) = 0;
	RTE_PER_LCORE(ctf_count) = 0;
	register_fn();
	if (RTE_PER_LCORE(trace_point_sz) == 0) {
		trace_err("missing rte_trace_emit_header() in register fn");
		rte_errno = EBADF;
		goto fail;
	}

	/* Is size overflowed (the size must fit in the 16-bit field below) */
	if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
		trace_err("trace point size overflowed");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Are we running out of space to store trace points? */
	if (trace.nb_trace_points > UINT16_MAX) {
		trace_err("trace point exceeds the max count");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Get the size of the trace point */
	sz = RTE_PER_LCORE(trace_point_sz);
	tp = calloc(1, sizeof(struct trace_point));
	if (tp == NULL) {
		trace_err("fail to allocate trace point memory");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the trace point */
	if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
		trace_err("name is too long");
		rte_errno = E2BIG;
		goto free;
	}

	/* Copy the field data (written by register_fn) for future use */
	if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
		trace_err("CTF field size is too long");
		rte_errno = E2BIG;
		goto free;
	}

	/* Clear field memory for the next event */
	memset(field, 0, TRACE_CTF_FIELD_SIZE);

	/* Form the trace handle: low bits carry the size, high bits the id */
	*handle = sz;
	*handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

	trace.nb_trace_points++;
	tp->handle = handle;

	/* Add the trace point at tail; the release fence publishes the fully
	 * initialized entry before it becomes reachable from the list.
	 */
	STAILQ_INSERT_TAIL(&tp_list, tp, next);
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* All Good !!! */
	return 0;
free:
	free(tp);
fail:
	/* Latch only the first registration error for eal_trace_init() */
	if (trace.register_errno == 0)
		trace.register_errno = rte_errno;

	return -rte_errno;
}