trace: add internal init and fini interface
[dpdk.git] / lib / librte_eal / common / eal_common_trace.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4
5 #include <fnmatch.h>
6 #include <inttypes.h>
7 #include <sys/queue.h>
8 #include <regex.h>
9
10 #include <rte_common.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_per_lcore.h>
14 #include <rte_string_fns.h>
15
16 #include "eal_trace.h"
17
/* Per-lcore scratch state used while a registration callback runs:
 * trace_point_sz accumulates the event payload size and ctf_field collects
 * the CTF field description text (see __rte_trace_point_register below).
 * ctf_count is reset alongside them; presumably it counts emitted CTF
 * fields — confirm against the emit helpers in eal_trace.h.
 */
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

/* Global list of registered trace points and the singleton trace object. */
static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace;
24
/* Return the process-wide singleton trace object. */
struct trace *
trace_obj_get(void)
{
	return &trace;
}
30
/* Return the head of the global registered trace point list. */
struct trace_point_head *
trace_list_head_get(void)
{
	return &tp_list;
}
36
/*
 * Initialize the trace subsystem at EAL startup: surface any latched
 * registration error, check for duplicate trace point names, generate the
 * session UUID, create the trace output directory and apply the configured
 * mode to all trace points.
 *
 * Returns 0 on success (or when tracing is disabled), -rte_errno on failure.
 * NOTE(review): the fail path assumes trace_has_duplicate_entry() and
 * trace_mkdir() set rte_errno on failure — confirm in eal_trace.h.
 */
int
eal_trace_init(void)
{
	/* One of the trace point registration failed */
	if (trace.register_errno) {
		rte_errno = trace.register_errno;
		goto fail;
	}

	/* Nothing to set up when tracing is not enabled. */
	if (!rte_trace_is_enabled())
		return 0;

	rte_spinlock_init(&trace.lock);

	/* Is duplicate trace name registered */
	if (trace_has_duplicate_entry())
		goto fail;

	/* Generate UUID ver 4 with total size of events and number of
	 * events
	 */
	trace_uuid_generate();

	/* Create trace directory */
	if (trace_mkdir())
		goto fail;


	/* Propagate the configured mode to every registered trace point. */
	rte_trace_mode_set(trace.mode);

	return 0;

fail:
	trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
	return -rte_errno;
}
73
/*
 * Tear down the trace subsystem at EAL shutdown. Currently a no-op when
 * tracing is enabled; no resources are released here yet.
 */
void
eal_trace_fini(void)
{
	if (!rte_trace_is_enabled())
		return;
}
80
/* Report whether tracing is globally enabled (non-zero trace.status). */
bool
rte_trace_is_enabled(void)
{
	return trace.status;
}
86
87 static void
88 trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
89 {
90         if (mode == RTE_TRACE_MODE_OVERWRITE)
91                 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
92                         __ATOMIC_RELEASE);
93         else
94                 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
95                         __ATOMIC_RELEASE);
96 }
97
98 void
99 rte_trace_mode_set(enum rte_trace_mode mode)
100 {
101         struct trace_point *tp;
102
103         if (!rte_trace_is_enabled())
104                 return;
105
106         STAILQ_FOREACH(tp, &tp_list, next)
107                 trace_mode_set(tp->handle, mode);
108
109         trace.mode = mode;
110 }
111
112 enum
113 rte_trace_mode rte_trace_mode_get(void)
114 {
115         return trace.mode;
116 }
117
118 static bool
119 trace_point_is_invalid(rte_trace_point_t *t)
120 {
121         return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
122 }
123
124 bool
125 rte_trace_point_is_enabled(rte_trace_point_t *trace)
126 {
127         uint64_t val;
128
129         if (trace_point_is_invalid(trace))
130                 return false;
131
132         val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
133         return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
134 }
135
136 int
137 rte_trace_point_enable(rte_trace_point_t *trace)
138 {
139         if (trace_point_is_invalid(trace))
140                 return -ERANGE;
141
142         __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
143                 __ATOMIC_RELEASE);
144         return 0;
145 }
146
147 int
148 rte_trace_point_disable(rte_trace_point_t *trace)
149 {
150         if (trace_point_is_invalid(trace))
151                 return -ERANGE;
152
153         __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
154                 __ATOMIC_RELEASE);
155         return 0;
156 }
157
158 int
159 rte_trace_pattern(const char *pattern, bool enable)
160 {
161         struct trace_point *tp;
162         int rc = 0, found = 0;
163
164         STAILQ_FOREACH(tp, &tp_list, next) {
165                 if (fnmatch(pattern, tp->name, 0) == 0) {
166                         if (enable)
167                                 rc = rte_trace_point_enable(tp->handle);
168                         else
169                                 rc = rte_trace_point_disable(tp->handle);
170                         found = 1;
171                 }
172                 if (rc < 0)
173                         return rc;
174         }
175
176         return rc | found;
177 }
178
179 int
180 rte_trace_regexp(const char *regex, bool enable)
181 {
182         struct trace_point *tp;
183         int rc = 0, found = 0;
184         regex_t r;
185
186         if (regcomp(&r, regex, 0) != 0)
187                 return -EINVAL;
188
189         STAILQ_FOREACH(tp, &tp_list, next) {
190                 if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
191                         if (enable)
192                                 rc = rte_trace_point_enable(tp->handle);
193                         else
194                                 rc = rte_trace_point_disable(tp->handle);
195                         found = 1;
196                 }
197                 if (rc < 0)
198                         return rc;
199         }
200         regfree(&r);
201
202         return rc | found;
203 }
204
205 rte_trace_point_t *
206 rte_trace_point_lookup(const char *name)
207 {
208         struct trace_point *tp;
209
210         if (name == NULL)
211                 return NULL;
212
213         STAILQ_FOREACH(tp, &tp_list, next)
214                 if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
215                         return tp->handle;
216
217         return NULL;
218 }
219
/*
 * Register a trace point.
 *
 * @param handle
 *   Output: receives the encoded handle — payload size in the low bits,
 *   trace point id shifted up by __RTE_TRACE_FIELD_ID_SHIFT.
 * @param name
 *   Trace point name, bounded-copied into the new list entry.
 * @param register_fn
 *   Callback that describes the event; it must call
 *   rte_trace_emit_header() so the per-lcore trace_point_sz becomes
 *   non-zero, and it fills the per-lcore ctf_field buffer as a side
 *   effect — presumably via the emit helpers; confirm in eal_trace.h.
 *
 * Returns 0 on success, -rte_errno on failure. The first registration
 * failure is latched in trace.register_errno for eal_trace_init().
 */
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
		void (*register_fn)(void))
{
	char *field = RTE_PER_LCORE(ctf_field);
	struct trace_point *tp;
	uint16_t sz;

	/* Sanity checks of arguments */
	if (name == NULL || register_fn == NULL || handle == NULL) {
		trace_err("invalid arguments");
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check the size of the trace point object */
	RTE_PER_LCORE(trace_point_sz) = 0;
	RTE_PER_LCORE(ctf_count) = 0;
	register_fn();
	if (RTE_PER_LCORE(trace_point_sz) == 0) {
		trace_err("missing rte_trace_emit_header() in register fn");
		rte_errno = EBADF;
		goto fail;
	}

	/* Is size overflowed: the size must fit the handle's low 16 bits */
	if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
		trace_err("trace point size overflowed");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Are we running out of space to store trace points? */
	if (trace.nb_trace_points > UINT16_MAX) {
		trace_err("trace point exceeds the max count");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Get the size of the trace point */
	sz = RTE_PER_LCORE(trace_point_sz);
	tp = calloc(1, sizeof(struct trace_point));
	if (tp == NULL) {
		trace_err("fail to allocate trace point memory");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the trace point */
	if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
		trace_err("name is too long");
		rte_errno = E2BIG;
		goto free;
	}

	/* Copy the field data for future use */
	if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
		trace_err("CTF field size is too long");
		rte_errno = E2BIG;
		goto free;
	}

	/* Clear field memory for the next event */
	memset(field, 0, TRACE_CTF_FIELD_SIZE);

	/* Form the trace handle: size in low bits, id above the shift */
	*handle = sz;
	*handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

	trace.nb_trace_points++;
	tp->handle = handle;

	/* Add the trace point at tail */
	STAILQ_INSERT_TAIL(&tp_list, tp, next);
	/* Release fence: publish the entry before concurrent readers see it */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* All Good !!! */
	return 0;
free:
	free(tp);
fail:
	/* Latch only the first registration error for eal_trace_init() */
	if (trace.register_errno == 0)
		trace.register_errno = rte_errno;

	return -rte_errno;
}