97389036de978bc1f35314505fd4250ec0134b01
[dpdk.git] lib/librte_eal/common/eal_common_trace.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace;

struct trace *
trace_obj_get(void)
{
        return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
        return &tp_list;
}

int
eal_trace_init(void)
{
        uint8_t i;

        /* Trace memory should start 8-byte aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

        /* One of the trace point registrations failed */
        if (trace.register_errno) {
                rte_errno = trace.register_errno;
                goto fail;
        }

        if (trace.args.nb_args)
                trace.status = true;

        if (!rte_trace_is_enabled())
                return 0;

        rte_spinlock_init(&trace.lock);

        /* Check whether a duplicate trace point name was registered */
        if (trace_has_duplicate_entry())
                goto fail;

        /* Generate UUID version 4 from the total size of events and the
         * number of events
         */
        trace_uuid_generate();

        /* Generate CTF TDSL metadata */
        if (trace_metadata_create() < 0)
                goto fail;

        /* Create trace directory */
        if (trace_mkdir())
                goto free_meta;

        /* Save current epoch timestamp for future use */
        if (trace_epoch_time_save() < 0)
                goto free_meta;

        /* Apply global configurations */
        for (i = 0; i < trace.args.nb_args; i++)
                trace_args_apply(trace.args.args[i]);

        rte_trace_mode_set(trace.mode);

        return 0;

free_meta:
        trace_metadata_destroy();
fail:
        trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
        return -rte_errno;
}

void
eal_trace_fini(void)
{
        if (!rte_trace_is_enabled())
                return;
        trace_mem_per_thread_free();
        trace_metadata_destroy();
        eal_trace_args_free();
}

bool
rte_trace_is_enabled(void)
{
        return trace.status;
}

static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
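        /* In overwrite mode the discard bit is cleared so a full trace buffer
         * wraps around; otherwise the bit is set and new events are dropped
         * once the buffer fills up.
         */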
        if (mode == RTE_TRACE_MODE_OVERWRITE)
                __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
        else
                __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
        struct trace_point *tp;

        if (!rte_trace_is_enabled())
                return;

        STAILQ_FOREACH(tp, &tp_list, next)
                trace_mode_set(tp->handle, mode);

        trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
        return trace.mode;
}

static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
        return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
        uint64_t val;

        if (trace_point_is_invalid(trace))
                return false;

        val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
        return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_pattern(const char *pattern, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (fnmatch(pattern, tp->name, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0)
                        return rc;
        }

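        /* rc is 0 on this path, so the result is 1 when at least one trace
         * point matched the pattern and 0 when none matched.
         */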
        return rc | found;
}

int
rte_trace_regexp(const char *regex, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;
        regex_t r;

        if (regcomp(&r, regex, 0) != 0)
                return -EINVAL;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0) {
                        regfree(&r);
                        return rc;
                }
        }
        regfree(&r);

        return rc | found;
}

rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
        struct trace_point *tp;

        if (name == NULL)
                return NULL;

        STAILQ_FOREACH(tp, &tp_list, next)
                if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
                        return tp->handle;

        return NULL;
}

static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
        rte_trace_point_t *handle = tp->handle;

        fprintf(f, "\tid %d, %s, size is %d, %s\n",
                trace_id_get(handle), tp->name,
                (uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
                rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        if (trace->nb_trace_mem_list == 0)
                return;

        rte_spinlock_lock(&trace->lock);
        fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
        fprintf(f, "\nTrace mem info\n--------------\n");
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                header = trace->lcore_meta[count].mem;
                fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
                        count, header,
                        trace_area_to_string(trace->lcore_meta[count].area),
                        header->stream_header.lcore_id,
                        header->stream_header.thread_name);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
rte_trace_dump(FILE *f)
{
        struct trace_point_head *tp_list = trace_list_head_get();
        struct trace *trace = trace_obj_get();
        struct trace_point *tp;

        fprintf(f, "\nGlobal info\n-----------\n");
        fprintf(f, "status = %s\n",
                rte_trace_is_enabled() ? "enabled" : "disabled");
        fprintf(f, "mode = %s\n",
                trace_mode_to_string(rte_trace_mode_get()));
        fprintf(f, "dir = %s\n", trace->dir);
        fprintf(f, "buffer len = %d\n", trace->buff_len);
        fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

        trace_lcore_mem_dump(f);
        fprintf(f, "\nTrace point info\n----------------\n");
        STAILQ_FOREACH(tp, tp_list, next)
                trace_point_dump(f, tp);
}

void
__rte_trace_mem_per_thread_alloc(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        void *lcore_meta;
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        if (RTE_PER_LCORE(trace_mem))
                return;

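        /* Allocate this thread's trace buffer under the trace lock, first from
         * hugepage memory and then from the libc heap, and record it in the
         * lcore meta list so it can be dumped and freed later.
         */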
        rte_spinlock_lock(&trace->lock);

        count = trace->nb_trace_mem_list;

        /* Allocate room for storing the thread trace mem meta */
        lcore_meta = realloc(trace->lcore_meta,
                sizeof(trace->lcore_meta[0]) * (count + 1));

        /* Provide dummy space for fast path to consume */
        if (lcore_meta == NULL) {
                trace_crit("trace mem meta memory realloc failed");
                header = NULL;
                goto fail;
        }
        trace->lcore_meta = lcore_meta;

        /* First attempt from huge page */
        header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
        }

        /* Second attempt from heap */
        header = malloc(trace_mem_sz(trace->buff_len));
        if (header == NULL) {
                trace_crit("trace mem malloc attempt failed");
                goto fail;
        }

        /* Second attempt from heap succeeded */
        trace->lcore_meta[count].area = TRACE_AREA_HEAP;

        /* Initialize the trace header */
found:
        header->offset = 0;
        header->len = trace->buff_len;
        header->stream_header.magic = TRACE_CTF_MAGIC;
        rte_uuid_copy(header->stream_header.uuid, trace->uuid);
        header->stream_header.lcore_id = rte_lcore_id();

        /* Store the thread name */
        char *name = header->stream_header.thread_name;
        memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
        rte_thread_getname(pthread_self(), name,
                __RTE_TRACE_EMIT_STRING_LEN_MAX);

        trace->lcore_meta[count].mem = header;
        trace->nb_trace_mem_list++;
fail:
        RTE_PER_LCORE(trace_mem) = header;
        rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_per_thread_free(void)
{
        struct trace *trace = trace_obj_get();
        uint32_t count;
        void *mem;

        if (!rte_trace_is_enabled())
                return;

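        /* Free each per-thread buffer with the allocator it was obtained from */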
        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                mem = trace->lcore_meta[count].mem;
                if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
                        eal_free_no_trace(mem);
                else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
                        free(mem);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
        char *field = RTE_PER_LCORE(ctf_field);
        int count = RTE_PER_LCORE(ctf_count);
        size_t size;
        int rc;

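        /* Append "<datatype> <name>;" to this lcore's CTF field string and
         * add the field payload size to the trace point size.
         */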
        size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
        RTE_PER_LCORE(trace_point_sz) += sz;
        rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
        if (rc <= 0 || (size_t)rc >= size) {
                RTE_PER_LCORE(trace_point_sz) = 0;
                trace_crit("CTF field is too long");
                return;
        }
        RTE_PER_LCORE(ctf_count) += rc;
}

int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))
{
        char *field = RTE_PER_LCORE(ctf_field);
        struct trace_point *tp;
        uint16_t sz;

        /* Sanity checks of arguments */
        if (name == NULL || register_fn == NULL || handle == NULL) {
                trace_err("invalid arguments");
                rte_errno = EINVAL;
                goto fail;
        }

        /* Check the size of the trace point object */
        RTE_PER_LCORE(trace_point_sz) = 0;
        RTE_PER_LCORE(ctf_count) = 0;
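        /* register_fn() emits the header and the event fields, which
         * accumulate into the per-lcore trace point size and CTF field string.
         */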
        register_fn();
        if (RTE_PER_LCORE(trace_point_sz) == 0) {
                trace_err("missing rte_trace_emit_header() in register fn");
                rte_errno = EBADF;
                goto fail;
        }

        /* Check whether the size overflowed */
        if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
                trace_err("trace point size overflowed");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Are we running out of space to store trace points? */
        if (trace.nb_trace_points > UINT16_MAX) {
                trace_err("trace point exceeds the max count");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Get the size of the trace point */
        sz = RTE_PER_LCORE(trace_point_sz);
        tp = calloc(1, sizeof(struct trace_point));
        if (tp == NULL) {
                trace_err("fail to allocate trace point memory");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* Initialize the trace point */
        if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
                trace_err("name is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Copy the field data for future use */
        if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
                trace_err("CTF field size is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Clear field memory for the next event */
        memset(field, 0, TRACE_CTF_FIELD_SIZE);

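        /* The 64-bit handle packs the event payload size into its low bits and
         * the trace point ID above __RTE_TRACE_FIELD_ID_SHIFT; the enable and
         * discard flags occupy the topmost bits.
         */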
        /* Form the trace handle */
        *handle = sz;
        *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

        trace.nb_trace_points++;
        tp->handle = handle;

        /* Add the trace point at tail */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
        __atomic_thread_fence(__ATOMIC_RELEASE);

        /* All Good !!! */
        return 0;
free:
        free(tp);
fail:
        if (trace.register_errno == 0)
                trace.register_errno = rte_errno;

        return -rte_errno;
}