eal: introduce thread uninit helper
[dpdk.git] / lib/librte_eal/common/eal_common_trace.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };
struct trace *
trace_obj_get(void)
{
        return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
        return &tp_list;
}

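/*
 * Initialize the trace subsystem. In upstream DPDK this is invoked once
 * from rte_eal_init() before worker threads launch, with eal_trace_fini()
 * as its counterpart on cleanup; the call sites live outside this file,
 * so treat that ordering as an assumption here.
 */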
int
eal_trace_init(void)
{
        struct trace_arg *arg;

        /* Trace memory must be 8B aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

        /* One of the trace point registrations failed */
        if (trace.register_errno) {
                rte_errno = trace.register_errno;
                goto fail;
        }

        if (!STAILQ_EMPTY(&trace.args))
                trace.status = true;

        if (!rte_trace_is_enabled())
                return 0;

        rte_spinlock_init(&trace.lock);

        /* Check for a duplicate trace point name registration */
        if (trace_has_duplicate_entry())
                goto fail;

        /* Generate a version 4 UUID from the total size of events and the
         * number of events
         */
        trace_uuid_generate();

        /* Apply the buffer size configuration for trace output */
        trace_bufsz_args_apply();

        /* Generate the CTF TSDL metadata */
        if (trace_metadata_create() < 0)
                goto fail;

        /* Create the trace directory */
        if (trace_mkdir())
                goto free_meta;

        /* Save the current epoch timestamp for future use */
        if (trace_epoch_time_save() < 0)
                goto free_meta;

        /* Apply global configurations */
        STAILQ_FOREACH(arg, &trace.args, next)
                trace_args_apply(arg->val);

        rte_trace_mode_set(trace.mode);

        return 0;

free_meta:
        trace_metadata_destroy();
fail:
        trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
        return -rte_errno;
}

void
eal_trace_fini(void)
{
        if (!rte_trace_is_enabled())
                return;
        trace_mem_free();
        trace_metadata_destroy();
        eal_trace_args_free();
}

bool
rte_trace_is_enabled(void)
{
        return trace.status;
}

static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
        if (mode == RTE_TRACE_MODE_OVERWRITE)
                __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
        else
                __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
        struct trace_point *tp;

        if (!rte_trace_is_enabled())
                return;

        STAILQ_FOREACH(tp, &tp_list, next)
                trace_mode_set(tp->handle, mode);

        trace.mode = mode;
}
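
/*
 * In overwrite mode the per-thread ring buffer wraps and overwrites the
 * oldest events; in discard mode new events are dropped once the buffer
 * is full. A minimal usage sketch of the public API shown above:
 *
 *	rte_trace_mode_set(RTE_TRACE_MODE_DISCARD);
 */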

enum rte_trace_mode
rte_trace_mode_get(void)
{
        return trace.mode;
}

static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
        return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
        uint64_t val;

        if (trace_point_is_invalid(trace))
                return false;

        val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
        return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_pattern(const char *pattern, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (fnmatch(pattern, tp->name, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0)
                        return rc;
        }

        return rc | found;
}
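
/*
 * A minimal sketch of glob-based control; "lib.eal.*" matches the EAL
 * trace points registered upstream and is only illustrative here:
 *
 *	int rc = rte_trace_pattern("lib.eal.*", true);
 *	// rc == 1: at least one trace point matched and was enabled
 *	// rc == 0: no trace point matched the pattern
 *	// rc < 0: enabling a matched trace point failed
 */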

int
rte_trace_regexp(const char *regex, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;
        regex_t r;

        if (regcomp(&r, regex, 0) != 0)
                return -EINVAL;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0) {
                        /* Free the compiled regex before the error return */
                        regfree(&r);
                        return rc;
                }
        }
        regfree(&r);

        return rc | found;
}
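
/*
 * The same idea with a POSIX basic regex instead of a glob; the pattern
 * below is illustrative:
 *
 *	int rc = rte_trace_regexp("^lib\\.ethdev\\..*", false);
 */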

rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
        struct trace_point *tp;

        if (name == NULL)
                return NULL;

        STAILQ_FOREACH(tp, &tp_list, next)
                if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
                        return tp->handle;

        return NULL;
}
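
/*
 * Lookup plus targeted enable, a hedged sketch; "lib.eal.generic.void"
 * is one of the generic trace points registered by the EAL upstream:
 *
 *	rte_trace_point_t *handle;
 *
 *	handle = rte_trace_point_lookup("lib.eal.generic.void");
 *	if (handle != NULL && rte_trace_point_enable(handle) < 0)
 *		printf("failed to enable trace point\n");
 */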

static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
        rte_trace_point_t *handle = tp->handle;

        fprintf(f, "\tid %d, %s, size is %d, %s\n",
                trace_id_get(handle), tp->name,
                (uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
                rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        if (trace->nb_trace_mem_list == 0)
                return;

        rte_spinlock_lock(&trace->lock);
        fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
        fprintf(f, "\nTrace mem info\n--------------\n");
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                header = trace->lcore_meta[count].mem;
                fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
                        count, header,
                        trace_area_to_string(trace->lcore_meta[count].area),
                        header->stream_header.lcore_id,
                        header->stream_header.thread_name);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
rte_trace_dump(FILE *f)
{
        struct trace_point_head *tp_list = trace_list_head_get();
        struct trace *trace = trace_obj_get();
        struct trace_point *tp;

        fprintf(f, "\nGlobal info\n-----------\n");
        fprintf(f, "status = %s\n",
                rte_trace_is_enabled() ? "enabled" : "disabled");
        fprintf(f, "mode = %s\n",
                trace_mode_to_string(rte_trace_mode_get()));
        fprintf(f, "dir = %s\n", trace->dir);
        fprintf(f, "buffer len = %d\n", trace->buff_len);
        fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

        trace_lcore_mem_dump(f);
        fprintf(f, "\nTrace point info\n----------------\n");
        STAILQ_FOREACH(tp, tp_list, next)
                trace_point_dump(f, tp);
}
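
/* A quick way to inspect trace state at runtime, e.g. from a debug CLI:
 *
 *	rte_trace_dump(stdout);
 */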
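/*
 * Lazily allocate this thread's trace buffer. Upstream, the fast-path
 * emit header checks the per-lcore trace_mem pointer and calls this
 * helper on first use from each thread; that caller lives in
 * rte_trace_point.h, so treat the exact call site as an assumption here.
 */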
void
__rte_trace_mem_per_thread_alloc(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        struct thread_mem_meta *meta;
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        if (RTE_PER_LCORE(trace_mem))
                return;

        rte_spinlock_lock(&trace->lock);

        count = trace->nb_trace_mem_list;

        /* Allocate room for storing the thread trace mem meta; use a
         * temporary so a failed realloc does not lose the existing array
         */
        meta = realloc(trace->lcore_meta,
                sizeof(trace->lcore_meta[0]) * (count + 1));
        if (meta == NULL) {
                trace_crit("trace mem meta memory realloc failed");
                header = NULL;
                goto fail;
        }
        trace->lcore_meta = meta;

        /* First attempt from the huge page */
        header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
        }

        /* Second attempt from the heap */
        header = malloc(trace_mem_sz(trace->buff_len));
        if (header == NULL) {
                trace_crit("trace mem malloc attempt failed");
                goto fail;
        }

        /* Second attempt, from the heap, succeeded */
        trace->lcore_meta[count].area = TRACE_AREA_HEAP;

        /* Initialize the trace header */
found:
        header->offset = 0;
        header->len = trace->buff_len;
        header->stream_header.magic = TRACE_CTF_MAGIC;
        rte_uuid_copy(header->stream_header.uuid, trace->uuid);
        header->stream_header.lcore_id = rte_lcore_id();

        /* Store the thread name */
        char *name = header->stream_header.thread_name;
        memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
        rte_thread_getname(pthread_self(), name,
                __RTE_TRACE_EMIT_STRING_LEN_MAX);

        trace->lcore_meta[count].mem = header;
        trace->nb_trace_mem_list++;
fail:
        /* On failure the per-thread pointer stays NULL, so the allocation
         * is simply retried the next time this helper runs
         */
        RTE_PER_LCORE(trace_mem) = header;
        rte_spinlock_unlock(&trace->lock);
}

static void
trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
{
        if (meta->area == TRACE_AREA_HUGEPAGE)
                eal_free_no_trace(meta->mem);
        else if (meta->area == TRACE_AREA_HEAP)
                free(meta->mem);
}
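/*
 * Release this thread's trace buffer. Per the commit subject ("eal:
 * introduce thread uninit helper"), this is intended to run from the
 * thread uninit path when a thread exits; the exact caller is outside
 * this file, so treat it as an assumption.
 */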
void
trace_mem_per_thread_free(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        header = RTE_PER_LCORE(trace_mem);
        if (header == NULL)
                return;

        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                if (trace->lcore_meta[count].mem == header)
                        break;
        }
        if (count != trace->nb_trace_mem_list) {
                struct thread_mem_meta *meta = &trace->lcore_meta[count];

                trace_mem_per_thread_free_unlocked(meta);
                if (count != trace->nb_trace_mem_list - 1) {
                        memmove(meta, meta + 1,
                                sizeof(*meta) *
                                (trace->nb_trace_mem_list - count - 1));
                }
                trace->nb_trace_mem_list--;
        }
        rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_free(void)
{
        struct trace *trace = trace_obj_get();
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
        }
        trace->nb_trace_mem_list = 0;
        rte_spinlock_unlock(&trace->lock);
}

void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
        char *field = RTE_PER_LCORE(ctf_field);
        int count = RTE_PER_LCORE(ctf_count);
        size_t size;
        int rc;

        size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
        RTE_PER_LCORE(trace_point_sz) += sz;
        rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
        if (rc <= 0 || (size_t)rc >= size) {
                RTE_PER_LCORE(trace_point_sz) = 0;
                trace_crit("CTF field is too long");
                return;
        }
        RTE_PER_LCORE(ctf_count) += rc;
}
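
/*
 * Each call appends one "datatype name;" pair to the per-thread CTF
 * field string, so after a register function emits a u16 and a u32 the
 * accumulated string would look like this (field names are hypothetical):
 *
 *	"uint16_t nb_pkts;uint32_t queue_id;"
 */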

int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))
{
        char *field = RTE_PER_LCORE(ctf_field);
        struct trace_point *tp;
        uint16_t sz;

        /* Sanity check the arguments */
        if (name == NULL || register_fn == NULL || handle == NULL) {
                trace_err("invalid arguments");
                rte_errno = EINVAL;
                goto fail;
        }

        /* Check the size of the trace point object */
        RTE_PER_LCORE(trace_point_sz) = 0;
        RTE_PER_LCORE(ctf_count) = 0;
        register_fn();
        if (RTE_PER_LCORE(trace_point_sz) == 0) {
                trace_err("missing rte_trace_emit_header() in register fn");
                rte_errno = EBADF;
                goto fail;
        }

        /* Has the size overflowed? */
        if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
                trace_err("trace point size overflowed");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Are we running out of space to store trace points? */
        if (trace.nb_trace_points > UINT16_MAX) {
                trace_err("trace point exceeds the max count");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Get the size of the trace point */
        sz = RTE_PER_LCORE(trace_point_sz);
        tp = calloc(1, sizeof(struct trace_point));
        if (tp == NULL) {
                trace_err("failed to allocate trace point memory");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* Initialize the trace point */
        if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
                trace_err("name is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Copy the field data for future use */
        if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
                trace_err("CTF field size is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Clear the field memory for the next event */
        memset(field, 0, TRACE_CTF_FIELD_SIZE);

        /* Form the trace handle: size in the low bits, ID above it */
        *handle = sz;
        *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

        trace.nb_trace_points++;
        tp->handle = handle;

        /* Add the trace point at the tail */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
        __atomic_thread_fence(__ATOMIC_RELEASE);

        /* All good!!! */
        return 0;
free:
        free(tp);
fail:
        if (trace.register_errno == 0)
                trace.register_errno = rte_errno;

        return -rte_errno;
}
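
/*
 * A hedged sketch of how an application reaches this path; the trace
 * point name and payload below are hypothetical, while RTE_TRACE_POINT
 * and RTE_TRACE_POINT_REGISTER are the public DPDK macros whose expansion
 * produces a register function that calls __rte_trace_point_register():
 *
 *	// my_trace.h
 *	#include <rte_trace_point.h>
 *
 *	RTE_TRACE_POINT(
 *		app_trace_burst,
 *		RTE_TRACE_POINT_ARGS(uint16_t nb_pkts),
 *		rte_trace_point_emit_u16(nb_pkts);
 *	)
 *
 *	// my_trace.c
 *	#include <rte_trace_point_register.h>
 *	#include "my_trace.h"
 *
 *	RTE_TRACE_POINT_REGISTER(app_trace_burst, app.trace.burst)
 *
 *	// fast path, after the trace point is enabled:
 *	// app_trace_burst(nb_rx);
 */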