lib: remove librte_ prefix from directory names
lib/eal/common/eal_common_trace.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

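/* Per-lcore scratch state: trace_point_sz and ctf_field accumulate the
 * payload size and the CTF field description while a trace point's register
 * function runs; trace_mem points at the thread's trace buffer once
 * __rte_trace_mem_per_thread_alloc() has populated it.
 */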
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char *, ctf_field);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };

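/* Accessors for the trace singleton and the registered trace point list. */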
struct trace *
trace_obj_get(void)
{
        return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
        return &tp_list;
}

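/* Called during EAL initialization: validates the registered trace points,
 * generates the CTF metadata and the trace output directory, and applies any
 * trace-related EAL arguments saved during option parsing.
 */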
int
eal_trace_init(void)
{
        struct trace_arg *arg;

        /* Trace memory must start 8B-aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

        /* One of the trace point registrations failed */
        if (trace.register_errno) {
                rte_errno = trace.register_errno;
                goto fail;
        }

        if (!STAILQ_EMPTY(&trace.args))
                trace.status = true;

        if (!rte_trace_is_enabled())
                return 0;

        rte_spinlock_init(&trace.lock);

        /* Bail out if a duplicate trace point name was registered */
        if (trace_has_duplicate_entry())
                goto fail;

        /* Generate a version 4 UUID from the total size of the events and
         * the number of events
         */
        trace_uuid_generate();

        /* Apply the buffer size configuration for trace output */
        trace_bufsz_args_apply();

        /* Generate the CTF TSDL metadata */
        if (trace_metadata_create() < 0)
                goto fail;

        /* Create the trace directory */
        if (trace_mkdir())
                goto free_meta;

        /* Save the current epoch timestamp for future use */
        if (trace_epoch_time_save() < 0)
                goto free_meta;

        /* Apply global configurations */
        STAILQ_FOREACH(arg, &trace.args, next)
                trace_args_apply(arg->val);

        rte_trace_mode_set(trace.mode);

        return 0;

free_meta:
        trace_metadata_destroy();
fail:
        trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
        return -rte_errno;
}

void
eal_trace_fini(void)
{
        if (!rte_trace_is_enabled())
                return;
        trace_mem_free();
        trace_metadata_destroy();
        eal_trace_args_free();
}

bool
rte_trace_is_enabled(void)
{
        return trace.status;
}

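/* RTE_TRACE_MODE_OVERWRITE clears the per-trace-point DISCARD flag so that
 * new events overwrite the oldest ones once the buffer wraps; discard mode
 * sets the flag so that new events are dropped when the buffer is full.
 */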
static void
trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode)
{
        if (mode == RTE_TRACE_MODE_OVERWRITE)
                __atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
        else
                __atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
        struct trace_point *tp;

        if (!rte_trace_is_enabled())
                return;

        STAILQ_FOREACH(tp, &tp_list, next)
                trace_mode_set(tp->handle, mode);

        trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
        return trace.mode;
}

static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
        return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *t)
{
        uint64_t val;

        if (trace_point_is_invalid(t))
                return false;

        val = __atomic_load_n(t, __ATOMIC_ACQUIRE);
        return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *t)
{
        if (trace_point_is_invalid(t))
                return -ERANGE;

        __atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *t)
{
        if (trace_point_is_invalid(t))
                return -ERANGE;

        __atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

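/* Enable or disable all trace points whose name matches the glob pattern.
 * Returns 1 if at least one trace point matched, 0 if none did, or a
 * negative errno value on failure.
 */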
int
rte_trace_pattern(const char *pattern, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (fnmatch(pattern, tp->name, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0)
                        return rc;
        }

        return rc | found;
}

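/* Same as rte_trace_pattern() but matches names against a POSIX regular
 * expression; returns -EINVAL if the expression fails to compile.
 */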
int
rte_trace_regexp(const char *regex, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;
        regex_t r;

        if (regcomp(&r, regex, 0) != 0)
                return -EINVAL;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0) {
                        found = 0;
                        break;
                }
        }
        regfree(&r);

        return rc | found;
}

rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
        struct trace_point *tp;

        if (name == NULL)
                return NULL;

        STAILQ_FOREACH(tp, &tp_list, next)
                if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
                        return tp->handle;

        return NULL;
}

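/* A trace point handle packs the event payload size into its low 16 bits
 * (__RTE_TRACE_FIELD_SIZE_MASK) and the trace point ID into the bits above
 * __RTE_TRACE_FIELD_ID_SHIFT; see __rte_trace_point_register() below.
 */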
static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
        rte_trace_point_t *handle = tp->handle;

        fprintf(f, "\tid %d, %s, size is %d, %s\n",
                trace_id_get(handle), tp->name,
                (uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
                rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        if (trace->nb_trace_mem_list == 0)
                return;

        rte_spinlock_lock(&trace->lock);
        fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
        fprintf(f, "\nTrace mem info\n--------------\n");
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                header = trace->lcore_meta[count].mem;
                fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
                        count, header,
                        trace_area_to_string(trace->lcore_meta[count].area),
                        header->stream_header.lcore_id,
                        header->stream_header.thread_name);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
rte_trace_dump(FILE *f)
{
        struct trace_point_head *tp_list = trace_list_head_get();
        struct trace *trace = trace_obj_get();
        struct trace_point *tp;

        fprintf(f, "\nGlobal info\n-----------\n");
        fprintf(f, "status = %s\n",
                rte_trace_is_enabled() ? "enabled" : "disabled");
        fprintf(f, "mode = %s\n",
                trace_mode_to_string(rte_trace_mode_get()));
        fprintf(f, "dir = %s\n", trace->dir);
        fprintf(f, "buffer len = %d\n", trace->buff_len);
        fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

        trace_lcore_mem_dump(f);
        fprintf(f, "\nTrace point info\n----------------\n");
        STAILQ_FOREACH(tp, tp_list, next)
                trace_point_dump(f, tp);
}

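/* Lazily allocate the calling thread's trace buffer on first use: try
 * hugepage memory first via eal_malloc_no_trace(), then fall back to the
 * libc heap; the chosen area is recorded so the matching free routine is
 * used later.
 */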
void
__rte_trace_mem_per_thread_alloc(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        struct thread_mem_meta *meta;
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        if (RTE_PER_LCORE(trace_mem))
                return;

        rte_spinlock_lock(&trace->lock);

        count = trace->nb_trace_mem_list;

        /* Allocate room for storing the thread trace mem meta */
        meta = realloc(trace->lcore_meta,
                sizeof(trace->lcore_meta[0]) * (count + 1));

        /* On failure, publish a NULL buffer so the fast path skips emission */
        if (meta == NULL) {
                trace_crit("trace mem meta memory realloc failed");
                header = NULL;
                goto fail;
        }
        trace->lcore_meta = meta;

        /* First attempt from huge page */
        header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
        }

        /* Second attempt from heap */
        header = malloc(trace_mem_sz(trace->buff_len));
        if (header == NULL) {
                trace_crit("trace mem malloc attempt failed");
                goto fail;
        }

        /* The second attempt, from the heap, succeeded */
        trace->lcore_meta[count].area = TRACE_AREA_HEAP;

        /* Initialize the trace header */
found:
        header->offset = 0;
        header->len = trace->buff_len;
        header->stream_header.magic = TRACE_CTF_MAGIC;
        rte_uuid_copy(header->stream_header.uuid, trace->uuid);
        header->stream_header.lcore_id = rte_lcore_id();

        /* Store the thread name */
        char *name = header->stream_header.thread_name;
        memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
        rte_thread_getname(pthread_self(), name,
                __RTE_TRACE_EMIT_STRING_LEN_MAX);

        trace->lcore_meta[count].mem = header;
        trace->nb_trace_mem_list++;
fail:
        RTE_PER_LCORE(trace_mem) = header;
        rte_spinlock_unlock(&trace->lock);
}

static void
trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
{
        if (meta->area == TRACE_AREA_HUGEPAGE)
                eal_free_no_trace(meta->mem);
        else if (meta->area == TRACE_AREA_HEAP)
                free(meta->mem);
}

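/* Release the calling thread's trace buffer and compact lcore_meta with
 * memmove() so that the meta array stays dense for the dump and free paths.
 */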
void
trace_mem_per_thread_free(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        header = RTE_PER_LCORE(trace_mem);
        if (header == NULL)
                return;

        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                if (trace->lcore_meta[count].mem == header)
                        break;
        }
        if (count != trace->nb_trace_mem_list) {
                struct thread_mem_meta *meta = &trace->lcore_meta[count];

                trace_mem_per_thread_free_unlocked(meta);
                if (count != trace->nb_trace_mem_list - 1) {
                        memmove(meta, meta + 1,
                                sizeof(*meta) *
                                (trace->nb_trace_mem_list - count - 1));
                }
                trace->nb_trace_mem_list--;
        }
        rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_free(void)
{
        struct trace *trace = trace_obj_get();
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
        }
        trace->nb_trace_mem_list = 0;
        rte_spinlock_unlock(&trace->lock);
}

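/* Called once per field while a trace point's register function runs
 * (typically via the rte_trace_point_emit_* helpers): appends one CTF field
 * declaration ("<datatype> <name>;") to the per-lcore accumulator and adds
 * the field size to the per-lcore payload size.
 */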
void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
        char *field;
        char *fixup;
        int rc;

        fixup = trace_metadata_fixup_field(in);
        if (fixup != NULL)
                in = fixup;
        rc = asprintf(&field, "%s        %s %s;\n",
                RTE_PER_LCORE(ctf_field) != NULL ?
                        RTE_PER_LCORE(ctf_field) : "",
                datatype, in);
        free(RTE_PER_LCORE(ctf_field));
        free(fixup);
        if (rc == -1) {
                RTE_PER_LCORE(trace_point_sz) = 0;
                RTE_PER_LCORE(ctf_field) = NULL;
                trace_crit("could not allocate CTF field");
                return;
        }
        RTE_PER_LCORE(trace_point_sz) += sz;
        RTE_PER_LCORE(ctf_field) = field;
}

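/* Registration path invoked by the RTE_TRACE_POINT_REGISTER() constructors:
 * runs the register function once to measure the payload and collect the CTF
 * fields, then publishes the handle encoding the trace point's size and ID.
 */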
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))
{
        struct trace_point *tp;
        uint16_t sz;

        /* Sanity check the arguments */
        if (name == NULL || register_fn == NULL || handle == NULL) {
                trace_err("invalid arguments");
                rte_errno = EINVAL;
                goto fail;
        }

        /* Check the size of the trace point object */
        RTE_PER_LCORE(trace_point_sz) = 0;
        register_fn();
        if (RTE_PER_LCORE(trace_point_sz) == 0) {
                trace_err("missing rte_trace_emit_header() in register fn");
                rte_errno = EBADF;
                goto fail;
        }

        /* Check whether the size overflowed */
        if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
                trace_err("trace point size overflowed");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Are we running out of space to store trace points? */
        if (trace.nb_trace_points > UINT16_MAX) {
                trace_err("trace point exceeds the max count");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Get the size of the trace point */
        sz = RTE_PER_LCORE(trace_point_sz);
        tp = calloc(1, sizeof(struct trace_point));
        if (tp == NULL) {
                trace_err("failed to allocate trace point memory");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* Initialize the trace point */
        if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
                trace_err("name is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Copy the accumulated fields description and clear it for the next
         * trace point.
         */
        tp->ctf_field = RTE_PER_LCORE(ctf_field);
        RTE_PER_LCORE(ctf_field) = NULL;

        /* Form the trace handle */
        *handle = sz;
        *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

        trace.nb_trace_points++;
        tp->handle = handle;

        /* Add the trace point at tail */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
        __atomic_thread_fence(__ATOMIC_RELEASE);

        /* All Good !!! */
        return 0;
free:
        free(tp);
fail:
        /* Drop any partially accumulated fields so that they do not leak
         * into the next registration.
         */
        free(RTE_PER_LCORE(ctf_field));
        RTE_PER_LCORE(ctf_field) = NULL;

        if (trace.register_errno == 0)
                trace.register_errno = rte_errno;

        return -rte_errno;
}