lib/librte_eal/common/eal_common_trace.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

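/* Per-lcore scratch state used while a trace point register function runs:
 * trace_point_sz accumulates the event payload size and ctf_field/ctf_count
 * build up the CTF field string; trace_mem points at the per-thread trace
 * buffer consumed by the emit fast path.
 */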
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace;

struct trace *
trace_obj_get(void)
{
        return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
        return &tp_list;
}

int
eal_trace_init(void)
{
        /* Trace memory must be 8B aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

        /* One of the trace point registrations failed */
        if (trace.register_errno) {
                rte_errno = trace.register_errno;
                goto fail;
        }

        if (!rte_trace_is_enabled())
                return 0;

        rte_spinlock_init(&trace.lock);

        /* Check for duplicate trace point names */
        if (trace_has_duplicate_entry())
                goto fail;

        /* Generate a version 4 UUID from the total size of events and the
         * number of events
         */
        trace_uuid_generate();

        /* Generate CTF TDSL metadata */
        if (trace_metadata_create() < 0)
                goto fail;

        /* Create trace directory */
        if (trace_mkdir())
                goto free_meta;

        /* Save the current epoch timestamp for future use; free the
         * metadata on failure as well
         */
        if (trace_epoch_time_save() < 0)
                goto free_meta;

        rte_trace_mode_set(trace.mode);

        return 0;

free_meta:
        trace_metadata_destroy();
fail:
        trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
        return -rte_errno;
}

void
eal_trace_fini(void)
{
        if (!rte_trace_is_enabled())
                return;
        trace_mem_per_thread_free();
        trace_metadata_destroy();
}

bool
rte_trace_is_enabled(void)
{
        return trace.status;
}

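/* Set or clear the per-trace-point DISCARD bit: in overwrite mode the ring
 * buffer wraps and the oldest events are overwritten; in discard mode new
 * events are dropped once the per-thread buffer is full.
 */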
static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
        if (mode == RTE_TRACE_MODE_OVERWRITE)
                __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
        else
                __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
        struct trace_point *tp;

        if (!rte_trace_is_enabled())
                return;

        STAILQ_FOREACH(tp, &tp_list, next)
                trace_mode_set(tp->handle, mode);

        trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
        return trace.mode;
}

static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
        return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
        uint64_t val;

        if (trace_point_is_invalid(trace))
                return false;

        val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
        return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_pattern(const char *pattern, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (fnmatch(pattern, tp->name, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0)
                        return rc;
        }

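        /* Return 1 if at least one trace point matched and every operation
         * succeeded, 0 if nothing matched the pattern
         */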
        return rc | found;
}

int
rte_trace_regexp(const char *regex, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;
        regex_t r;

        if (regcomp(&r, regex, 0) != 0)
                return -EINVAL;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0) {
                        /* Free the compiled regex on the error path too */
                        regfree(&r);
                        return rc;
                }
        }
        regfree(&r);

        /* Same return convention as rte_trace_pattern() */
        return rc | found;
}

rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
        struct trace_point *tp;

        if (name == NULL)
                return NULL;

        STAILQ_FOREACH(tp, &tp_list, next)
                if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
                        return tp->handle;

        return NULL;
}

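/* The lower 16 bits of a trace point handle hold the event payload size;
 * see __rte_trace_point_register() for the encoding.
 */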
static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
        rte_trace_point_t *handle = tp->handle;

        fprintf(f, "\tid %d, %s, size is %d, %s\n",
                trace_id_get(handle), tp->name,
                (uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
                rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        if (trace->nb_trace_mem_list == 0)
                return;

        rte_spinlock_lock(&trace->lock);
        fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
        fprintf(f, "\nTrace mem info\n--------------\n");
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                header = trace->lcore_meta[count].mem;
                fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
                        count, header,
                        trace_area_to_string(trace->lcore_meta[count].area),
                        header->stream_header.lcore_id,
                        header->stream_header.thread_name);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
rte_trace_dump(FILE *f)
{
        struct trace_point_head *tp_list = trace_list_head_get();
        struct trace *trace = trace_obj_get();
        struct trace_point *tp;

        fprintf(f, "\nGlobal info\n-----------\n");
        fprintf(f, "status = %s\n",
                rte_trace_is_enabled() ? "enabled" : "disabled");
        fprintf(f, "mode = %s\n",
                trace_mode_to_string(rte_trace_mode_get()));
        fprintf(f, "dir = %s\n", trace->dir);
        fprintf(f, "buffer len = %d\n", trace->buff_len);
        fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

        trace_lcore_mem_dump(f);
        fprintf(f, "\nTrace point info\n----------------\n");
        STAILQ_FOREACH(tp, tp_list, next)
                trace_point_dump(f, tp);
}

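/* Allocate the per-thread trace buffer on first use, preferring hugepage
 * memory (rte_malloc) and falling back to the libc heap.
 */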
void
__rte_trace_mem_per_thread_alloc(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        void *new_meta;
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        if (RTE_PER_LCORE(trace_mem))
                return;

        rte_spinlock_lock(&trace->lock);

        count = trace->nb_trace_mem_list;

        /* Allocate room for storing the thread trace mem meta; use a
         * temporary so the existing array is not leaked if realloc fails
         */
        new_meta = realloc(trace->lcore_meta,
                sizeof(trace->lcore_meta[0]) * (count + 1));
        if (new_meta == NULL) {
                trace_crit("trace mem meta memory realloc failed");
                header = NULL;
                goto fail;
        }
        trace->lcore_meta = new_meta;

        /* First attempt from huge page */
        header = rte_malloc(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
        }

        /* Second attempt from heap */
        header = malloc(trace_mem_sz(trace->buff_len));
        if (header == NULL) {
                trace_crit("trace mem malloc attempt failed");
                goto fail;
        }

        /* Second attempt from heap succeeded */
        trace->lcore_meta[count].area = TRACE_AREA_HEAP;

        /* Initialize the trace header */
found:
        header->offset = 0;
        header->len = trace->buff_len;
        header->stream_header.magic = TRACE_CTF_MAGIC;
        rte_uuid_copy(header->stream_header.uuid, trace->uuid);
        header->stream_header.lcore_id = rte_lcore_id();

        /* Store the thread name */
        char *name = header->stream_header.thread_name;
        memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
        rte_thread_getname(pthread_self(), name,
                __RTE_TRACE_EMIT_STRING_LEN_MAX);

        trace->lcore_meta[count].mem = header;
        trace->nb_trace_mem_list++;
fail:
        /* Store the allocated buffer (or NULL on failure) for the fast path */
        RTE_PER_LCORE(trace_mem) = header;
        rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_per_thread_free(void)
{
        struct trace *trace = trace_obj_get();
        uint32_t count;
        void *mem;

        if (!rte_trace_is_enabled())
                return;

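        /* Free each per-thread buffer with the allocator that created it */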
        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                mem = trace->lcore_meta[count].mem;
                if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
                        rte_free(mem);
                else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
                        free(mem);
        }
        rte_spinlock_unlock(&trace->lock);
}

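/* Called via the rte_trace_point_emit_*() helpers from a trace point
 * register function: appends one "<datatype> <name>;" CTF field declaration
 * to the per-lcore string and accumulates the event payload size.
 */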
void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
        char *field = RTE_PER_LCORE(ctf_field);
        int count = RTE_PER_LCORE(ctf_count);
        size_t size;
        int rc;

        size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
        RTE_PER_LCORE(trace_point_sz) += sz;
        rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
        if (rc <= 0 || (size_t)rc >= size) {
                RTE_PER_LCORE(trace_point_sz) = 0;
                trace_crit("CTF field is too long");
                return;
        }
        RTE_PER_LCORE(ctf_count) += rc;
}

int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))
{
        char *field = RTE_PER_LCORE(ctf_field);
        struct trace_point *tp;
        uint16_t sz;

        /* Sanity checks of arguments */
        if (name == NULL || register_fn == NULL || handle == NULL) {
                trace_err("invalid arguments");
                rte_errno = EINVAL;
                goto fail;
        }

        /* Check the size of the trace point object */
        RTE_PER_LCORE(trace_point_sz) = 0;
        RTE_PER_LCORE(ctf_count) = 0;
        register_fn();
        if (RTE_PER_LCORE(trace_point_sz) == 0) {
                trace_err("missing rte_trace_emit_header() in register fn");
                rte_errno = EBADF;
                goto fail;
        }

        /* Check for size overflow */
        if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
                trace_err("trace point size overflowed");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Are we running out of space to store trace points? */
        if (trace.nb_trace_points > UINT16_MAX) {
                trace_err("trace point exceeds the max count");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Get the size of the trace point */
        sz = RTE_PER_LCORE(trace_point_sz);
        tp = calloc(1, sizeof(struct trace_point));
        if (tp == NULL) {
                trace_err("fail to allocate trace point memory");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* Initialize the trace point */
        if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
                trace_err("name is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Copy the accumulated CTF field string for future use */
        if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
                trace_err("CTF field is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Clear field memory for the next event */
        memset(field, 0, TRACE_CTF_FIELD_SIZE);

        /* Form the trace handle: the lower bits hold the event payload size,
         * the upper bits hold the trace point ID
         */
        *handle = sz;
        *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

        trace.nb_trace_points++;
        tp->handle = handle;

        /* Add the trace point at tail; the release fence publishes the fully
         * initialized entry before concurrent readers traverse the list
         */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
        __atomic_thread_fence(__ATOMIC_RELEASE);

        /* All Good !!! */
        return 0;
free:
        free(tp);
fail:
        if (trace.register_errno == 0)
                trace.register_errno = rte_errno;

        return -rte_errno;
}