[dpdk.git] lib/librte_eal/common/eal_common_trace.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

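/*
 * Per-lcore state: trace_mem points to the calling thread's trace buffer used
 * by the emit fast path, while trace_point_sz, ctf_field and ctf_count are
 * scratch variables filled in by a trace point's register function.
 */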
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace;

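/* Accessors for the EAL-internal trace object and the trace point list. */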
struct trace *
trace_obj_get(void)
{
        return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
        return &tp_list;
}

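/*
 * Called during EAL initialization: validate registrations, generate the UUID
 * and CTF metadata, create the trace directory and record the epoch timestamp.
 */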
int
eal_trace_init(void)
{
        /* Trace memory should start 8-byte aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

        /* One of the trace point registrations failed */
        if (trace.register_errno) {
                rte_errno = trace.register_errno;
                goto fail;
        }

        if (!rte_trace_is_enabled())
                return 0;

        rte_spinlock_init(&trace.lock);

        /* Check whether a duplicate trace point name is registered */
        if (trace_has_duplicate_entry())
                goto fail;

        /* Generate UUID ver 4 with total size of events and number of
         * events
         */
        trace_uuid_generate();

        /* Generate CTF TDSL metadata */
        if (trace_metadata_create() < 0)
                goto fail;

        /* Create trace directory */
        if (trace_mkdir())
                goto free_meta;

        /* Save current epoch timestamp for future use */
        if (trace_epoch_time_save() < 0)
                goto free_meta;

        rte_trace_mode_set(trace.mode);

        return 0;

free_meta:
        trace_metadata_destroy();
fail:
        trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
        return -rte_errno;
}

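/* Release the per-thread trace buffers and the CTF metadata. */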
void
eal_trace_fini(void)
{
        if (!rte_trace_is_enabled())
                return;
        trace_mem_per_thread_free();
        trace_metadata_destroy();
}

bool
rte_trace_is_enabled(void)
{
        return trace.status;
}

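/*
 * The trace mode is encoded per trace point in the handle's DISCARD flag:
 * overwrite mode clears the flag, discard mode sets it.
 */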
static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
        if (mode == RTE_TRACE_MODE_OVERWRITE)
                __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
        else
                __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
        struct trace_point *tp;

        if (!rte_trace_is_enabled())
                return;

        STAILQ_FOREACH(tp, &tp_list, next)
                trace_mode_set(tp->handle, mode);

        trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
        return trace.mode;
}

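/* A valid handle is non-NULL with an ID within the registered range. */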
static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
        return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
        uint64_t val;

        if (trace_point_is_invalid(trace))
                return false;

        val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
        return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

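/*
 * Enable or disable every trace point whose name matches the glob pattern.
 * Returns 1 if at least one trace point matched, 0 if none did, or a negative
 * value if enabling/disabling a matched trace point failed.
 *
 * Illustrative use (assuming trace points with such names are registered):
 *      rte_trace_pattern("lib.ethdev.*", true);
 */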
int
rte_trace_pattern(const char *pattern, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (fnmatch(pattern, tp->name, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0)
                        return rc;
        }

        return rc | found;
}

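/*
 * Same as rte_trace_pattern(), but the names are matched against a POSIX
 * regular expression instead of a glob pattern. The compiled regex is always
 * freed before returning.
 */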
int
rte_trace_regexp(const char *regex, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;
        regex_t r;

        if (regcomp(&r, regex, 0) != 0)
                return -EINVAL;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0) {
                        /* Break instead of returning so the regex is freed */
                        found = 0;
                        break;
                }
        }
        regfree(&r);

        return rc | found;
}

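/*
 * Find a registered trace point by its full name, or return NULL.
 * Illustrative use (the trace point name is only an example):
 *      rte_trace_point_t *tp = rte_trace_point_lookup("lib.eal.generic.void");
 *      if (tp != NULL)
 *              rte_trace_point_enable(tp);
 */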
rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
        struct trace_point *tp;

        if (name == NULL)
                return NULL;

        STAILQ_FOREACH(tp, &tp_list, next)
                if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
                        return tp->handle;

        return NULL;
}

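/*
 * Allocate the calling thread's trace buffer on first use: try hugepage
 * memory first, fall back to the heap, then initialize the stream header
 * (magic, UUID, lcore id, thread name).
 */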
void
__rte_trace_mem_per_thread_alloc(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        if (RTE_PER_LCORE(trace_mem))
                return;

        rte_spinlock_lock(&trace->lock);

        count = trace->nb_trace_mem_list;

        /* Allocate room for storing the thread trace mem meta */
        trace->lcore_meta = realloc(trace->lcore_meta,
                sizeof(trace->lcore_meta[0]) * (count + 1));

        /* On failure, leave a NULL pointer for the fast path to consume */
        if (trace->lcore_meta == NULL) {
                trace_crit("trace mem meta memory realloc failed");
                header = NULL;
                goto fail;
        }

        /* First attempt from huge page */
        header = rte_malloc(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
        }

        /* Second attempt from heap */
        header = malloc(trace_mem_sz(trace->buff_len));
        if (header == NULL) {
                trace_crit("trace mem malloc attempt failed");
                goto fail;
        }

        /* The second attempt from heap succeeded */
        trace->lcore_meta[count].area = TRACE_AREA_HEAP;

        /* Initialize the trace header */
found:
        header->offset = 0;
        header->len = trace->buff_len;
        header->stream_header.magic = TRACE_CTF_MAGIC;
        rte_uuid_copy(header->stream_header.uuid, trace->uuid);
        header->stream_header.lcore_id = rte_lcore_id();

        /* Store the thread name */
        char *name = header->stream_header.thread_name;
        memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
        rte_thread_getname(pthread_self(), name,
                __RTE_TRACE_EMIT_STRING_LEN_MAX);

        trace->lcore_meta[count].mem = header;
        trace->nb_trace_mem_list++;
fail:
        RTE_PER_LCORE(trace_mem) = header;
        rte_spinlock_unlock(&trace->lock);
}

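/* Free every per-thread trace buffer from hugepage or heap, as recorded. */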
void
trace_mem_per_thread_free(void)
{
        struct trace *trace = trace_obj_get();
        uint32_t count;
        void *mem;

        if (!rte_trace_is_enabled())
                return;

        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                mem = trace->lcore_meta[count].mem;
                if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
                        rte_free(mem);
                else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
                        free(mem);
        }
        rte_spinlock_unlock(&trace->lock);
}

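/*
 * Register a trace point: run its register function to capture the payload
 * size and the CTF field description, encode the size and a newly assigned ID
 * into the 64-bit handle, then append the trace point to the global list.
 */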
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))
{
        char *field = RTE_PER_LCORE(ctf_field);
        struct trace_point *tp;
        uint16_t sz;

        /* Sanity check the arguments */
        if (name == NULL || register_fn == NULL || handle == NULL) {
                trace_err("invalid arguments");
                rte_errno = EINVAL;
                goto fail;
        }

        /* Check the size of the trace point object */
        RTE_PER_LCORE(trace_point_sz) = 0;
        RTE_PER_LCORE(ctf_count) = 0;
        register_fn();
        if (RTE_PER_LCORE(trace_point_sz) == 0) {
                trace_err("missing rte_trace_emit_header() in register fn");
                rte_errno = EBADF;
                goto fail;
        }

        /* Check whether the size overflowed */
        if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
                trace_err("trace point size overflowed");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Are we running out of space to store trace points? */
        if (trace.nb_trace_points > UINT16_MAX) {
                trace_err("trace point exceeds the max count");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Get the size of the trace point */
        sz = RTE_PER_LCORE(trace_point_sz);
        tp = calloc(1, sizeof(struct trace_point));
        if (tp == NULL) {
                trace_err("failed to allocate trace point memory");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* Initialize the trace point */
        if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
                trace_err("name is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Copy the field data for future use */
        if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
                trace_err("CTF field string is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Clear field memory for the next event */
        memset(field, 0, TRACE_CTF_FIELD_SIZE);

        /* Form the trace handle */
        *handle = sz;
        *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

        trace.nb_trace_points++;
        tp->handle = handle;

        /* Add the trace point at tail */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
        __atomic_thread_fence(__ATOMIC_RELEASE);

        /* All good */
        return 0;
free:
        free(tp);
fail:
        if (trace.register_errno == 0)
                trace.register_errno = rte_errno;

        return -rte_errno;
}