/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */
#include <fnmatch.h>
#include <inttypes.h>
#include <regex.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"
/* Per-lcore scratch state used while a trace point's register_fn runs:
 * trace_point_sz accumulates the event payload size, ctf_field/ctf_count
 * accumulate the CTF field declaration string being built.
 */
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
/* Per-thread pointer to this thread's trace buffer header (fast path). */
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

/* Global list of registered trace points and the global trace object. */
static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace;
32 struct trace_point_head *
33 trace_list_head_get(void)
43 /* Trace memory should start with 8B aligned for natural alignment */
44 RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);
46 /* One of the trace point registration failed */
47 if (trace.register_errno) {
48 rte_errno = trace.register_errno;
52 if (trace.args.nb_args)
55 if (!rte_trace_is_enabled())
58 rte_spinlock_init(&trace.lock);
60 /* Is duplicate trace name registered */
61 if (trace_has_duplicate_entry())
64 /* Generate UUID ver 4 with total size of events and number of
67 trace_uuid_generate();
69 /* Generate CTF TDSL metadata */
70 if (trace_metadata_create() < 0)
73 /* Create trace directory */
77 /* Save current epoch timestamp for future use */
78 if (trace_epoch_time_save() < 0)
81 /* Apply global configurations */
82 for (i = 0; i < trace.args.nb_args; i++)
83 trace_args_apply(trace.args.args[i]);
85 rte_trace_mode_set(trace.mode);
90 trace_metadata_destroy();
92 trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
/* Tear down the trace subsystem: per-thread buffers, CTF metadata, args. */
void
eal_trace_fini(void)
{
	if (!rte_trace_is_enabled())
		return;
	trace_mem_per_thread_free();
	trace_metadata_destroy();
	eal_trace_args_free();
}
107 rte_trace_is_enabled(void)
113 trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
115 if (mode == RTE_TRACE_MODE_OVERWRITE)
116 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
119 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
124 rte_trace_mode_set(enum rte_trace_mode mode)
126 struct trace_point *tp;
128 if (!rte_trace_is_enabled())
131 STAILQ_FOREACH(tp, &tp_list, next)
132 trace_mode_set(tp->handle, mode);
138 rte_trace_mode rte_trace_mode_get(void)
144 trace_point_is_invalid(rte_trace_point_t *t)
146 return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
150 rte_trace_point_is_enabled(rte_trace_point_t *trace)
154 if (trace_point_is_invalid(trace))
157 val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
158 return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
162 rte_trace_point_enable(rte_trace_point_t *trace)
164 if (trace_point_is_invalid(trace))
167 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
173 rte_trace_point_disable(rte_trace_point_t *trace)
175 if (trace_point_is_invalid(trace))
178 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
184 rte_trace_pattern(const char *pattern, bool enable)
186 struct trace_point *tp;
187 int rc = 0, found = 0;
189 STAILQ_FOREACH(tp, &tp_list, next) {
190 if (fnmatch(pattern, tp->name, 0) == 0) {
192 rc = rte_trace_point_enable(tp->handle);
194 rc = rte_trace_point_disable(tp->handle);
205 rte_trace_regexp(const char *regex, bool enable)
207 struct trace_point *tp;
208 int rc = 0, found = 0;
211 if (regcomp(&r, regex, 0) != 0)
214 STAILQ_FOREACH(tp, &tp_list, next) {
215 if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
217 rc = rte_trace_point_enable(tp->handle);
219 rc = rte_trace_point_disable(tp->handle);
231 rte_trace_point_lookup(const char *name)
233 struct trace_point *tp;
238 STAILQ_FOREACH(tp, &tp_list, next)
239 if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
246 trace_point_dump(FILE *f, struct trace_point *tp)
248 rte_trace_point_t *handle = tp->handle;
250 fprintf(f, "\tid %d, %s, size is %d, %s\n",
251 trace_id_get(handle), tp->name,
252 (uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
253 rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
257 trace_lcore_mem_dump(FILE *f)
259 struct trace *trace = trace_obj_get();
260 struct __rte_trace_header *header;
263 if (trace->nb_trace_mem_list == 0)
266 rte_spinlock_lock(&trace->lock);
267 fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
268 fprintf(f, "\nTrace mem info\n--------------\n");
269 for (count = 0; count < trace->nb_trace_mem_list; count++) {
270 header = trace->lcore_meta[count].mem;
271 fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
273 trace_area_to_string(trace->lcore_meta[count].area),
274 header->stream_header.lcore_id,
275 header->stream_header.thread_name);
277 rte_spinlock_unlock(&trace->lock);
281 rte_trace_dump(FILE *f)
283 struct trace_point_head *tp_list = trace_list_head_get();
284 struct trace *trace = trace_obj_get();
285 struct trace_point *tp;
287 fprintf(f, "\nGlobal info\n-----------\n");
288 fprintf(f, "status = %s\n",
289 rte_trace_is_enabled() ? "enabled" : "disabled");
290 fprintf(f, "mode = %s\n",
291 trace_mode_to_string(rte_trace_mode_get()));
292 fprintf(f, "dir = %s\n", trace->dir);
293 fprintf(f, "buffer len = %d\n", trace->buff_len);
294 fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);
296 trace_lcore_mem_dump(f);
297 fprintf(f, "\nTrace point info\n----------------\n");
298 STAILQ_FOREACH(tp, tp_list, next)
299 trace_point_dump(f, tp);
303 __rte_trace_mem_per_thread_alloc(void)
305 struct trace *trace = trace_obj_get();
306 struct __rte_trace_header *header;
309 if (!rte_trace_is_enabled())
312 if (RTE_PER_LCORE(trace_mem))
315 rte_spinlock_lock(&trace->lock);
317 count = trace->nb_trace_mem_list;
319 /* Allocate room for storing the thread trace mem meta */
320 trace->lcore_meta = realloc(trace->lcore_meta,
321 sizeof(trace->lcore_meta[0]) * (count + 1));
323 /* Provide dummy space for fast path to consume */
324 if (trace->lcore_meta == NULL) {
325 trace_crit("trace mem meta memory realloc failed");
330 /* First attempt from huge page */
331 header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
333 trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
337 /* Second attempt from heap */
338 header = malloc(trace_mem_sz(trace->buff_len));
339 if (header == NULL) {
340 trace_crit("trace mem malloc attempt failed");
346 /* Second attempt from heap is success */
347 trace->lcore_meta[count].area = TRACE_AREA_HEAP;
349 /* Initialize the trace header */
352 header->len = trace->buff_len;
353 header->stream_header.magic = TRACE_CTF_MAGIC;
354 rte_uuid_copy(header->stream_header.uuid, trace->uuid);
355 header->stream_header.lcore_id = rte_lcore_id();
357 /* Store the thread name */
358 char *name = header->stream_header.thread_name;
359 memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
360 rte_thread_getname(pthread_self(), name,
361 __RTE_TRACE_EMIT_STRING_LEN_MAX);
363 trace->lcore_meta[count].mem = header;
364 trace->nb_trace_mem_list++;
366 RTE_PER_LCORE(trace_mem) = header;
367 rte_spinlock_unlock(&trace->lock);
371 trace_mem_per_thread_free(void)
373 struct trace *trace = trace_obj_get();
377 if (!rte_trace_is_enabled())
380 rte_spinlock_lock(&trace->lock);
381 for (count = 0; count < trace->nb_trace_mem_list; count++) {
382 mem = trace->lcore_meta[count].mem;
383 if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
384 eal_free_no_trace(mem);
385 else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
388 rte_spinlock_unlock(&trace->lock);
392 __rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
394 char *field = RTE_PER_LCORE(ctf_field);
395 int count = RTE_PER_LCORE(ctf_count);
399 size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
400 RTE_PER_LCORE(trace_point_sz) += sz;
401 rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
402 if (rc <= 0 || (size_t)rc >= size) {
403 RTE_PER_LCORE(trace_point_sz) = 0;
404 trace_crit("CTF field is too long");
407 RTE_PER_LCORE(ctf_count) += rc;
411 __rte_trace_point_register(rte_trace_point_t *handle, const char *name,
412 void (*register_fn)(void))
414 char *field = RTE_PER_LCORE(ctf_field);
415 struct trace_point *tp;
418 /* Sanity checks of arguments */
419 if (name == NULL || register_fn == NULL || handle == NULL) {
420 trace_err("invalid arguments");
425 /* Check the size of the trace point object */
426 RTE_PER_LCORE(trace_point_sz) = 0;
427 RTE_PER_LCORE(ctf_count) = 0;
429 if (RTE_PER_LCORE(trace_point_sz) == 0) {
430 trace_err("missing rte_trace_emit_header() in register fn");
435 /* Is size overflowed */
436 if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
437 trace_err("trace point size overflowed");
442 /* Are we running out of space to store trace points? */
443 if (trace.nb_trace_points > UINT16_MAX) {
444 trace_err("trace point exceeds the max count");
449 /* Get the size of the trace point */
450 sz = RTE_PER_LCORE(trace_point_sz);
451 tp = calloc(1, sizeof(struct trace_point));
453 trace_err("fail to allocate trace point memory");
458 /* Initialize the trace point */
459 if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
460 trace_err("name is too long");
465 /* Copy the field data for future use */
466 if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
467 trace_err("CTF field size is too long");
472 /* Clear field memory for the next event */
473 memset(field, 0, TRACE_CTF_FIELD_SIZE);
475 /* Form the trace handle */
477 *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;
479 trace.nb_trace_points++;
482 /* Add the trace point at tail */
483 STAILQ_INSERT_TAIL(&tp_list, tp, next);
484 __atomic_thread_fence(__ATOMIC_RELEASE);
491 if (trace.register_errno == 0)
492 trace.register_errno = rte_errno;