/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */
#include <errno.h>
#include <fnmatch.h>
#include <pthread.h>
#include <regex.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_per_lcore.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_uuid.h>

#include "eal_trace.h"
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);
static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace;
/* Accessors for the singleton trace object and the trace point list */
struct trace *
trace_obj_get(void)
{
	return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
	return &tp_list;
}
int
eal_trace_init(void)
{
	/* Trace memory should start 8B aligned for natural alignment */
	RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

	/* One of the trace point registrations failed */
	if (trace.register_errno) {
		rte_errno = trace.register_errno;
		goto fail;
	}

	if (!rte_trace_is_enabled())
		return 0;

	rte_spinlock_init(&trace.lock);

	/* Is a duplicate trace name registered? */
	if (trace_has_duplicate_entry())
		goto fail;

	/* Generate a version 4 UUID seeded with the total size of the
	 * events and the number of events.
	 */
	trace_uuid_generate();

	/* Generate the CTF TSDL metadata */
	if (trace_metadata_create() < 0)
		goto fail;

	/* Create the trace directory */
	if (trace_mkdir())
		goto free_meta;

	/* Save the current epoch timestamp for future use */
	if (trace_epoch_time_save() < 0)
		goto free_meta;

	rte_trace_mode_set(trace.mode);

	return 0;

free_meta:
	trace_metadata_destroy();
fail:
	trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
	return -rte_errno;
}
void
eal_trace_fini(void)
{
	if (!rte_trace_is_enabled())
		return;
	trace_mem_per_thread_free();
	trace_metadata_destroy();
}
bool
rte_trace_is_enabled(void)
{
	return trace.status;
}
static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
	if (mode == RTE_TRACE_MODE_OVERWRITE)
		__atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
			__ATOMIC_RELEASE);
	else
		__atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
			__ATOMIC_RELEASE);
}
void
rte_trace_mode_set(enum rte_trace_mode mode)
{
	struct trace_point *tp;

	if (!rte_trace_is_enabled())
		return;

	STAILQ_FOREACH(tp, &tp_list, next)
		trace_mode_set(tp->handle, mode);

	trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
	return trace.mode;
}
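
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * switch the global mode and read it back. In OVERWRITE mode a full
 * per-thread buffer wraps around and overwrites the oldest events; in
 * DISCARD mode new events are dropped once the buffer is full.
 *
 *	rte_trace_mode_set(RTE_TRACE_MODE_DISCARD);
 *	if (rte_trace_mode_get() == RTE_TRACE_MODE_DISCARD)
 *		;	// subsequent events are dropped when a buffer fills
 */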
static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
	return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}
bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
	uint64_t val;

	if (trace_point_is_invalid(trace))
		return false;

	val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
	return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}
int
rte_trace_point_enable(rte_trace_point_t *trace)
{
	if (trace_point_is_invalid(trace))
		return -ERANGE;

	__atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
		__ATOMIC_RELEASE);
	return 0;
}
int
rte_trace_point_disable(rte_trace_point_t *trace)
{
	if (trace_point_is_invalid(trace))
		return -ERANGE;

	__atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
		__ATOMIC_RELEASE);
	return 0;
}
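
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * toggle one trace point by handle. The handle name below assumes the
 * generic EAL trace point registered elsewhere in EAL.
 *
 *	if (rte_trace_point_enable(&__rte_eal_trace_generic_void) == 0 &&
 *	    rte_trace_point_is_enabled(&__rte_eal_trace_generic_void))
 *		;	// the emit fast path is now active for this point
 *	rte_trace_point_disable(&__rte_eal_trace_generic_void);
 */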
int
rte_trace_pattern(const char *pattern, bool enable)
{
	struct trace_point *tp;
	int rc = 0, found = 0;

	STAILQ_FOREACH(tp, &tp_list, next) {
		if (fnmatch(pattern, tp->name, 0) == 0) {
			if (enable)
				rc = rte_trace_point_enable(tp->handle);
			else
				rc = rte_trace_point_disable(tp->handle);
			found = 1;
		}
		if (rc < 0)
			return rc;
	}

	return rc | found;
}
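
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * enable every trace point whose name matches a glob pattern. The
 * return value is negative on failure, 0 when nothing matched, and
 * positive when at least one trace point was toggled.
 *
 *	if (rte_trace_pattern("lib.ethdev.*", true) == 0)
 *		;	// no trace point name matched the pattern
 */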
int
rte_trace_regexp(const char *regex, bool enable)
{
	struct trace_point *tp;
	int rc = 0, found = 0;
	regex_t r;

	if (regcomp(&r, regex, 0) != 0)
		return -EINVAL;

	STAILQ_FOREACH(tp, &tp_list, next) {
		if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
			if (enable)
				rc = rte_trace_point_enable(tp->handle);
			else
				rc = rte_trace_point_disable(tp->handle);
			found = 1;
		}
		if (rc < 0)
			break;
	}
	/* Free the compiled expression on all paths to avoid a leak */
	regfree(&r);

	return rc < 0 ? rc : (rc | found);
}
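
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * the regex variant uses POSIX regcomp()/regexec() matching rather than
 * glob matching, with the same return convention as rte_trace_pattern().
 *
 *	if (rte_trace_regexp("^lib\\.eal\\.", true) < 0)
 *		;	// bad expression, or toggling a matched point failed
 */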
rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
	struct trace_point *tp;

	if (name == NULL)
		return NULL;

	STAILQ_FOREACH(tp, &tp_list, next)
		if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
			return tp->handle;

	return NULL;
}
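
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * resolve a handle from its registered name and enable it. The name
 * below assumes the generic EAL trace point.
 *
 *	rte_trace_point_t *h = rte_trace_point_lookup("lib.eal.generic.void");
 *	if (h != NULL)
 *		rte_trace_point_enable(h);
 */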
void
__rte_trace_mem_per_thread_alloc(void)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	uint32_t count;
	void *meta;

	if (!rte_trace_is_enabled())
		return;

	if (RTE_PER_LCORE(trace_mem))
		return;

	rte_spinlock_lock(&trace->lock);

	count = trace->nb_trace_mem_list;

	/* Allocate room for storing the thread trace mem meta; use a
	 * temporary so the existing array is not leaked on failure.
	 */
	meta = realloc(trace->lcore_meta,
		sizeof(trace->lcore_meta[0]) * (count + 1));

	/* Provide dummy space for the fast path to consume */
	if (meta == NULL) {
		trace_crit("trace mem meta memory realloc failed");
		header = NULL;
		goto fail;
	}
	trace->lcore_meta = meta;

	/* First attempt from the huge page heap */
	header = rte_malloc(NULL, trace_mem_sz(trace->buff_len), 8);
	if (header) {
		trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
		goto found;
	}

	/* Second attempt from the libc heap */
	header = malloc(trace_mem_sz(trace->buff_len));
	if (header == NULL) {
		trace_crit("trace mem malloc attempt failed");
		goto fail;
	}

	/* Second attempt from the heap succeeded */
	trace->lcore_meta[count].area = TRACE_AREA_HEAP;

	/* Initialize the trace header */
found:
	header->offset = 0;
	header->len = trace->buff_len;
	header->stream_header.magic = TRACE_CTF_MAGIC;
	rte_uuid_copy(header->stream_header.uuid, trace->uuid);
	header->stream_header.lcore_id = rte_lcore_id();

	/* Store the thread name */
	char *name = header->stream_header.thread_name;
	memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
	rte_thread_getname(pthread_self(), name,
		__RTE_TRACE_EMIT_STRING_LEN_MAX);

	trace->lcore_meta[count].mem = header;
	trace->nb_trace_mem_list++;
fail:
	RTE_PER_LCORE(trace_mem) = header;
	rte_spinlock_unlock(&trace->lock);
}
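
/*
 * Note (editor's addition): this allocator is not called directly by
 * applications. The emit fast path in rte_trace_point.h invokes it
 * lazily, the first time a thread hits an enabled trace point while its
 * per-lcore trace_mem pointer is still NULL; on allocation failure the
 * pointer stays NULL and the event is skipped.
 */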
void
trace_mem_per_thread_free(void)
{
	struct trace *trace = trace_obj_get();
	uint32_t count;
	void *mem;

	if (!rte_trace_is_enabled())
		return;

	rte_spinlock_lock(&trace->lock);
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		mem = trace->lcore_meta[count].mem;
		if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
			rte_free(mem);
		else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
			free(mem);
	}
	rte_spinlock_unlock(&trace->lock);
}
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
		void (*register_fn)(void))
{
	char *field = RTE_PER_LCORE(ctf_field);
	struct trace_point *tp;
	uint16_t sz;

	/* Sanity check the arguments */
	if (name == NULL || register_fn == NULL || handle == NULL) {
		trace_err("invalid arguments");
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check the size of the trace point object */
	RTE_PER_LCORE(trace_point_sz) = 0;
	RTE_PER_LCORE(ctf_count) = 0;
	register_fn();
	if (RTE_PER_LCORE(trace_point_sz) == 0) {
		trace_err("missing rte_trace_point_emit_header() in register fn");
		rte_errno = EBADF;
		goto fail;
	}
	/* Has the size overflowed? */
	if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
		trace_err("trace point size overflowed");
		rte_errno = ENOSPC;
		goto fail;
	}
	/* Are we running out of space to store trace points? */
	if (trace.nb_trace_points > UINT16_MAX) {
		trace_err("trace point exceeds the max count");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Get the size of the trace point */
	sz = RTE_PER_LCORE(trace_point_sz);
	tp = calloc(1, sizeof(struct trace_point));
	if (tp == NULL) {
		trace_err("failed to allocate trace point memory");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the trace point */
	if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
		trace_err("name is too long");
		rte_errno = E2BIG;
		goto free;
	}
	/* Copy the accumulated CTF field data for future use */
	if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
		trace_err("CTF field is too long");
		rte_errno = E2BIG;
		goto free;
	}
	/* Clear the field memory for the next event */
	memset(field, 0, TRACE_CTF_FIELD_SIZE);

	/* Form the trace handle: size in the low bits, ID in the high bits */
	*handle = sz;
	*handle |= (uint64_t)trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;
	trace.nb_trace_points++;
	tp->handle = handle;

	/* Add the trace point at the tail of the list */
	STAILQ_INSERT_TAIL(&tp_list, tp, next);
	__atomic_thread_fence(__ATOMIC_RELEASE);

	return 0;
free:
	free(tp);
fail:
	if (trace.register_errno == 0)
		trace.register_errno = rte_errno;

	return -rte_errno;
}
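
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * applications normally reach this function through the macros in
 * rte_trace_point.h / rte_trace_point_register.h rather than calling it
 * directly. A minimal sketch, assuming a hypothetical trace point named
 * app.sample.event:
 *
 *	RTE_TRACE_POINT(
 *		app_trace_sample,
 *		RTE_TRACE_POINT_ARGS(uint32_t val),
 *		rte_trace_point_emit_u32(val);
 *	)
 *
 *	RTE_TRACE_POINT_REGISTER(app_trace_sample, app.sample.event)
 *
 * The register macro runs at constructor time, so registration failures
 * surface later, via trace.register_errno, when eal_trace_init() runs.
 */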