/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */
#include <errno.h>
#include <fnmatch.h>
#include <regex.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"
18 RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
19 static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
20 static RTE_DEFINE_PER_LCORE(int, ctf_count);
22 static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
23 static struct trace trace;
31 struct trace_point_head *
32 trace_list_head_get(void)
40 /* One of the trace point registration failed */
41 if (trace.register_errno) {
42 rte_errno = trace.register_errno;
46 if (!rte_trace_is_enabled())
49 rte_spinlock_init(&trace.lock);
51 /* Is duplicate trace name registered */
52 if (trace_has_duplicate_entry())
55 /* Generate UUID ver 4 with total size of events and number of
58 trace_uuid_generate();
60 /* Create trace directory */
64 /* Save current epoch timestamp for future use */
65 if (trace_epoch_time_save() < 0)
68 rte_trace_mode_set(trace.mode);
73 trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
/*
 * Tear down the trace subsystem at EAL cleanup time.
 *
 * NOTE(review): reconstructed from a truncated fragment — only the
 * enabled-check was visible. Confirm the exact cleanup call(s) against
 * the original eal_trace_fini() before merging.
 */
void
eal_trace_fini(void)
{
	/* Nothing was allocated when tracing is disabled */
	if (!rte_trace_is_enabled())
		return;
	trace_mem_per_thread_free();
}
85 rte_trace_is_enabled(void)
91 trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
93 if (mode == RTE_TRACE_MODE_OVERWRITE)
94 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
97 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
102 rte_trace_mode_set(enum rte_trace_mode mode)
104 struct trace_point *tp;
106 if (!rte_trace_is_enabled())
109 STAILQ_FOREACH(tp, &tp_list, next)
110 trace_mode_set(tp->handle, mode);
116 rte_trace_mode rte_trace_mode_get(void)
122 trace_point_is_invalid(rte_trace_point_t *t)
124 return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
128 rte_trace_point_is_enabled(rte_trace_point_t *trace)
132 if (trace_point_is_invalid(trace))
135 val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
136 return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
140 rte_trace_point_enable(rte_trace_point_t *trace)
142 if (trace_point_is_invalid(trace))
145 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
151 rte_trace_point_disable(rte_trace_point_t *trace)
153 if (trace_point_is_invalid(trace))
156 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
162 rte_trace_pattern(const char *pattern, bool enable)
164 struct trace_point *tp;
165 int rc = 0, found = 0;
167 STAILQ_FOREACH(tp, &tp_list, next) {
168 if (fnmatch(pattern, tp->name, 0) == 0) {
170 rc = rte_trace_point_enable(tp->handle);
172 rc = rte_trace_point_disable(tp->handle);
183 rte_trace_regexp(const char *regex, bool enable)
185 struct trace_point *tp;
186 int rc = 0, found = 0;
189 if (regcomp(&r, regex, 0) != 0)
192 STAILQ_FOREACH(tp, &tp_list, next) {
193 if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
195 rc = rte_trace_point_enable(tp->handle);
197 rc = rte_trace_point_disable(tp->handle);
209 rte_trace_point_lookup(const char *name)
211 struct trace_point *tp;
216 STAILQ_FOREACH(tp, &tp_list, next)
217 if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
224 __rte_trace_point_register(rte_trace_point_t *handle, const char *name,
225 void (*register_fn)(void))
227 char *field = RTE_PER_LCORE(ctf_field);
228 struct trace_point *tp;
231 /* Sanity checks of arguments */
232 if (name == NULL || register_fn == NULL || handle == NULL) {
233 trace_err("invalid arguments");
238 /* Check the size of the trace point object */
239 RTE_PER_LCORE(trace_point_sz) = 0;
240 RTE_PER_LCORE(ctf_count) = 0;
242 if (RTE_PER_LCORE(trace_point_sz) == 0) {
243 trace_err("missing rte_trace_emit_header() in register fn");
248 /* Is size overflowed */
249 if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
250 trace_err("trace point size overflowed");
255 /* Are we running out of space to store trace points? */
256 if (trace.nb_trace_points > UINT16_MAX) {
257 trace_err("trace point exceeds the max count");
262 /* Get the size of the trace point */
263 sz = RTE_PER_LCORE(trace_point_sz);
264 tp = calloc(1, sizeof(struct trace_point));
266 trace_err("fail to allocate trace point memory");
271 /* Initialize the trace point */
272 if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
273 trace_err("name is too long");
278 /* Copy the field data for future use */
279 if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
280 trace_err("CTF field size is too long");
285 /* Clear field memory for the next event */
286 memset(field, 0, TRACE_CTF_FIELD_SIZE);
288 /* Form the trace handle */
290 *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;
292 trace.nb_trace_points++;
295 /* Add the trace point at tail */
296 STAILQ_INSERT_TAIL(&tp_list, tp, next);
297 __atomic_thread_fence(__ATOMIC_RELEASE);
304 if (trace.register_errno == 0)
305 trace.register_errno = rte_errno;