trace: implement operation APIs
[dpdk.git] / lib / librte_eal / common / eal_common_trace.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4
5 #include <fnmatch.h>
6 #include <inttypes.h>
7 #include <sys/queue.h>
8 #include <regex.h>
9
10 #include <rte_common.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_per_lcore.h>
14 #include <rte_string_fns.h>
15
16 #include "eal_trace.h"
17
/* Per-lcore scratch state filled in by a trace point's register callback:
 * accumulated size of the trace point payload, the CTF field description
 * being built, and a running field count.
 */
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

/* All registered trace points, in registration order. */
static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
/* Global trace state (status, mode, counters). */
static struct trace trace;
24
25 bool
26 rte_trace_is_enabled(void)
27 {
28         return trace.status;
29 }
30
31 static void
32 trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
33 {
34         if (mode == RTE_TRACE_MODE_OVERWRITE)
35                 __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
36                         __ATOMIC_RELEASE);
37         else
38                 __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
39                         __ATOMIC_RELEASE);
40 }
41
42 void
43 rte_trace_mode_set(enum rte_trace_mode mode)
44 {
45         struct trace_point *tp;
46
47         if (!rte_trace_is_enabled())
48                 return;
49
50         STAILQ_FOREACH(tp, &tp_list, next)
51                 trace_mode_set(tp->handle, mode);
52
53         trace.mode = mode;
54 }
55
56 enum
57 rte_trace_mode rte_trace_mode_get(void)
58 {
59         return trace.mode;
60 }
61
62 static bool
63 trace_point_is_invalid(rte_trace_point_t *t)
64 {
65         return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
66 }
67
68 bool
69 rte_trace_point_is_enabled(rte_trace_point_t *trace)
70 {
71         uint64_t val;
72
73         if (trace_point_is_invalid(trace))
74                 return false;
75
76         val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
77         return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
78 }
79
80 int
81 rte_trace_point_enable(rte_trace_point_t *trace)
82 {
83         if (trace_point_is_invalid(trace))
84                 return -ERANGE;
85
86         __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
87                 __ATOMIC_RELEASE);
88         return 0;
89 }
90
91 int
92 rte_trace_point_disable(rte_trace_point_t *trace)
93 {
94         if (trace_point_is_invalid(trace))
95                 return -ERANGE;
96
97         __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
98                 __ATOMIC_RELEASE);
99         return 0;
100 }
101
102 int
103 rte_trace_pattern(const char *pattern, bool enable)
104 {
105         struct trace_point *tp;
106         int rc = 0, found = 0;
107
108         STAILQ_FOREACH(tp, &tp_list, next) {
109                 if (fnmatch(pattern, tp->name, 0) == 0) {
110                         if (enable)
111                                 rc = rte_trace_point_enable(tp->handle);
112                         else
113                                 rc = rte_trace_point_disable(tp->handle);
114                         found = 1;
115                 }
116                 if (rc < 0)
117                         return rc;
118         }
119
120         return rc | found;
121 }
122
123 int
124 rte_trace_regexp(const char *regex, bool enable)
125 {
126         struct trace_point *tp;
127         int rc = 0, found = 0;
128         regex_t r;
129
130         if (regcomp(&r, regex, 0) != 0)
131                 return -EINVAL;
132
133         STAILQ_FOREACH(tp, &tp_list, next) {
134                 if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
135                         if (enable)
136                                 rc = rte_trace_point_enable(tp->handle);
137                         else
138                                 rc = rte_trace_point_disable(tp->handle);
139                         found = 1;
140                 }
141                 if (rc < 0)
142                         return rc;
143         }
144         regfree(&r);
145
146         return rc | found;
147 }
148
149 rte_trace_point_t *
150 rte_trace_point_lookup(const char *name)
151 {
152         struct trace_point *tp;
153
154         if (name == NULL)
155                 return NULL;
156
157         STAILQ_FOREACH(tp, &tp_list, next)
158                 if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
159                         return tp->handle;
160
161         return NULL;
162 }
163
164 int
165 __rte_trace_point_register(rte_trace_point_t *handle, const char *name,
166                 void (*register_fn)(void))
167 {
168         char *field = RTE_PER_LCORE(ctf_field);
169         struct trace_point *tp;
170         uint16_t sz;
171
172         /* Sanity checks of arguments */
173         if (name == NULL || register_fn == NULL || handle == NULL) {
174                 trace_err("invalid arguments");
175                 rte_errno = EINVAL;
176                 goto fail;
177         }
178
179         /* Check the size of the trace point object */
180         RTE_PER_LCORE(trace_point_sz) = 0;
181         RTE_PER_LCORE(ctf_count) = 0;
182         register_fn();
183         if (RTE_PER_LCORE(trace_point_sz) == 0) {
184                 trace_err("missing rte_trace_emit_header() in register fn");
185                 rte_errno = EBADF;
186                 goto fail;
187         }
188
189         /* Is size overflowed */
190         if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
191                 trace_err("trace point size overflowed");
192                 rte_errno = ENOSPC;
193                 goto fail;
194         }
195
196         /* Are we running out of space to store trace points? */
197         if (trace.nb_trace_points > UINT16_MAX) {
198                 trace_err("trace point exceeds the max count");
199                 rte_errno = ENOSPC;
200                 goto fail;
201         }
202
203         /* Get the size of the trace point */
204         sz = RTE_PER_LCORE(trace_point_sz);
205         tp = calloc(1, sizeof(struct trace_point));
206         if (tp == NULL) {
207                 trace_err("fail to allocate trace point memory");
208                 rte_errno = ENOMEM;
209                 goto fail;
210         }
211
212         /* Initialize the trace point */
213         if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
214                 trace_err("name is too long");
215                 rte_errno = E2BIG;
216                 goto free;
217         }
218
219         /* Copy the field data for future use */
220         if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
221                 trace_err("CTF field size is too long");
222                 rte_errno = E2BIG;
223                 goto free;
224         }
225
226         /* Clear field memory for the next event */
227         memset(field, 0, TRACE_CTF_FIELD_SIZE);
228
229         /* Form the trace handle */
230         *handle = sz;
231         *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;
232
233         trace.nb_trace_points++;
234         tp->handle = handle;
235
236         /* Add the trace point at tail */
237         STAILQ_INSERT_TAIL(&tp_list, tp, next);
238         __atomic_thread_fence(__ATOMIC_RELEASE);
239
240         /* All Good !!! */
241         return 0;
242 free:
243         free(tp);
244 fail:
245         if (trace.register_errno == 0)
246                 trace.register_errno = rte_errno;
247
248         return -rte_errno;
249 }