static RTE_DEFINE_PER_LCORE(int, ctf_count);
static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
-static struct trace trace;
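+/* Statically initialize the args list so trace arguments can be queued before eal_trace_init() runs */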
+static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };
struct trace *
trace_obj_get(void)
int
eal_trace_init(void)
{
+ struct trace_arg *arg;
+
/* Trace memory should start 8B aligned for natural alignment */
RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);
goto fail;
}
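+ /* Trace arguments queued at startup mean tracing was requested */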
+ if (!STAILQ_EMPTY(&trace.args))
+ trace.status = true;
+
if (!rte_trace_is_enabled())
return 0;
/* Generate UUID ver 4 with total size of events and number
 * of events
 */
trace_uuid_generate();
+ /* Apply buffer size configuration for trace output */
+ trace_bufsz_args_apply();
+
/* Generate CTF TDSL metadata */
if (trace_metadata_create() < 0)
goto fail;
if (trace_epoch_time_save() < 0)
goto fail;
+ /* Apply global configurations */
+ STAILQ_FOREACH(arg, &trace.args, next)
+ trace_args_apply(arg->val);
+
rte_trace_mode_set(trace.mode);
return 0;
void
eal_trace_fini(void)
{
if (!rte_trace_is_enabled())
return;
- trace_mem_per_thread_free();
+ trace_mem_free();
trace_metadata_destroy();
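+ /* Drop the trace arguments saved for eal_trace_init() */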
+ eal_trace_args_free();
}
bool
}
/* First, attempt to allocate from huge pages */
- header = rte_malloc(NULL, trace_mem_sz(trace->buff_len), 8);
+ header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
if (header) {
trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
goto found;
rte_spinlock_unlock(&trace->lock);
}
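+/* Free one thread's trace buffer; the caller must hold trace->lock */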
+static void
+trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
+{
+ if (meta->area == TRACE_AREA_HUGEPAGE)
+ eal_free_no_trace(meta->mem);
+ else if (meta->area == TRACE_AREA_HEAP)
+ free(meta->mem);
+}
+
void
trace_mem_per_thread_free(void)
+{
+ struct trace *trace = trace_obj_get();
+ struct __rte_trace_header *header;
+ uint32_t count;
+
+ header = RTE_PER_LCORE(trace_mem);
+ if (header == NULL)
+ return;
+
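+ /* Locate this thread's buffer in the global meta list */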
+ rte_spinlock_lock(&trace->lock);
+ for (count = 0; count < trace->nb_trace_mem_list; count++) {
+ if (trace->lcore_meta[count].mem == header)
+ break;
+ }
+ if (count != trace->nb_trace_mem_list) {
+ struct thread_mem_meta *meta = &trace->lcore_meta[count];
+
+ trace_mem_per_thread_free_unlocked(meta);
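+ /* Compact the list so the remaining entries stay contiguous */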
+ if (count != trace->nb_trace_mem_list - 1) {
+ memmove(meta, meta + 1,
+ sizeof(*meta) *
+ (trace->nb_trace_mem_list - count - 1));
+ }
+ trace->nb_trace_mem_list--;
+ }
+ rte_spinlock_unlock(&trace->lock);
+}
+
+void
+trace_mem_free(void)
{
struct trace *trace = trace_obj_get();
uint32_t count;
- void *mem;
if (!rte_trace_is_enabled())
return;
rte_spinlock_lock(&trace->lock);
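+ /* Release every per-thread buffer still tracked in the meta list */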
for (count = 0; count < trace->nb_trace_mem_list; count++) {
- mem = trace->lcore_meta[count].mem;
- if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
- rte_free(mem);
- else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
- free(mem);
+ trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
}
+ trace->nb_trace_mem_list = 0;
rte_spinlock_unlock(&trace->lock);
}
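+
+/* Append one "<datatype> <name>;" entry to this lcore's CTF field string */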
+void
+__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
+{
+ char *field = RTE_PER_LCORE(ctf_field);
+ int count = RTE_PER_LCORE(ctf_count);
+ size_t size;
+ int rc;
+
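+ /* Space left in the CTF field string, reserving one byte for the NUL */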
+ size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
+ RTE_PER_LCORE(trace_point_sz) += sz;
+ rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
+ if (rc <= 0 || (size_t)rc >= size) {
+ RTE_PER_LCORE(trace_point_sz) = 0;
+ trace_crit("CTF field is too long");
+ return;
+ }
+ RTE_PER_LCORE(ctf_count) += rc;
+}
+
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
void (*register_fn)(void))