net/ice/base: fix flow director rule completion report
[dpdk.git] / lib / librte_eal / common / eal_common_trace.c
index e82bb9e..875553d 100644 (file)
@@ -21,7 +21,7 @@ static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
 static RTE_DEFINE_PER_LCORE(int, ctf_count);
 
 static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
-static struct trace trace;
+static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };
 
 struct trace *
 trace_obj_get(void)
@@ -38,6 +38,8 @@ trace_list_head_get(void)
 int
 eal_trace_init(void)
 {
+       struct trace_arg *arg;
+
        /* Trace memory should start with 8B aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);
 
@@ -47,6 +49,9 @@ eal_trace_init(void)
                goto fail;
        }
 
+       if (!STAILQ_EMPTY(&trace.args))
+               trace.status = true;
+
        if (!rte_trace_is_enabled())
                return 0;
 
@@ -61,6 +66,9 @@ eal_trace_init(void)
         */
        trace_uuid_generate();
 
+       /* Apply buffer size configuration for trace output */
+       trace_bufsz_args_apply();
+
        /* Generate CTF TDSL metadata */
        if (trace_metadata_create() < 0)
                goto fail;
@@ -73,6 +81,10 @@ eal_trace_init(void)
        if (trace_epoch_time_save() < 0)
                goto fail;
 
+       /* Apply global configurations */
+       STAILQ_FOREACH(arg, &trace.args, next)
+               trace_args_apply(arg->val);
+
        rte_trace_mode_set(trace.mode);
 
        return 0;
@@ -91,6 +103,7 @@ eal_trace_fini(void)
                return;
        trace_mem_per_thread_free();
        trace_metadata_destroy();
+       eal_trace_args_free();
 }
 
 bool
@@ -318,7 +331,7 @@ __rte_trace_mem_per_thread_alloc(void)
        }
 
        /* First attempt from huge page */
-       header = rte_malloc(NULL, trace_mem_sz(trace->buff_len), 8);
+       header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
@@ -371,13 +384,32 @@ trace_mem_per_thread_free(void)
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                mem = trace->lcore_meta[count].mem;
                if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
-                       rte_free(mem);
+                       eal_free_no_trace(mem);
                else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
                        free(mem);
        }
        rte_spinlock_unlock(&trace->lock);
 }
 
+/**
+ * Append one CTF field declaration, formatted as "<datatype> <in>;", to this
+ * lcore's ctf_field string buffer, and add @sz bytes to the per-lcore
+ * trace_point_sz accumulator.
+ *
+ * @param sz       Number of payload bytes this field contributes to the
+ *                 trace point record.
+ * @param in       Field name to emit into the CTF description.
+ * @param datatype CTF datatype string for the field.
+ */
+void
+__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
+{
+       char *field = RTE_PER_LCORE(ctf_field);
+       int count = RTE_PER_LCORE(ctf_count);
+       size_t size;
+       int rc;
+
+       /* Remaining room in ctf_field past the bytes already emitted; the
+        * extra -1 keeps the buffer's final byte out of snprintf's reach
+        * (presumably a guaranteed NUL terminator — TODO confirm).
+        */
+       size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
+       RTE_PER_LCORE(trace_point_sz) += sz;
+       rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
+       if (rc <= 0 || (size_t)rc >= size) {
+               /* snprintf failed or output was truncated: zero the
+                * accumulated size (NOTE(review): looks like a failure marker
+                * checked later during registration — confirm) and log.
+                */
+               RTE_PER_LCORE(trace_point_sz) = 0;
+               trace_crit("CTF field is too long");
+               return;
+       }
+       /* Advance write offset past the emitted text (rc excludes the NUL),
+        * so the next field is appended after this one.
+        */
+       RTE_PER_LCORE(ctf_count) += rc;
+}
+
 int
 __rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))