diff --git a/lib/librte_eal/common/eal_common_trace.c b/lib/librte_eal/common/eal_common_trace.c
index e82bb9e..b6da553 100644
--- a/lib/librte_eal/common/eal_common_trace.c
+++ b/lib/librte_eal/common/eal_common_trace.c
@@ -21,7 +21,7 @@ static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
 static RTE_DEFINE_PER_LCORE(int, ctf_count);
 
 static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
-static struct trace trace;
+static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };
 
 struct trace *
 trace_obj_get(void)
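
The compile-time initializer above guarantees `trace.args` is a valid empty list before any code runs, so trace arguments saved during early `--trace` option parsing can never be appended to an uninitialized head. A minimal, self-contained sketch of the same `sys/queue.h` pattern (the names here are illustrative, not DPDK's):

```c
#include <stdio.h>
#include <sys/queue.h>

struct arg {
	STAILQ_ENTRY(arg) next;
	const char *val;
};

/* A head initialized at compile time is usable before any init call. */
static STAILQ_HEAD(arg_head, arg) args = STAILQ_HEAD_INITIALIZER(args);

int
main(void)
{
	struct arg a = { .val = "trace=.*" };
	struct arg *ap;

	STAILQ_INSERT_TAIL(&args, &a, next);
	STAILQ_FOREACH(ap, &args, next)
		printf("%s\n", ap->val);
	return 0;
}
```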
@@ -38,6 +38,8 @@ trace_list_head_get(void)
 int
 eal_trace_init(void)
 {
+       struct trace_arg *arg;
+
        /* Trace memory should start with 8B aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);
 
@@ -47,6 +49,9 @@ eal_trace_init(void)
                goto fail;
        }
 
+       if (!STAILQ_EMPTY(&trace.args))
+               trace.status = true;
+
        if (!rte_trace_is_enabled())
                return 0;
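
Flipping `trace.status` when saved arguments exist is what lets a `--trace=` option given before init actually enable tracing once `eal_trace_init()` runs. The queuing side is outside this diff; a hedged guess at its shape (`trace_args_save` and a `trace_arg` layout with a `char *val` member are assumptions inferred from the usage below, not confirmed code):

```c
#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical: stash a --trace=<regexp> value for replay at init time,
 * appending to the statically initialized trace.args list. */
static int
trace_args_save(const char *val)
{
	struct trace_arg *arg = calloc(1, sizeof(*arg));

	if (arg == NULL)
		return -ENOMEM;
	arg->val = strdup(val);
	if (arg->val == NULL) {
		free(arg);
		return -ENOMEM;
	}
	STAILQ_INSERT_TAIL(&trace.args, arg, next);
	return 0;
}
```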
 
@@ -61,6 +66,9 @@ eal_trace_init(void)
         */
        trace_uuid_generate();
 
+       /* Apply buffer size configuration for trace output */
+       trace_bufsz_args_apply();
+
        /* Generate CTF TDSL metadata */
        if (trace_metadata_create() < 0)
                goto fail;
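
The buffer-size argument must be applied at this point, before metadata creation and before the per-thread allocations that read `trace->buff_len`. `trace_bufsz_args_apply()` itself is not shown here; as a generic illustration of the suffix parsing such an option usually involves (a hypothetical helper, not DPDK's code):

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical: parse "32K", "2M", "1G" style sizes; returns 0 on
 * malformed input. Trailing garbage after the suffix is not checked. */
static uint64_t
parse_size(const char *s)
{
	char *end = NULL;
	uint64_t sz = strtoul(s, &end, 10);

	if (end == s)
		return 0;
	switch (*end) {
	case 'G': case 'g': sz <<= 10; /* fall through */
	case 'M': case 'm': sz <<= 10; /* fall through */
	case 'K': case 'k': sz <<= 10; break;
	case '\0': break;
	default: return 0;
	}
	return sz;
}
```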
@@ -73,6 +81,10 @@ eal_trace_init(void)
        if (trace_epoch_time_save() < 0)
                goto fail;
 
+       /* Apply global configurations */
+       STAILQ_FOREACH(arg, &trace.args, next)
+               trace_args_apply(arg->val);
+
        rte_trace_mode_set(trace.mode);
 
        return 0;
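
Replaying the saved values only now means `trace_args_apply()` operates on a fully initialized trace object (UUID, metadata, memory and epoch all set up). In terms of DPDK's public API, a `--trace=<regexp>` value ends up enabling the matching trace points, which an application can also do directly:

```c
#include <stdio.h>
#include <rte_trace.h>

/* Enable every ethdev trace point, the equivalent of --trace=ethdev.* */
static void
enable_ethdev_traces(void)
{
	if (rte_trace_regexp("ethdev.*", true) < 0)
		printf("no trace point matched the pattern\n");
}
```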
@@ -89,8 +101,9 @@ eal_trace_fini(void)
 {
        if (!rte_trace_is_enabled())
                return;
-       trace_mem_per_thread_free();
+       trace_mem_free();
        trace_metadata_destroy();
+       eal_trace_args_free();
 }
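
Teardown is now symmetric with init: all remaining trace buffers are freed (not just the calling thread's), then the metadata, then the queued argument strings. `eal_trace_args_free()` is not part of this diff; a plausible sketch, assuming the same `trace_arg` layout used above:

```c
#include <stdlib.h>

/* Hypothetical sketch: drain and free the queued --trace arguments. */
void
eal_trace_args_free(void)
{
	struct trace *trace = trace_obj_get();
	struct trace_arg *arg;

	while ((arg = STAILQ_FIRST(&trace->args)) != NULL) {
		STAILQ_REMOVE_HEAD(&trace->args, next);
		free(arg->val);
		free(arg);
	}
}
```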
 
 bool
@@ -318,7 +331,7 @@ __rte_trace_mem_per_thread_alloc(void)
        }
 
        /* First attempt from huge page */
-       header = rte_malloc(NULL, trace_mem_sz(trace->buff_len), 8);
+       header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
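
Switching from `rte_malloc()` to `eal_malloc_no_trace()` (with the matching `eal_free_no_trace()` below) avoids a chicken-and-egg problem: `rte_malloc()` emits trace points of its own, so allocating the trace buffer through it would try to trace before the buffer exists. A simplified, self-contained illustration of the split; the names and structure here are mine, not DPDK's implementation:

```c
#include <stdio.h>
#include <stdlib.h>

/* Internal worker: does the allocation, emits nothing. */
static void *
malloc_internal(size_t size, size_t align)
{
	/* aligned_alloc requires size to be a multiple of align */
	return aligned_alloc(align, (size + align - 1) / align * align);
}

/* Public, traced entry point: emitting here would recurse if the
 * trace buffer itself were allocated through this function. */
static void *
traced_malloc(size_t size, size_t align)
{
	void *ptr = malloc_internal(size, align);

	printf("trace: malloc(%zu) = %p\n", size, ptr); /* stand-in emit */
	return ptr;
}

/* No-trace variant for allocating tracing's own buffers. */
static void *
malloc_no_trace(size_t size, size_t align)
{
	return malloc_internal(size, align);
}

int
main(void)
{
	void *p = malloc_no_trace(100, 8); /* no emit: safe during setup */
	void *q = traced_malloc(100, 8);   /* emits a (stand-in) record */

	free(p);
	free(q);
	return 0;
}
```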
@@ -357,27 +370,81 @@ fail:
        rte_spinlock_unlock(&trace->lock);
 }
 
+static void
+trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
+{
+       if (meta->area == TRACE_AREA_HUGEPAGE)
+               eal_free_no_trace(meta->mem);
+       else if (meta->area == TRACE_AREA_HEAP)
+               free(meta->mem);
+}
+
 void
 trace_mem_per_thread_free(void)
+{
+       struct trace *trace = trace_obj_get();
+       struct __rte_trace_header *header;
+       uint32_t count;
+
+       header = RTE_PER_LCORE(trace_mem);
+       if (header == NULL)
+               return;
+
+       rte_spinlock_lock(&trace->lock);
+       for (count = 0; count < trace->nb_trace_mem_list; count++) {
+               if (trace->lcore_meta[count].mem == header)
+                       break;
+       }
+       if (count != trace->nb_trace_mem_list) {
+               struct thread_mem_meta *meta = &trace->lcore_meta[count];
+
+               trace_mem_per_thread_free_unlocked(meta);
+               if (count != trace->nb_trace_mem_list - 1) {
+                       memmove(meta, meta + 1,
+                               sizeof(*meta) *
+                                (trace->nb_trace_mem_list - count - 1));
+               }
+               trace->nb_trace_mem_list--;
+       }
+       rte_spinlock_unlock(&trace->lock);
+}
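
The per-thread variant now releases only the calling thread's header: it locates the entry in `lcore_meta` under the lock, frees it through the shared helper, and compacts the array with `memmove()` so the list stays dense for `trace_mem_free()` (the caller of the per-thread path is outside this diff). The compaction step in isolation:

```c
#include <stdio.h>
#include <string.h>

int
main(void)
{
	int meta[5] = { 10, 20, 30, 40, 50 };
	unsigned int nb = 5, idx = 1; /* remove the element at index 1 */

	if (idx != nb - 1)
		memmove(&meta[idx], &meta[idx + 1],
			sizeof(meta[0]) * (nb - idx - 1));
	nb--;
	for (unsigned int i = 0; i < nb; i++)
		printf("%d ", meta[i]); /* prints: 10 30 40 50 */
	printf("\n");
	return 0;
}
```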
+
+void
+trace_mem_free(void)
 {
        struct trace *trace = trace_obj_get();
        uint32_t count;
-       void *mem;
 
        if (!rte_trace_is_enabled())
                return;
 
        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
-               mem = trace->lcore_meta[count].mem;
-               if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
-                       rte_free(mem);
-               else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
-                       free(mem);
+               trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
        }
+       trace->nb_trace_mem_list = 0;
        rte_spinlock_unlock(&trace->lock);
 }
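
`trace_mem_free()` takes over the old whole-process behaviour and additionally resets `nb_trace_mem_list`, so a second call at shutdown finds an empty list and does nothing. The shape is the common "one unlocked worker, locked entry points" split; restated generically:

```c
#include <pthread.h>
#include <stdlib.h>

struct table {
	pthread_mutex_t lock;
	void **items;
	size_t nb;
};

/* Worker: caller must already hold t->lock. */
static void
item_free_unlocked(void *item)
{
	free(item);
}

/* Locked entry point: free everything and reset the count so that a
 * second call finds an empty table and does nothing. */
void
all_items_free(struct table *t)
{
	pthread_mutex_lock(&t->lock);
	for (size_t i = 0; i < t->nb; i++)
		item_free_unlocked(t->items[i]);
	t->nb = 0;
	pthread_mutex_unlock(&t->lock);
}
```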
 
+void
+__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
+{
+       char *field = RTE_PER_LCORE(ctf_field);
+       int count = RTE_PER_LCORE(ctf_count);
+       size_t size;
+       int rc;
+
+       size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
+       RTE_PER_LCORE(trace_point_sz) += sz;
+       rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
+       if (rc <= 0 || (size_t)rc >= size) {
+               RTE_PER_LCORE(trace_point_sz) = 0;
+               trace_crit("CTF field is too long");
+               return;
+       }
+       RTE_PER_LCORE(ctf_count) += rc;
+}
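
`__rte_trace_point_emit_field()` now bounds-checks its `snprintf()` into the per-lcore CTF field buffer and signals overflow by zeroing `trace_point_sz`, which the registration path can then treat as an error instead of silently truncating the CTF metadata. Applications never call it directly; it runs underneath the `rte_trace_point_emit_*` helpers at registration time, roughly like this (a minimal sketch using DPDK's public trace point API; the trace point name is made up):

```c
/* app_trace.h */
#include <rte_trace_point.h>

RTE_TRACE_POINT(
	app_trace_burst,
	RTE_TRACE_POINT_ARGS(uint16_t port, uint16_t nb_pkts),
	rte_trace_point_emit_u16(port);
	rte_trace_point_emit_u16(nb_pkts);
)

/* app_trace.c */
#include <rte_trace_point_register.h>
#include "app_trace.h"

RTE_TRACE_POINT_REGISTER(app_trace_burst, app.trace.burst)
```

At registration, each `rte_trace_point_emit_u16()` above expands into a `__rte_trace_point_emit_field(sizeof(uint16_t), "port", "uint16_t")`-style call, which is what appends the `"uint16_t port;"` text this function formats.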
+
 int
 __rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))