diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 69f0943..5842f29 100644
@@ -16,7 +16,7 @@
 #define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
 #define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
 #define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
-#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.3f;%.3f;%.3f\n"
+#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n"
 
 struct cperf_pmd_cyclecount_ctx {
        uint8_t dev_id;
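
The first hunk switches CSV_LINE_FMT from semicolon to comma separators so the
output is genuine comma-separated values, consistent with CSV_HDR_FMT above it.
A minimal, self-contained sketch of how the corrected format renders; the
sample values are illustrative, not taken from a real run:

#include <stdio.h>

#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n"

int main(void)
{
	/* lcore id, buf size, burst size, enqueued, dequeued,
	 * enq retries, deq retries, cycles/op, cycles/enq, cycles/deq */
	printf(CSV_LINE_FMT, 9u, 64u, 32u, 8192u, 8192u, 0u, 12u,
			150.000, 40.000, 35.000);
	return 0;
}
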
@@ -63,7 +63,7 @@ cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
                return;
 
        if (ctx->sess) {
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                if (ctx->options->op_type == CPERF_PDCP ||
                                ctx->options->op_type == CPERF_DOCSIS) {
                        struct rte_security_ctx *sec_ctx =
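
This hunk tracks DPDK's build-system macro rename: with the move to meson-only
builds, per-library config flags changed from RTE_LIBRTE_* to RTE_LIB_*, so the
guard on the rte_security teardown path must test RTE_LIB_SECURITY. For
context, a sketch of the guarded teardown surrounding this hunk, reconstructed
from the visible context lines (needs rte_cryptodev.h and rte_security.h;
error handling omitted):

#ifdef RTE_LIB_SECURITY
	if (ctx->options->op_type == CPERF_PDCP ||
			ctx->options->op_type == CPERF_DOCSIS) {
		struct rte_security_ctx *sec_ctx =
			(struct rte_security_ctx *)
			rte_cryptodev_get_sec_ctx(ctx->dev_id);

		/* PDCP/DOCSIS sessions belong to the device's security
		 * context, not to the plain cryptodev session API */
		rte_security_session_destroy(sec_ctx,
			(struct rte_security_session *)ctx->sess);
	}
#endif
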
@@ -181,7 +181,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
                                burst_size,
                                state->ctx->sess, state->opts,
                                state->ctx->test_vector, iv_offset,
-                               &imix_idx);
+                               &imix_idx, NULL);
 
 #ifdef CPERF_LINEARIZATION_ENABLE
                /* Check if source mbufs require coalescing */
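
Both populate_ops call sites (here and in the next hunk) gain a trailing NULL
argument because the cperf_populate_ops_t callback grew an extra parameter. A
hedged sketch of the assumed callback shape after this change, based on
cperf_ops.h; the tsc_start name and its use by the latency test to timestamp
op creation are assumptions and may differ from the actual header:

/* Assumed prototype; tsc_start is the parameter the cyclecount
 * test does not need and therefore passes as NULL. */
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start);
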
@@ -232,7 +232,7 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
                                burst_size,
                                state->ctx->sess, state->opts,
                                state->ctx->test_vector, iv_offset,
-                               &imix_idx);
+                               &imix_idx, NULL);
        }
        return 0;
 }
@@ -334,7 +334,7 @@ pmd_cyclecount_bench_burst_sz(
         * queue, so we never get any failed enqs unless the driver won't accept
         * the exact number of descriptors we requested, or the driver won't
         * wrap around the end of the TX ring. However, since we're only
-        * dequeueing once we've filled up the queue, we have to benchmark it
+        * dequeuing once we've filled up the queue, we have to benchmark it
         * piecemeal and then average out the results.
         */
        cur_op = 0;
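
The comment above describes the measurement strategy: the queue is filled
once, then drained burst by burst, and the cost is averaged over all ops. An
illustrative sketch of that piecemeal averaging; rte_rdtsc_precise() and the
opts fields are real DPDK APIs, but the loop body is simplified and not the
test's actual code:

uint64_t total_cycles = 0;
uint32_t total_ops = 0;
uint32_t cur_op;

for (cur_op = 0; cur_op < state->opts->total_ops;
		cur_op += state->opts->nb_descriptors) {
	uint64_t start = rte_rdtsc_precise();

	/* enqueue one queue's worth of ops, then dequeue them */

	total_cycles += rte_rdtsc_precise() - start;
	total_ops += state->opts->nb_descriptors;
}

double cycles_per_op = (double)total_cycles / total_ops;
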
@@ -404,7 +404,7 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
        state.lcore = rte_lcore_id();
        state.linearize = 0;
 
-       static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
+       static uint16_t display_once;
        static bool warmup = true;
 
        /*
@@ -449,8 +449,10 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
                        continue;
                }
 
+               uint16_t exp = 0;
                if (!opts->csv) {
-                       if (rte_atomic16_test_and_set(&display_once))
+                       if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
+                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED))
                                printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
                                                "Burst Size", "Enqueued",
                                                "Dequeued", "Enq Retries",
@@ -466,7 +468,8 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
                                        state.cycles_per_enq,
                                        state.cycles_per_deq);
                } else {
-                       if (rte_atomic16_test_and_set(&display_once))
+                       if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
+                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED))
                                printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
                                                "Burst Size", "Enqueued",
                                                "Dequeued", "Enq Retries",