test/distributor: fix race conditions on shutdown
author: Lukasz Wojciechowski <l.wojciechow@partner.samsung.com>
Sat, 17 Oct 2020 03:06:55 +0000 (05:06 +0200)
committer: David Marchand <david.marchand@redhat.com>
Mon, 19 Oct 2020 08:57:17 +0000 (10:57 +0200)
Instead of making delays in test code and waiting
for worker hopefully to reach proper states,
synchronize worker shutdown test cases with spin lock
on atomic variable.

Fixes: c0de0eb82e40 ("distributor: switch over to new API")
Cc: stable@dpdk.org
Signed-off-by: Lukasz Wojciechowski <l.wojciechow@partner.samsung.com>
Acked-by: David Hunt <david.hunt@intel.com>
app/test/test_distributor.c

index 3f0aeb7..fdb6ea9 100644 (file)
@@ -27,6 +27,7 @@ struct worker_params worker_params;
 /* statics - all zero-initialized by default */
 static volatile int quit;      /**< general quit variable for all threads */
 static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
 static volatile unsigned worker_idx;
 static volatile unsigned zero_idx;
 
@@ -377,8 +378,10 @@ handle_work_for_shutdown_test(void *arg)
                /* for worker zero, allow it to restart to pick up last packet
                 * when all workers are shutting down.
                 */
+               __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
                while (zero_quit)
                        usleep(100);
+               __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
 
                num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
 
@@ -446,7 +449,12 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
 
        /* flush the distributor */
        rte_distributor_flush(d);
-       rte_delay_us(10000);
+       while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+               rte_distributor_flush(d);
+
+       zero_quit = 0;
+       while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+               rte_delay_us(100);
 
        for (i = 0; i < rte_lcore_count() - 1; i++)
                printf("Worker %u handled %u packets\n", i,
@@ -506,9 +514,14 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
        /* flush the distributor */
        rte_distributor_flush(d);
 
-       rte_delay_us(10000);
+       while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+               rte_distributor_flush(d);
 
        zero_quit = 0;
+
+       while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+               rte_delay_us(100);
+
        for (i = 0; i < rte_lcore_count() - 1; i++)
                printf("Worker %u handled %u packets\n", i,
                        __atomic_load_n(&worker_stats[i].handled_packets,
@@ -616,6 +629,8 @@ quit_workers(struct worker_params *wp, struct rte_mempool *p)
        quit = 0;
        worker_idx = 0;
        zero_idx = RTE_MAX_LCORE;
+       zero_quit = 0;
+       zero_sleep = 0;
 }
 
 static int