test/distributor: fix lcores statistics
app/test/test_distributor.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include "test.h"

#include <unistd.h>
#include <string.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_distributor.h>
#include <rte_string_fns.h>

#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32
#define BIG_BATCH 1024

struct worker_params {
        char name[64];
        struct rte_distributor *dist;
};

struct worker_params worker_params;

/* statics - all zero-initialized by default */
static volatile int quit;      /**< general quit variable for all threads */
static volatile int zero_quit; /**< var for when we just want thr0 to quit */
static volatile unsigned worker_idx;
static volatile unsigned zero_idx;

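/* per-worker packet counters, cache-aligned so that concurrent updates
 * from different lcores do not false-share a cache line.
 */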
struct worker_stats {
        volatile unsigned handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];

/* returns the total count of the number of packets handled by the worker
 * functions given below.
 */
static inline unsigned
total_packet_count(void)
{
        unsigned i, count = 0;
        for (i = 0; i < worker_idx; i++)
                count += __atomic_load_n(&worker_stats[i].handled_packets,
                                __ATOMIC_RELAXED);
        return count;
}

/* resets the packet counts for a new test */
static inline void
clear_packet_count(void)
{
        unsigned int i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                __atomic_store_n(&worker_stats[i].handled_packets, 0,
                        __ATOMIC_RELAXED);
}

/* this is the basic worker function for the sanity tests;
 * it does nothing but return packets and count them.
 */
static int
handle_work(void *arg)
{
        struct rte_mbuf *buf[8] __rte_cache_aligned;
        struct worker_params *wp = arg;
        struct rte_distributor *db = wp->dist;
        unsigned int num;
        unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

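        /* register with the distributor by requesting a first burst, then
         * keep cycling: count the current burst and hand it back in
         * exchange for a new one.
         */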
        num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
        while (!quit) {
                __atomic_fetch_add(&worker_stats[id].handled_packets, num,
                                __ATOMIC_RELAXED);
                num = rte_distributor_get_pkt(db, id,
                                buf, buf, num);
        }
        __atomic_fetch_add(&worker_stats[id].handled_packets, num,
                        __ATOMIC_RELAXED);
        rte_distributor_return_pkt(db, id, buf, num);
        return 0;
}

/* do basic sanity testing of the distributor. This test checks the following:
 * - send 32 packets through the distributor with the same tag and ensure they
 *   all go to the one worker
 * - send 32 packets through the distributor with two different tags and
 *   verify that they go equally to two different workers.
 * - send 32 packets with different tags through the distributor and
 *   just verify we get all packets back.
 * - send 1024 packets through the distributor, gathering the returned packets
 *   as we go. Then verify that we correctly got all 1024 pointers back again,
 *   not necessarily in the same order (as different flows).
 */
static int
sanity_test(struct worker_params *wp, struct rte_mempool *p)
{
        struct rte_distributor *db = wp->dist;
        struct rte_mbuf *bufs[BURST];
        struct rte_mbuf *returns[BURST*2];
        unsigned int i, count;
        unsigned int retries;

        printf("=== Basic distributor sanity tests ===\n");
        clear_packet_count();
        if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
                printf("line %d: Error getting mbufs from pool\n", __LINE__);
                return -1;
        }

        /* now set all hash values in all buffers to zero, so all pkts go to
         * the one worker thread */
        for (i = 0; i < BURST; i++)
                bufs[i]->hash.usr = 0;

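        /* process the burst, then flush until all packets have been
         * returned; flushing pushes along anything held in backlogs.
         */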
        rte_distributor_process(db, bufs, BURST);
        count = 0;
        do {
                rte_distributor_flush(db);
                count += rte_distributor_returned_pkts(db,
                                returns, BURST*2);
        } while (count < BURST);

        if (total_packet_count() != BURST) {
                printf("Line %d: Error, not all packets flushed. "
                                "Expected %u, got %u\n",
                                __LINE__, BURST, total_packet_count());
                return -1;
        }

        for (i = 0; i < rte_lcore_count() - 1; i++)
                printf("Worker %u handled %u packets\n", i,
                        __atomic_load_n(&worker_stats[i].handled_packets,
                                        __ATOMIC_RELAXED));
        printf("Sanity test with all zero hashes done.\n");

        /* pick two flows and check they go correctly */
        if (rte_lcore_count() >= 3) {
                clear_packet_count();
                for (i = 0; i < BURST; i++)
                        bufs[i]->hash.usr = (i & 1) << 8;

                rte_distributor_process(db, bufs, BURST);
                count = 0;
                do {
                        rte_distributor_flush(db);
                        count += rte_distributor_returned_pkts(db,
                                        returns, BURST*2);
                } while (count < BURST);
                if (total_packet_count() != BURST) {
                        printf("Line %d: Error, not all packets flushed. "
                                        "Expected %u, got %u\n",
                                        __LINE__, BURST, total_packet_count());
                        return -1;
                }

                for (i = 0; i < rte_lcore_count() - 1; i++)
                        printf("Worker %u handled %u packets\n", i,
                                __atomic_load_n(
                                        &worker_stats[i].handled_packets,
                                        __ATOMIC_RELAXED));
                printf("Sanity test with two hash values done\n");
        }

        /* give a different hash value to each packet,
         * so load gets distributed */
        clear_packet_count();
        for (i = 0; i < BURST; i++)
                bufs[i]->hash.usr = i+1;

        rte_distributor_process(db, bufs, BURST);
        count = 0;
        do {
                rte_distributor_flush(db);
                count += rte_distributor_returned_pkts(db,
                                returns, BURST*2);
        } while (count < BURST);
        if (total_packet_count() != BURST) {
                printf("Line %d: Error, not all packets flushed. "
                                "Expected %u, got %u\n",
                                __LINE__, BURST, total_packet_count());
                return -1;
        }

        for (i = 0; i < rte_lcore_count() - 1; i++)
                printf("Worker %u handled %u packets\n", i,
                        __atomic_load_n(&worker_stats[i].handled_packets,
                                        __ATOMIC_RELAXED));
        printf("Sanity test with non-zero hashes done\n");

        rte_mempool_put_bulk(p, (void *)bufs, BURST);

        /* sanity test with BIG_BATCH packets to ensure they all arrived back
         * from the returned packets function */
        clear_packet_count();
        struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
        unsigned num_returned = 0;

        /* flush out any remaining packets */
        rte_distributor_flush(db);
        rte_distributor_clear_returns(db);

        if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
                printf("line %d: Error getting mbufs from pool\n", __LINE__);
                return -1;
        }
        for (i = 0; i < BIG_BATCH; i++)
                many_bufs[i]->hash.usr = i << 2;

        printf("=== testing big burst (%s) ===\n", wp->name);
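        /* feed the batch through in BURST-sized chunks, draining any
         * returned packets after each chunk rather than all at the end.
         */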
        for (i = 0; i < BIG_BATCH/BURST; i++) {
                rte_distributor_process(db,
                                &many_bufs[i*BURST], BURST);
                count = rte_distributor_returned_pkts(db,
                                &return_bufs[num_returned],
                                BIG_BATCH - num_returned);
                num_returned += count;
        }
        rte_distributor_flush(db);
        count = rte_distributor_returned_pkts(db,
                        &return_bufs[num_returned],
                        BIG_BATCH - num_returned);
        num_returned += count;
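        /* keep flushing until everything is back, with a retry cap so a
         * genuinely lost packet fails the test instead of hanging it.
         */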
        retries = 0;
        do {
                rte_distributor_flush(db);
                count = rte_distributor_returned_pkts(db,
                                &return_bufs[num_returned],
                                BIG_BATCH - num_returned);
                num_returned += count;
                retries++;
        } while ((num_returned < BIG_BATCH) && (retries < 100));

        if (num_returned != BIG_BATCH) {
                printf("line %d: Missing packets, expected %d, got %d\n",
                                __LINE__, BIG_BATCH, num_returned);
                return -1;
        }

        /* big check - make sure all packets made it back!! */
        for (i = 0; i < BIG_BATCH; i++) {
                unsigned j;
                struct rte_mbuf *src = many_bufs[i];
                for (j = 0; j < BIG_BATCH; j++) {
                        if (return_bufs[j] == src)
                                break;
                }

                if (j == BIG_BATCH) {
                        printf("Error: could not find source packet #%u\n", i);
                        return -1;
                }
        }
        printf("Sanity test of returned packets done\n");

        rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);

        printf("\n");
        return 0;
}

/* to test that the distributor does not lose packets, we use this worker
 * function which frees mbufs when it gets them. The distributor thread does
 * the mbuf allocation. If the distributor drops packets we'll eventually run
 * out of mbufs.
 */
static int
handle_work_with_free_mbufs(void *arg)
{
        struct rte_mbuf *buf[8] __rte_cache_aligned;
        struct worker_params *wp = arg;
        struct rte_distributor *d = wp->dist;
        unsigned int i;
        unsigned int num;
        unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

        num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
        while (!quit) {
                __atomic_fetch_add(&worker_stats[id].handled_packets, num,
                                __ATOMIC_RELAXED);
                for (i = 0; i < num; i++)
                        rte_pktmbuf_free(buf[i]);
                num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
        }
        __atomic_fetch_add(&worker_stats[id].handled_packets, num,
                        __ATOMIC_RELAXED);
        rte_distributor_return_pkt(d, id, buf, num);
        return 0;
}

/* Perform a sanity test of the distributor with a large number of packets,
 * where we allocate a new set of mbufs for each burst. The workers then
 * free the mbufs. This ensures that we don't have any packet leaks in the
 * library.
 */
static int
sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
{
        struct rte_distributor *d = wp->dist;
        unsigned i;
        struct rte_mbuf *bufs[BURST];

        printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);

        clear_packet_count();
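        /* allocate and send bursts; if the pool runs dry, run the
         * distributor with an empty burst so the workers can catch up and
         * free mbufs back to the pool.
         */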
        for (i = 0; i < (1 << ITER_POWER); i += BURST) {
                unsigned j;
                while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
                        rte_distributor_process(d, NULL, 0);
                for (j = 0; j < BURST; j++) {
                        bufs[j]->hash.usr = (i+j) << 1;
                }

                rte_distributor_process(d, bufs, BURST);
        }

        rte_distributor_flush(d);

        rte_delay_us(10000);

        if (total_packet_count() < (1 << ITER_POWER)) {
                printf("Line %u: Packet count is incorrect, %u, expected %u\n",
                                __LINE__, total_packet_count(),
                                (1 << ITER_POWER));
                return -1;
        }

        printf("Sanity test with mbuf alloc/free passed\n\n");
        return 0;
}

static int
handle_work_for_shutdown_test(void *arg)
{
        struct rte_mbuf *buf[8] __rte_cache_aligned;
        struct worker_params *wp = arg;
        struct rte_distributor *d = wp->dist;
        unsigned int num;
        unsigned int zero_id = 0;
        unsigned int zero_unset;
        const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
                        __ATOMIC_RELAXED);

        num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

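        /* the first worker to actually receive packets elects itself
         * "worker zero" by swapping its id into zero_idx while it still
         * holds the unset sentinel (RTE_MAX_LCORE).
         */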
        if (num > 0) {
                zero_unset = RTE_MAX_LCORE;
                __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
                        false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
        }
        zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);

        /* wait for the quit signal globally, or, for worker zero, wait
         * for zero_quit */
        while (!quit && !(id == zero_id && zero_quit)) {
                __atomic_fetch_add(&worker_stats[id].handled_packets, num,
                                __ATOMIC_RELAXED);
                num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

                if (num > 0) {
                        zero_unset = RTE_MAX_LCORE;
                        __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
                                false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
                }
                zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
        }

        __atomic_fetch_add(&worker_stats[id].handled_packets, num,
                        __ATOMIC_RELAXED);
        if (id == zero_id) {
                rte_distributor_return_pkt(d, id, NULL, 0);

                /* for worker zero, allow it to restart to pick up the last
                 * packets when all workers are shutting down.
                 */
                while (zero_quit)
                        usleep(100);

                num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

                while (!quit) {
                        __atomic_fetch_add(&worker_stats[id].handled_packets,
                                        num, __ATOMIC_RELAXED);
                        num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
                }
        }
        rte_distributor_return_pkt(d, id, buf, num);
        return 0;
}

/* Perform a sanity test of the distributor when a worker shuts down
 * mid-test: two bursts of packets, all belonging to a single flow, are
 * sent while the worker handling that flow quits. The remaining workers
 * must pick up the backlog so that no packets are lost.
 */
static int
sanity_test_with_worker_shutdown(struct worker_params *wp,
                struct rte_mempool *p)
{
        struct rte_distributor *d = wp->dist;
        struct rte_mbuf *bufs[BURST];
        struct rte_mbuf *bufs2[BURST];
        unsigned int i;
        unsigned int failed = 0;

        printf("=== Sanity test of worker shutdown ===\n");

        clear_packet_count();

        if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
                printf("line %d: Error getting mbufs from pool\n", __LINE__);
                return -1;
        }

        /*
         * Now set all hash values in all buffers to the same value so all
         * pkts go to the one worker thread
         */
        for (i = 0; i < BURST; i++)
                bufs[i]->hash.usr = 1;

        rte_distributor_process(d, bufs, BURST);
        rte_distributor_flush(d);

        /* at this point, we will have processed some packets and have a full
         * backlog for the other ones at worker 0.
         */

        /* get more buffers to queue up, again setting them to the same flow */
        if (rte_mempool_get_bulk(p, (void *)bufs2, BURST) != 0) {
                printf("line %d: Error getting mbufs from pool\n", __LINE__);
                rte_mempool_put_bulk(p, (void *)bufs, BURST);
                return -1;
        }
        for (i = 0; i < BURST; i++)
                bufs2[i]->hash.usr = 1;

        /* get worker zero to quit */
        zero_quit = 1;
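        /* with worker zero quitting, this burst should force the distributor
         * to reassign its backlog to the remaining workers.
         */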
        rte_distributor_process(d, bufs2, BURST);

        /* flush the distributor */
        rte_distributor_flush(d);
        rte_delay_us(10000);

        for (i = 0; i < rte_lcore_count() - 1; i++)
                printf("Worker %u handled %u packets\n", i,
                        __atomic_load_n(&worker_stats[i].handled_packets,
                                        __ATOMIC_RELAXED));

        if (total_packet_count() != BURST * 2) {
                printf("Line %d: Error, not all packets flushed. "
                                "Expected %u, got %u\n",
                                __LINE__, BURST * 2, total_packet_count());
                failed = 1;
        }

        rte_mempool_put_bulk(p, (void *)bufs, BURST);
        rte_mempool_put_bulk(p, (void *)bufs2, BURST);

        if (failed)
                return -1;

        printf("Sanity test with worker shutdown passed\n\n");
        return 0;
}

/* Test that the flush function is able to move packets between workers when
 * one worker shuts down.
 */
static int
test_flush_with_worker_shutdown(struct worker_params *wp,
                struct rte_mempool *p)
{
        struct rte_distributor *d = wp->dist;
        struct rte_mbuf *bufs[BURST];
        unsigned int i;
        unsigned int failed = 0;

        printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);

        clear_packet_count();
        if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
                printf("line %d: Error getting mbufs from pool\n", __LINE__);
                return -1;
        }

        /* now set all hash values in all buffers to zero, so all pkts go to
         * the one worker thread */
        for (i = 0; i < BURST; i++)
                bufs[i]->hash.usr = 0;

        rte_distributor_process(d, bufs, BURST);
        /* at this point, we will have processed some packets and have a full
         * backlog for the other ones at worker 0.
         */

        /* get worker zero to quit */
        zero_quit = 1;

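        /* flushing now, with worker zero stalled, should move its backlog to
         * the remaining workers; the packet count check below verifies it.
         */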
        rte_distributor_flush(d);

        rte_delay_us(10000);

        zero_quit = 0;
        for (i = 0; i < rte_lcore_count() - 1; i++)
                printf("Worker %u handled %u packets\n", i,
                        __atomic_load_n(&worker_stats[i].handled_packets,
                                        __ATOMIC_RELAXED));

        if (total_packet_count() != BURST) {
                printf("Line %d: Error, not all packets flushed. "
                                "Expected %u, got %u\n",
                                __LINE__, BURST, total_packet_count());
                failed = 1;
        }

        rte_mempool_put_bulk(p, (void *)bufs, BURST);

        if (failed)
                return -1;

        printf("Flush test with worker shutdown passed\n\n");
        return 0;
}

static
int test_error_distributor_create_name(void)
{
        struct rte_distributor *d = NULL;
        struct rte_distributor *db = NULL;
        char *name = NULL;

        d = rte_distributor_create(name, rte_socket_id(),
                        rte_lcore_count() - 1,
                        RTE_DIST_ALG_SINGLE);
        if (d != NULL || rte_errno != EINVAL) {
                printf("ERROR: No error on create() with NULL name param\n");
                return -1;
        }

        db = rte_distributor_create(name, rte_socket_id(),
                        rte_lcore_count() - 1,
                        RTE_DIST_ALG_BURST);
        if (db != NULL || rte_errno != EINVAL) {
                printf("ERROR: No error on create() with NULL name param\n");
                return -1;
        }

        return 0;
}

static
int test_error_distributor_create_numworkers(void)
{
        struct rte_distributor *ds = NULL;
        struct rte_distributor *db = NULL;

        ds = rte_distributor_create("test_numworkers", rte_socket_id(),
                        RTE_MAX_LCORE + 10,
                        RTE_DIST_ALG_SINGLE);
        if (ds != NULL || rte_errno != EINVAL) {
                printf("ERROR: No error on create() with num_workers > MAX\n");
                return -1;
        }

        db = rte_distributor_create("test_numworkers", rte_socket_id(),
                        RTE_MAX_LCORE + 10,
                        RTE_DIST_ALG_BURST);
        if (db != NULL || rte_errno != EINVAL) {
                printf("ERROR: No error on create() with num_workers > MAX\n");
                return -1;
        }

        return 0;
}

/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct worker_params *wp, struct rte_mempool *p)
{
        struct rte_distributor *d = wp->dist;
        const unsigned num_workers = rte_lcore_count() - 1;
        unsigned i;
        struct rte_mbuf *bufs[RTE_MAX_LCORE];

        if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
                printf("line %d: Error getting mbufs from pool\n", __LINE__);
                return;
        }

        zero_quit = 0;
        quit = 1;
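        /* send one packet per worker so that anyone blocked inside
         * rte_distributor_get_pkt() wakes up and can observe quit.
         */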
        for (i = 0; i < num_workers; i++)
                bufs[i]->hash.usr = i << 1;
        rte_distributor_process(d, bufs, num_workers);

        rte_distributor_process(d, NULL, 0);
        rte_distributor_flush(d);
        rte_eal_mp_wait_lcore();

        rte_mempool_put_bulk(p, (void *)bufs, num_workers);

        quit = 0;
        worker_idx = 0;
        zero_idx = RTE_MAX_LCORE;
}

static int
test_distributor(void)
{
        static struct rte_distributor *ds;
        static struct rte_distributor *db;
        static struct rte_distributor *dist[2];
        static struct rte_mempool *p;
        int i;

        if (rte_lcore_count() < 2) {
                printf("Not enough cores for distributor_autotest, expecting at least 2\n");
                return TEST_SKIPPED;
        }

        if (db == NULL) {
                db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
                                rte_lcore_count() - 1,
                                RTE_DIST_ALG_BURST);
                if (db == NULL) {
                        printf("Error creating burst distributor\n");
                        return -1;
                }
        } else {
                rte_distributor_flush(db);
                rte_distributor_clear_returns(db);
        }

        if (ds == NULL) {
                ds = rte_distributor_create("Test_dist_single",
                                rte_socket_id(),
                                rte_lcore_count() - 1,
                                RTE_DIST_ALG_SINGLE);
                if (ds == NULL) {
                        printf("Error creating single distributor\n");
                        return -1;
                }
        } else {
                rte_distributor_flush(ds);
                rte_distributor_clear_returns(ds);
        }

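        /* pool sizing: at least (BIG_BATCH * 2) - 1 mbufs, or 511 per lcore
         * if that is larger, presumably to cover the BIG_BATCH test plus
         * headroom for the mempool's per-lcore caches.
         */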
        const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
                        (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
        if (p == NULL) {
                p = rte_pktmbuf_pool_create("DT_MBUF_POOL", nb_bufs, BURST,
                        0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
                if (p == NULL) {
                        printf("Error creating mempool\n");
                        return -1;
                }
        }

        dist[0] = ds;
        dist[1] = db;

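        /* run the full suite twice: first over the single-packet
         * distributor, then over the burst distributor.
         */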
        for (i = 0; i < 2; i++) {
                worker_params.dist = dist[i];
                if (i)
                        strlcpy(worker_params.name, "burst",
                                        sizeof(worker_params.name));
                else
                        strlcpy(worker_params.name, "single",
                                        sizeof(worker_params.name));

                rte_eal_mp_remote_launch(handle_work,
                                &worker_params, SKIP_MASTER);
                if (sanity_test(&worker_params, p) < 0)
                        goto err;
                quit_workers(&worker_params, p);

                rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
                                &worker_params, SKIP_MASTER);
                if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
                        goto err;
                quit_workers(&worker_params, p);

                if (rte_lcore_count() > 2) {
                        rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
                                        &worker_params,
                                        SKIP_MASTER);
                        if (sanity_test_with_worker_shutdown(&worker_params,
                                        p) < 0)
                                goto err;
                        quit_workers(&worker_params, p);

                        rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
                                        &worker_params,
                                        SKIP_MASTER);
                        if (test_flush_with_worker_shutdown(&worker_params,
                                        p) < 0)
                                goto err;
                        quit_workers(&worker_params, p);
                } else {
                        printf("Too few cores to run worker shutdown test\n");
                }
        }

        if (test_error_distributor_create_numworkers() == -1 ||
                        test_error_distributor_create_name() == -1) {
                printf("rte_distributor_create parameter check tests failed\n");
                return -1;
        }

        return 0;

err:
        quit_workers(&worker_params, p);
        return -1;
}

REGISTER_TEST_COMMAND(distributor_autotest, test_distributor);