/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include "test.h"

#include <unistd.h>
#include <string.h>

#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_distributor.h>
#include <rte_string_fns.h>
#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32
#define BIG_BATCH 1024
struct worker_params {
	char name[64];
	struct rte_distributor *dist;
};

struct worker_params worker_params;
/* statics - all zero-initialized by default */
static volatile int quit;      /**< general quit variable for all threads */
static volatile int zero_quit; /**< var for when we just want thr0 to quit */
static volatile unsigned worker_idx;
static volatile unsigned zero_idx;
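
/* Per-worker packet counters. The structure is cache aligned so that each
 * worker's counter occupies its own cache line, avoiding false sharing when
 * several lcores update their counts concurrently.
 */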
struct worker_stats {
	volatile unsigned handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
/* returns the total count of the number of packets handled by the worker
 * functions given below.
 */
static inline unsigned
total_packet_count(void)
{
	unsigned i, count = 0;
	for (i = 0; i < worker_idx; i++)
		count += worker_stats[i].handled_packets;
	return count;
}
/* resets the packet counts for a new test */
static inline void
clear_packet_count(void)
{
	memset(&worker_stats, 0, sizeof(worker_stats));
}
/* this is the basic worker function for the sanity tests;
 * it does nothing but return packets and count them.
 */
static int
handle_work(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *db = wp->dist;
	unsigned int num;
	unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
	while (!quit) {
		__atomic_fetch_add(&worker_stats[id].handled_packets, num,
				__ATOMIC_RELAXED);
		num = rte_distributor_get_pkt(db, id,
				buf, buf, num);
	}
	__atomic_fetch_add(&worker_stats[id].handled_packets, num,
			__ATOMIC_RELAXED);
	rte_distributor_return_pkt(db, id, buf, num);
	return 0;
}
/* do basic sanity testing of the distributor. This test checks the following:
 * - send 32 packets through the distributor with the same tag and ensure they
 *   all go to the one worker
 * - send 32 packets through the distributor with two different tags and
 *   verify that they go equally to two different workers.
 * - send 32 packets with different tags through the distributor and
 *   just verify we get all packets back.
 * - send 1024 packets through the distributor, gathering the returned packets
 *   as we go. Then verify that we correctly got all 1024 pointers back again,
 *   not necessarily in the same order (as different flows).
 */
static int
sanity_test(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *db = wp->dist;
	struct rte_mbuf *bufs[BURST];
	struct rte_mbuf *returns[BURST*2];
	unsigned int i, count;
	unsigned int retries;

	printf("=== Basic distributor sanity tests ===\n");
	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread
	 */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with all zero hashes done.\n");

	/* pick two flows and check they go correctly */
	if (rte_lcore_count() >= 3) {
		clear_packet_count();
		for (i = 0; i < BURST; i++)
			bufs[i]->hash.usr = (i & 1) << 8;
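		/* alternate packets now carry one of two distinct tag values
		 * (0 and 0x100); with at least two workers available the two
		 * flows should end up on two different workers.
		 */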

		rte_distributor_process(db, bufs, BURST);
		count = 0;
		do {
			rte_distributor_flush(db);
			count += rte_distributor_returned_pkts(db,
					returns, BURST*2);
		} while (count < BURST);
		if (total_packet_count() != BURST) {
			printf("Line %d: Error, not all packets flushed. "
					"Expected %u, got %u\n",
					__LINE__, BURST, total_packet_count());
			return -1;
		}

		for (i = 0; i < rte_lcore_count() - 1; i++)
			printf("Worker %u handled %u packets\n", i,
					worker_stats[i].handled_packets);
		printf("Sanity test with two hash values done\n");
	}

	/* give a different hash value to each packet,
	 * so load gets distributed
	 */
	clear_packet_count();
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = i+1;

	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with non-zero hashes done\n");

	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	/* sanity test with BIG_BATCH packets to ensure they all arrived back
	 * from the returned packets function
	 */
	clear_packet_count();
	struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
	unsigned num_returned = 0;

	/* flush out any remaining packets */
	rte_distributor_flush(db);
	rte_distributor_clear_returns(db);

	if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	for (i = 0; i < BIG_BATCH; i++)
		many_bufs[i]->hash.usr = i << 2;
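	/* every packet in the batch now has a unique tag, so each returned
	 * pointer below can be matched one-to-one against its source mbuf.
	 */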

	printf("=== testing big burst (%s) ===\n", wp->name);
	for (i = 0; i < BIG_BATCH/BURST; i++) {
		rte_distributor_process(db,
				&many_bufs[i*BURST], BURST);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
	}
	rte_distributor_flush(db);
	count = rte_distributor_returned_pkts(db,
			&return_bufs[num_returned],
			BIG_BATCH - num_returned);
	num_returned += count;
	retries = 0;
	do {
		rte_distributor_flush(db);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
		retries++;
	} while ((num_returned < BIG_BATCH) && (retries < 100));

	if (num_returned != BIG_BATCH) {
		printf("line %d: Missing packets, expected %d, got %d\n",
				__LINE__, BIG_BATCH, num_returned);
		return -1;
	}

	/* big check - make sure all packets made it back!! */
	for (i = 0; i < BIG_BATCH; i++) {
		unsigned j;
		struct rte_mbuf *src = many_bufs[i];
		for (j = 0; j < BIG_BATCH; j++) {
			if (return_bufs[j] == src)
				break;
		}

		if (j == BIG_BATCH) {
			printf("Error: could not find source packet #%u\n", i);
			return -1;
		}
	}
	printf("Sanity test of returned packets done\n");

	rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);

	return 0;
}

/* to test that the distributor does not lose packets, we use this worker
 * function which frees mbufs when it gets them. The distributor thread does
 * the mbuf allocation. If the distributor drops packets we'll eventually run
 * out of mbufs and the test will fail.
 */
static int
handle_work_with_free_mbufs(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int i;
	unsigned int num;
	unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
	while (!quit) {
		worker_stats[id].handled_packets += num;
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
	}
	worker_stats[id].handled_packets += num;
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}

/* Perform a sanity test of the distributor with a large number of packets,
 * where we allocate a new set of mbufs for each burst. The workers then
 * free the mbufs. This ensures that we don't have any packet leaks in the
 * library.
 */
static int
sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	unsigned i;
	struct rte_mbuf *bufs[BURST];

	printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);

	clear_packet_count();
	for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
		unsigned j;
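		/* if the pool runs dry, keep calling process() with no new
		 * packets so the workers can hand back and free the mbufs
		 * they hold, refilling the pool.
		 */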
		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
			rte_distributor_process(d, NULL, 0);
		for (j = 0; j < BURST; j++) {
			bufs[j]->hash.usr = (i+j) << 1;
		}
		rte_distributor_process(d, bufs, BURST);
	}

	rte_distributor_flush(d);

	if (total_packet_count() < (1<<ITER_POWER)) {
		printf("Line %u: Packet count is incorrect, %u, expected %u\n",
				__LINE__, total_packet_count(),
				(1<<ITER_POWER));
		return -1;
	}

	printf("Sanity test with mbuf alloc/free passed\n\n");
	return 0;
}

static int
handle_work_for_shutdown_test(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int num;
	unsigned int zero_id = 0;
	unsigned int zero_unset;
	const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
			__ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
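
	/* the first worker to actually receive packets claims the "zero"
	 * slot: the compare-exchange only succeeds while zero_idx still holds
	 * its unset value (RTE_MAX_LCORE). That worker is the one the
	 * shutdown tests later ask to quit via zero_quit.
	 */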
	if (num > 0) {
		zero_unset = RTE_MAX_LCORE;
		__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
			false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
	}
	zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);

	/* wait for the quit signal globally, or, for worker zero, wait
	 * for zero_quit
	 */
	while (!quit && !(id == zero_id && zero_quit)) {
		worker_stats[id].handled_packets += num;
		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

		if (num > 0) {
			zero_unset = RTE_MAX_LCORE;
			__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
				false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
		}
		zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
	}
	worker_stats[id].handled_packets += num;

	if (id == zero_id) {
		rte_distributor_return_pkt(d, id, NULL, 0);

		/* for worker zero, allow it to restart to pick up last packet
		 * when all workers are shutting down.
		 */
		while (zero_quit)
			usleep(100);

		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

		while (!quit) {
			worker_stats[id].handled_packets += num;
			num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
		}
	}
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}

/* Sanity test of worker shutdown: queue up a full backlog of packets for a
 * single worker, get that worker to quit, and check that the remaining
 * workers still handle all of the packets, i.e. none are lost.
 */
static int
sanity_test_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	struct rte_mbuf *bufs2[BURST];
	unsigned int i;
	unsigned int failed = 0;

	printf("=== Sanity test of worker shutdown ===\n");

	clear_packet_count();

	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/*
	 * Now set all hash values in all buffers to same value so all
	 * pkts go to the one worker thread
	 */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 1;

	rte_distributor_process(d, bufs, BURST);
	rte_distributor_flush(d);

	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get more buffers to queue up, again setting them to the same flow */
	if (rte_mempool_get_bulk(p, (void *)bufs2, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		rte_mempool_put_bulk(p, (void *)bufs, BURST);
		return -1;
	}
	for (i = 0; i < BURST; i++)
		bufs2[i]->hash.usr = 1;

	/* get worker zero to quit */
	zero_quit = 1;
	rte_distributor_process(d, bufs2, BURST);

	/* flush the distributor */
	rte_distributor_flush(d);
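
	/* once worker zero has stopped taking packets, the flush has to
	 * redistribute its backlog to the remaining workers, so all 2*BURST
	 * packets should still show up in the counters below.
	 */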
	zero_quit = 0;

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST * 2) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST * 2, total_packet_count());
		failed = 1;
	}

	rte_mempool_put_bulk(p, (void *)bufs, BURST);
	rte_mempool_put_bulk(p, (void *)bufs2, BURST);

	if (failed)
		return -1;

	printf("Sanity test with worker shutdown passed\n\n");
	return 0;
}

/* Test that the flush function is able to move packets between workers when
 * one worker shuts down.
 */
static int
test_flush_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	unsigned int i;
	unsigned int failed = 0;

	printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);

	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread
	 */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(d, bufs, BURST);
	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get worker zero to quit */
	zero_quit = 1;

	/* flush the distributor */
	rte_distributor_flush(d);

	zero_quit = 0;

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		failed = 1;
	}

	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	if (failed)
		return -1;

	printf("Flush test with worker shutdown passed\n\n");
	return 0;
}

static
int test_error_distributor_create_name(void)
{
	struct rte_distributor *d = NULL;
	struct rte_distributor *db = NULL;
	char *name = NULL;

	d = rte_distributor_create(name, rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_SINGLE);
	if (d != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with NULL name param\n");
		return -1;
	}

	db = rte_distributor_create(name, rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_BURST);
	if (db != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with NULL param\n");
		return -1;
	}

	return 0;
}

static
int test_error_distributor_create_numworkers(void)
{
	struct rte_distributor *ds = NULL;
	struct rte_distributor *db = NULL;

	ds = rte_distributor_create("test_numworkers", rte_socket_id(),
			RTE_MAX_LCORE + 10,
			RTE_DIST_ALG_SINGLE);
	if (ds != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with num_workers > MAX\n");
		return -1;
	}

	db = rte_distributor_create("test_numworkers", rte_socket_id(),
			RTE_MAX_LCORE + 10,
			RTE_DIST_ALG_BURST);
	if (db != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() num_workers > MAX\n");
		return -1;
	}

	return 0;
}

/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	const unsigned num_workers = rte_lcore_count() - 1;
	unsigned i;
	struct rte_mbuf *bufs[RTE_MAX_LCORE];

	if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return;
	}
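
	/* raise the quit flags, then push one mbuf per worker (each with its
	 * own tag) so every worker returns from rte_distributor_get_pkt(),
	 * observes the flag and exits.
	 */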
	zero_quit = 0;
	quit = 1;
	for (i = 0; i < num_workers; i++)
		bufs[i]->hash.usr = i << 1;
	rte_distributor_process(d, bufs, num_workers);

	rte_distributor_process(d, NULL, 0);
	rte_distributor_flush(d);
	rte_eal_mp_wait_lcore();

	rte_mempool_put_bulk(p, (void *)bufs, num_workers);

	quit = 0;
	worker_idx = 0;
	zero_idx = RTE_MAX_LCORE;
}

static int
test_distributor(void)
{
	static struct rte_distributor *ds;
	static struct rte_distributor *db;
	static struct rte_distributor *dist[2];
	static struct rte_mempool *p;
	int i;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for distributor_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	if (db == NULL) {
		db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_BURST);
		if (db == NULL) {
			printf("Error creating burst distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(db);
		rte_distributor_clear_returns(db);
	}

	if (ds == NULL) {
		ds = rte_distributor_create("Test_dist_single",
				rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_SINGLE);
		if (ds == NULL) {
			printf("Error creating single distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(ds);
		rte_distributor_clear_returns(ds);
	}
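
	/* pool sizing heuristic: make sure there are enough mbufs for the
	 * BIG_BATCH test, and scale with the lcore count so packets held by
	 * individual workers do not exhaust the pool.
	 */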
	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());

	if (p == NULL) {
		p = rte_pktmbuf_pool_create("DT_MBUF_POOL", nb_bufs, BURST,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
		if (p == NULL) {
			printf("Error creating mempool\n");
			return -1;
		}
	}

	dist[0] = ds;
	dist[1] = db;

	for (i = 0; i < 2; i++) {

		worker_params.dist = dist[i];
		if (i)
			strlcpy(worker_params.name, "burst",
					sizeof(worker_params.name));
		else
			strlcpy(worker_params.name, "single",
					sizeof(worker_params.name));

		rte_eal_mp_remote_launch(handle_work,
				&worker_params, SKIP_MASTER);
		if (sanity_test(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
				&worker_params, SKIP_MASTER);
		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		if (rte_lcore_count() > 2) {
			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params,
					SKIP_MASTER);
			if (sanity_test_with_worker_shutdown(&worker_params,
					p) < 0)
				goto err;
			quit_workers(&worker_params, p);

			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params,
					SKIP_MASTER);
			if (test_flush_with_worker_shutdown(&worker_params,
					p) < 0)
				goto err;
			quit_workers(&worker_params, p);

		} else {
			printf("Too few cores to run worker shutdown test\n");
		}
	}

	if (test_error_distributor_create_numworkers() == -1 ||
			test_error_distributor_create_name() == -1) {
		printf("rte_distributor_create parameter check tests failed\n");
		return -1;
	}

	return 0;

err:
	quit_workers(&worker_params, p);
	return -1;
}

REGISTER_TEST_COMMAND(distributor_autotest, test_distributor);