/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lcore.h>
#include <rte_launch.h>
#include <rte_distributor.h>
#include <rte_string_fns.h>

#include "test.h"
#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32      /* packets per burst; the tests below send 32-packet bursts */
#define BIG_BATCH 1024
struct worker_params {
	char name[64];
	struct rte_distributor *dist;
};

struct worker_params worker_params;
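
/* A single worker_params instance is shared by all launched workers: each
 * test fills in the distributor handle and a printable name before calling
 * rte_eal_mp_remote_launch().
 */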
/* statics - all zero-initialized by default */
static volatile int quit;      /**< general quit variable for all threads */
static volatile int zero_quit; /**< var for when we just want thr0 to quit */
static volatile unsigned worker_idx;
static volatile unsigned zero_idx;
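
/* Per-worker statistics. The struct is cache-line aligned so that each
 * worker's counter sits on its own cache line and updates from different
 * lcores do not false-share.
 */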
struct worker_stats {
	volatile unsigned handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
/* returns the total count of the number of packets handled by the worker
 * functions given below.
 */
static inline unsigned
total_packet_count(void)
{
	unsigned i, count = 0;
	for (i = 0; i < worker_idx; i++)
		count += worker_stats[i].handled_packets;
	return count;
}
/* resets the packet counts for a new test */
static void
clear_packet_count(void)
{
	memset(&worker_stats, 0, sizeof(worker_stats));
}
/* this is the basic worker function for sanity test
 * it does nothing but return packets and count them.
 */
static int
handle_work(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *db = wp->dist;
	unsigned int count = 0, num;
	unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
	while (!quit) {
		__atomic_fetch_add(&worker_stats[id].handled_packets, num,
				__ATOMIC_RELAXED);
		count += num;
		num = rte_distributor_get_pkt(db, id,
				buf, buf, num);
	}
	__atomic_fetch_add(&worker_stats[id].handled_packets, num,
			__ATOMIC_RELAXED);
	count += num;
	rte_distributor_return_pkt(db, id, buf, num);
	return 0;
}
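
/* Note on the worker API used above: rte_distributor_get_pkt() hands the
 * previously received burst back to the distributor (the oldpkt/retcount
 * arguments) while requesting a new one, and rte_distributor_return_pkt()
 * returns the final burst when the worker exits. The per-worker counters are
 * bumped with relaxed atomics since only the eventual totals matter.
 */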
/* do basic sanity testing of the distributor. This test tests the following:
 * - send 32 packets through distributor with the same tag and ensure they
 *   all go to the one worker
 * - send 32 packets through the distributor with two different tags and
 *   verify that they go equally to two different workers.
 * - send 32 packets with different tags through the distributors and
 *   just verify we get all packets back.
 * - send 1024 packets through the distributor, gathering the returned packets
 *   as we go. Then verify that we correctly got all 1024 pointers back again,
 *   not necessarily in the same order (as different flows).
 */
static int
sanity_test(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *db = wp->dist;
	struct rte_mbuf *bufs[BURST];
	struct rte_mbuf *returns[BURST*2];
	unsigned int i, count;
	unsigned int retries;
104 printf("=== Basic distributor sanity tests ===\n");
105 clear_packet_count();
106 if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
107 printf("line %d: Error getting mbufs from pool\n", __LINE__);
111 /* now set all hash values in all buffers to zero, so all pkts go to the
112 * one worker thread */
113 for (i = 0; i < BURST; i++)
114 bufs[i]->hash.usr = 0;
	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with all zero hashes done.\n");
	/* pick two flows and check they go correctly */
	if (rte_lcore_count() >= 3) {
		clear_packet_count();
		for (i = 0; i < BURST; i++)
			bufs[i]->hash.usr = (i & 1) << 8;

		rte_distributor_process(db, bufs, BURST);
		count = 0;
		do {
			rte_distributor_flush(db);
			count += rte_distributor_returned_pkts(db,
					returns, BURST*2);
		} while (count < BURST);
		if (total_packet_count() != BURST) {
			printf("Line %d: Error, not all packets flushed. "
					"Expected %u, got %u\n",
					__LINE__, BURST, total_packet_count());
			return -1;
		}

		for (i = 0; i < rte_lcore_count() - 1; i++)
			printf("Worker %u handled %u packets\n", i,
					worker_stats[i].handled_packets);
		printf("Sanity test with two hash values done\n");
	}
	/* give a different hash value to each packet,
	 * so load gets distributed */
	clear_packet_count();
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = i+1;

	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);
	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with non-zero hashes done\n");
	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	/* sanity test with BIG_BATCH packets to ensure they all arrived back
	 * from the returned packets function */
	clear_packet_count();
	struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
	unsigned num_returned = 0;

	/* flush out any remaining packets */
	rte_distributor_flush(db);
	rte_distributor_clear_returns(db);

	if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	for (i = 0; i < BIG_BATCH; i++)
		many_bufs[i]->hash.usr = i << 2;

	printf("=== testing big burst (%s) ===\n", wp->name);
	for (i = 0; i < BIG_BATCH/BURST; i++) {
		rte_distributor_process(db,
				&many_bufs[i*BURST], BURST);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
	}
	rte_distributor_flush(db);
	count = rte_distributor_returned_pkts(db,
			&return_bufs[num_returned],
			BIG_BATCH - num_returned);
	num_returned += count;

	retries = 0;
	do {
		rte_distributor_flush(db);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
		retries++;
	} while ((num_returned < BIG_BATCH) && (retries < 100));
	if (num_returned != BIG_BATCH) {
		printf("line %d: Missing packets, expected %d got %d\n",
				__LINE__, BIG_BATCH, num_returned);
		return -1;
	}

	/* big check - make sure all packets made it back!! */
	for (i = 0; i < BIG_BATCH; i++) {
		unsigned j;
		struct rte_mbuf *src = many_bufs[i];
		for (j = 0; j < BIG_BATCH; j++) {
			if (return_bufs[j] == src)
				break;
		}
		if (j == BIG_BATCH) {
			printf("Error: could not find source packet #%u\n", i);
			return -1;
		}
	}
	printf("Sanity test of returned packets done\n");

	rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
	return 0;
}
/* to test that the distributor does not lose packets, we use this worker
 * function which frees mbufs when it gets them. The distributor thread does
 * the mbuf allocation. If distributor drops packets we'll eventually run out
 * of mbufs.
 */
static int
handle_work_with_free_mbufs(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int count = 0;
	unsigned int i;
	unsigned int num;
	unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
	while (!quit) {
		worker_stats[id].handled_packets += num;
		count += num;
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
	}
	worker_stats[id].handled_packets += num;
	count += num;
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}
/* Perform a sanity test of the distributor with a large number of packets,
 * where we allocate a new set of mbufs for each burst. The workers then
 * free the mbufs. This ensures that we don't have any packet leaks in the
 * library.
 */
static int
sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	unsigned int i;
	struct rte_mbuf *bufs[BURST];

	printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);

	clear_packet_count();
	for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
		unsigned int j;
		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
			rte_distributor_process(d, NULL, 0);
		for (j = 0; j < BURST; j++) {
			bufs[j]->hash.usr = (i+j) << 1;
			rte_mbuf_refcnt_set(bufs[j], 1);
		}
		rte_distributor_process(d, bufs, BURST);
	}

	rte_distributor_flush(d);

	if (total_packet_count() < (1<<ITER_POWER)) {
		printf("Line %u: Packet count is incorrect, %u, expected %u\n",
				__LINE__, total_packet_count(),
				(1<<ITER_POWER));
		return -1;
	}

	printf("Sanity test with mbuf alloc/free passed\n\n");
	return 0;
}
static int
handle_work_for_shutdown_test(void *arg)
{
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int count = 0;
	unsigned int num;
	unsigned int total = 0;
	unsigned int i;
	unsigned int returned = 0;
	unsigned int zero_id = 0;
	unsigned int zero_unset;
	const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
			__ATOMIC_RELAXED);

	num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

	if (num > 0) {
		zero_unset = RTE_MAX_LCORE;
		__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
			false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
	}
	zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);

	/* wait for quit signal globally, or for worker zero, wait
	 * for zero_quit */
	while (!quit && !(id == zero_id && zero_quit)) {
		worker_stats[id].handled_packets += num;
		count += num;
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

		if (num > 0) {
			zero_unset = RTE_MAX_LCORE;
			__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
				false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
		}
		zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);

		total += num;
	}
	worker_stats[id].handled_packets += num;
	count += num;
	returned = rte_distributor_return_pkt(d, id, buf, num);

	if (id == zero_id) {
		/* for worker zero, allow it to restart to pick up last packet
		 * when all workers are shutting down.
		 */
		while (zero_quit)
			rte_delay_us(100);

		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);

		while (!quit) {
			worker_stats[id].handled_packets += num;
			count += num;
			rte_pktmbuf_free(pkt);
			num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
		}
		returned = rte_distributor_return_pkt(d,
				id, buf, num);
		printf("Num returned = %d\n", returned);
	}
	return 0;
}
/* Perform a sanity test of the distributor when a worker shuts down
 * mid-stream: queue a backlog of same-flow packets on one worker, make that
 * worker quit, then verify that the remaining workers still receive and
 * handle every packet, i.e. nothing is lost on worker shutdown.
 */
static int
sanity_test_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	unsigned int i;

	printf("=== Sanity test of worker shutdown ===\n");
	clear_packet_count();

	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/*
	 * Now set all hash values in all buffers to same value so all
	 * pkts go to the one worker thread
	 */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 1;

	rte_distributor_process(d, bufs, BURST);
	rte_distributor_flush(d);

	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get more buffers to queue up, again setting them to the same flow */
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 1;

	/* get worker zero to quit */
	zero_quit = 1;
	rte_distributor_process(d, bufs, BURST);

	/* flush the distributor */
	rte_distributor_flush(d);
	zero_quit = 0;

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST * 2) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST * 2, total_packet_count());
		return -1;
	}

	printf("Sanity test with worker shutdown passed\n\n");
	return 0;
}
/* Test that the flush function is able to move packets between workers when
 * one worker shuts down.
 */
static int
test_flush_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	unsigned int i;

	printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);
	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(d, bufs, BURST);
	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get worker zero to quit */
	zero_quit = 1;

	/* flush the distributor */
	rte_distributor_flush(d);
	zero_quit = 0;

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	printf("Flush test with worker shutdown passed\n\n");
	return 0;
}
static int test_error_distributor_create_name(void)
{
	struct rte_distributor *d = NULL;
	struct rte_distributor *db = NULL;
	char *name = NULL;

	d = rte_distributor_create(name, rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_SINGLE);
	if (d != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with NULL name param\n");
		return -1;
	}

	db = rte_distributor_create(name, rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_BURST);
	if (db != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with NULL name param\n");
		return -1;
	}

	return 0;
}
static int test_error_distributor_create_numworkers(void)
{
	struct rte_distributor *ds = NULL;
	struct rte_distributor *db = NULL;

	ds = rte_distributor_create("test_numworkers", rte_socket_id(),
			RTE_MAX_LCORE + 10, RTE_DIST_ALG_SINGLE);
	if (ds != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with num_workers > MAX\n");
		return -1;
	}

	db = rte_distributor_create("test_numworkers", rte_socket_id(),
			RTE_MAX_LCORE + 10, RTE_DIST_ALG_BURST);
	if (db != NULL || rte_errno != EINVAL) {
		printf("ERROR: No error on create() with num_workers > MAX\n");
		return -1;
	}
	return 0;
}
/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	const unsigned num_workers = rte_lcore_count() - 1;
	unsigned int i;
	struct rte_mbuf *bufs[RTE_MAX_LCORE];
	rte_mempool_get_bulk(p, (void *)bufs, num_workers);

	zero_quit = 0;
	quit = 1;
	/* wake every worker with one uniquely tagged packet so it sees quit */
	for (i = 0; i < num_workers; i++)
		bufs[i]->hash.usr = i << 1;
	rte_distributor_process(d, bufs, num_workers);

	rte_mempool_put_bulk(p, (void *)bufs, num_workers);
	rte_distributor_process(d, NULL, 0);
	rte_distributor_flush(d);
	rte_eal_mp_wait_lcore();

	/* reset the shared state for the next test run */
	quit = 0;
	worker_idx = 0;
	zero_idx = RTE_MAX_LCORE;
}
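
/* Entry point for distributor_autotest: every sub-test is run twice, once
 * against the legacy single-packet algorithm and once against the burst
 * algorithm, reusing the same mempool and worker lcores.
 */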
static int
test_distributor(void)
{
	static struct rte_distributor *ds;
	static struct rte_distributor *db;
	static struct rte_distributor *dist[2];
	static struct rte_mempool *p;
	int i;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for distributor_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}
	db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_BURST);
	if (db == NULL) {
		printf("Error creating burst distributor\n");
		return -1;
	}
	rte_distributor_flush(db);
	rte_distributor_clear_returns(db);

	ds = rte_distributor_create("Test_dist_single",
			rte_socket_id(),
			rte_lcore_count() - 1,
			RTE_DIST_ALG_SINGLE);
	if (ds == NULL) {
		printf("Error creating single distributor\n");
		return -1;
	}
	rte_distributor_flush(ds);
	rte_distributor_clear_returns(ds);
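
	/* Pool sizing: take the larger of just under 2 * BIG_BATCH mbufs or
	 * 511 mbufs per lcore, so the BIG_BATCH test and the per-lcore
	 * mempool caches never run the pool dry.
	 */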
	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
	p = rte_pktmbuf_pool_create("DT_MBUF_POOL", nb_bufs, BURST,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (p == NULL) {
		printf("Error creating mempool\n");
		return -1;
	}

	dist[0] = ds;
	dist[1] = db;

	for (i = 0; i < 2; i++) {
		worker_params.dist = dist[i];
		if (i)
			strlcpy(worker_params.name, "burst",
					sizeof(worker_params.name));
		else
			strlcpy(worker_params.name, "single",
					sizeof(worker_params.name));
		rte_eal_mp_remote_launch(handle_work,
				&worker_params, SKIP_MASTER);
		if (sanity_test(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
				&worker_params, SKIP_MASTER);
		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);
		if (rte_lcore_count() > 2) {
			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params, SKIP_MASTER);
			if (sanity_test_with_worker_shutdown(&worker_params, p) < 0)
				goto err;
			quit_workers(&worker_params, p);

			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params, SKIP_MASTER);
			if (test_flush_with_worker_shutdown(&worker_params, p) < 0)
				goto err;
			quit_workers(&worker_params, p);
		} else {
			printf("Too few cores to run worker shutdown test\n");
		}
	}
	if (test_error_distributor_create_numworkers() == -1 ||
			test_error_distributor_create_name() == -1) {
		printf("rte_distributor_create parameter check tests failed\n");
		return -1;
	}

	return 0;

err:
	quit_workers(&worker_params, p);
	return -1;
}

REGISTER_TEST_COMMAND(distributor_autotest, test_distributor);