X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_distributor.c;h=8084c07943ebc88da0e14362b6559b01106a79bf;hb=e0f4a0ed4237e273b78728b7c7a3bf71d2d5218f;hp=00b1d41387b491d4b007a059ba511667fd1cfdef;hpb=824cb29c0e7b8a2b3ed285546c3a39a8e0b3cd44;p=dpdk.git

diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 00b1d41387..8084c07943 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include "test.h"
@@ -40,11 +11,19 @@
 #include
 #include
 #include
+#include
 
 #define ITER_POWER 20 /* log 2 of how many iterations we do when timing.
*/ #define BURST 32 #define BIG_BATCH 1024 +struct worker_params { + char name[64]; + struct rte_distributor *dist; +}; + +struct worker_params worker_params; + /* statics - all zero-initialized by default */ static volatile int quit; /**< general quit variable for all threads */ static volatile int zero_quit; /**< var for when we just want thr0 to quit*/ @@ -80,25 +59,32 @@ clear_packet_count(void) static int handle_work(void *arg) { - struct rte_mbuf *pkt = NULL; - struct rte_distributor *d = arg; - unsigned count = 0; - unsigned id = __sync_fetch_and_add(&worker_idx, 1); - - pkt = rte_distributor_get_pkt(d, id, NULL); + struct rte_mbuf *buf[8] __rte_cache_aligned; + struct worker_params *wp = arg; + struct rte_distributor *db = wp->dist; + unsigned int count = 0, num = 0; + unsigned int id = __sync_fetch_and_add(&worker_idx, 1); + int i; + + for (i = 0; i < 8; i++) + buf[i] = NULL; + num = rte_distributor_get_pkt(db, id, buf, buf, num); while (!quit) { - worker_stats[id].handled_packets++, count++; - pkt = rte_distributor_get_pkt(d, id, pkt); + worker_stats[id].handled_packets += num; + count += num; + num = rte_distributor_get_pkt(db, id, + buf, buf, num); } - worker_stats[id].handled_packets++, count++; - rte_distributor_return_pkt(d, id, pkt); + worker_stats[id].handled_packets += num; + count += num; + rte_distributor_return_pkt(db, id, buf, num); return 0; } /* do basic sanity testing of the distributor. This test tests the following: * - send 32 packets through distributor with the same tag and ensure they * all go to the one worker - * - send 32 packets throught the distributor with two different tags and + * - send 32 packets through the distributor with two different tags and * verify that they go equally to two different workers. * - send 32 packets with different tags through the distributors and * just verify we get all packets back. @@ -107,10 +93,13 @@ handle_work(void *arg) * not necessarily in the same order (as different flows). */ static int -sanity_test(struct rte_distributor *d, struct rte_mempool *p) +sanity_test(struct worker_params *wp, struct rte_mempool *p) { + struct rte_distributor *db = wp->dist; struct rte_mbuf *bufs[BURST]; - unsigned i; + struct rte_mbuf *returns[BURST*2]; + unsigned int i, count; + unsigned int retries; printf("=== Basic distributor sanity tests ===\n"); clear_packet_count(); @@ -124,8 +113,15 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) for (i = 0; i < BURST; i++) bufs[i]->hash.usr = 0; - rte_distributor_process(d, bufs, BURST); - rte_distributor_flush(d); + rte_distributor_process(db, bufs, BURST); + count = 0; + do { + + rte_distributor_flush(db); + count += rte_distributor_returned_pkts(db, + returns, BURST*2); + } while (count < BURST); + if (total_packet_count() != BURST) { printf("Line %d: Error, not all packets flushed. 
" "Expected %u, got %u\n", @@ -137,8 +133,6 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) printf("Worker %u handled %u packets\n", i, worker_stats[i].handled_packets); printf("Sanity test with all zero hashes done.\n"); - if (worker_stats[0].handled_packets != BURST) - return -1; /* pick two flows and check they go correctly */ if (rte_lcore_count() >= 3) { @@ -146,8 +140,13 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) for (i = 0; i < BURST; i++) bufs[i]->hash.usr = (i & 1) << 8; - rte_distributor_process(d, bufs, BURST); - rte_distributor_flush(d); + rte_distributor_process(db, bufs, BURST); + count = 0; + do { + rte_distributor_flush(db); + count += rte_distributor_returned_pkts(db, + returns, BURST*2); + } while (count < BURST); if (total_packet_count() != BURST) { printf("Line %d: Error, not all packets flushed. " "Expected %u, got %u\n", @@ -159,20 +158,21 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) printf("Worker %u handled %u packets\n", i, worker_stats[i].handled_packets); printf("Sanity test with two hash values done\n"); - - if (worker_stats[0].handled_packets != 16 || - worker_stats[1].handled_packets != 16) - return -1; } /* give a different hash value to each packet, * so load gets distributed */ clear_packet_count(); for (i = 0; i < BURST; i++) - bufs[i]->hash.usr = i; - - rte_distributor_process(d, bufs, BURST); - rte_distributor_flush(d); + bufs[i]->hash.usr = i+1; + + rte_distributor_process(db, bufs, BURST); + count = 0; + do { + rte_distributor_flush(db); + count += rte_distributor_returned_pkts(db, + returns, BURST*2); + } while (count < BURST); if (total_packet_count() != BURST) { printf("Line %d: Error, not all packets flushed. " "Expected %u, got %u\n", @@ -194,8 +194,9 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) unsigned num_returned = 0; /* flush out any remaining packets */ - rte_distributor_flush(d); - rte_distributor_clear_returns(d); + rte_distributor_flush(db); + rte_distributor_clear_returns(db); + if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) { printf("line %d: Error getting mbufs from pool\n", __LINE__); return -1; @@ -203,28 +204,44 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) for (i = 0; i < BIG_BATCH; i++) many_bufs[i]->hash.usr = i << 2; + printf("=== testing big burst (%s) ===\n", wp->name); for (i = 0; i < BIG_BATCH/BURST; i++) { - rte_distributor_process(d, &many_bufs[i*BURST], BURST); - num_returned += rte_distributor_returned_pkts(d, + rte_distributor_process(db, + &many_bufs[i*BURST], BURST); + count = rte_distributor_returned_pkts(db, &return_bufs[num_returned], BIG_BATCH - num_returned); + num_returned += count; } - rte_distributor_flush(d); - num_returned += rte_distributor_returned_pkts(d, - &return_bufs[num_returned], BIG_BATCH - num_returned); + rte_distributor_flush(db); + count = rte_distributor_returned_pkts(db, + &return_bufs[num_returned], + BIG_BATCH - num_returned); + num_returned += count; + retries = 0; + do { + rte_distributor_flush(db); + count = rte_distributor_returned_pkts(db, + &return_bufs[num_returned], + BIG_BATCH - num_returned); + num_returned += count; + retries++; + } while ((num_returned < BIG_BATCH) && (retries < 100)); if (num_returned != BIG_BATCH) { - printf("line %d: Number returned is not the same as " - "number sent\n", __LINE__); + printf("line %d: Missing packets, expected %d\n", + __LINE__, num_returned); return -1; } + /* big check - make sure all packets made it back!! 
*/ for (i = 0; i < BIG_BATCH; i++) { unsigned j; struct rte_mbuf *src = many_bufs[i]; - for (j = 0; j < BIG_BATCH; j++) + for (j = 0; j < BIG_BATCH; j++) { if (return_bufs[j] == src) break; + } if (j == BIG_BATCH) { printf("Error: could not find source packet #%u\n", i); @@ -248,19 +265,28 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p) static int handle_work_with_free_mbufs(void *arg) { - struct rte_mbuf *pkt = NULL; - struct rte_distributor *d = arg; - unsigned count = 0; - unsigned id = __sync_fetch_and_add(&worker_idx, 1); - - pkt = rte_distributor_get_pkt(d, id, NULL); + struct rte_mbuf *buf[8] __rte_cache_aligned; + struct worker_params *wp = arg; + struct rte_distributor *d = wp->dist; + unsigned int count = 0; + unsigned int i; + unsigned int num = 0; + unsigned int id = __sync_fetch_and_add(&worker_idx, 1); + + for (i = 0; i < 8; i++) + buf[i] = NULL; + num = rte_distributor_get_pkt(d, id, buf, buf, num); while (!quit) { - worker_stats[id].handled_packets++, count++; - rte_pktmbuf_free(pkt); - pkt = rte_distributor_get_pkt(d, id, pkt); + worker_stats[id].handled_packets += num; + count += num; + for (i = 0; i < num; i++) + rte_pktmbuf_free(buf[i]); + num = rte_distributor_get_pkt(d, + id, buf, buf, num); } - worker_stats[id].handled_packets++, count++; - rte_distributor_return_pkt(d, id, pkt); + worker_stats[id].handled_packets += num; + count += num; + rte_distributor_return_pkt(d, id, buf, num); return 0; } @@ -270,12 +296,14 @@ handle_work_with_free_mbufs(void *arg) * library. */ static int -sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p) +sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p) { + struct rte_distributor *d = wp->dist; unsigned i; struct rte_mbuf *bufs[BURST]; - printf("=== Sanity test with mbuf alloc/free ===\n"); + printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name); + clear_packet_count(); for (i = 0; i < ((1<dist; + unsigned int count = 0; + unsigned int num = 0; + unsigned int total = 0; + unsigned int i; + unsigned int returned = 0; + const unsigned int id = __sync_fetch_and_add(&worker_idx, 1); + + num = rte_distributor_get_pkt(d, id, buf, buf, num); - pkt = rte_distributor_get_pkt(d, id, NULL); /* wait for quit single globally, or for worker zero, wait * for zero_quit */ while (!quit && !(id == 0 && zero_quit)) { - worker_stats[id].handled_packets++, count++; - rte_pktmbuf_free(pkt); - pkt = rte_distributor_get_pkt(d, id, NULL); + worker_stats[id].handled_packets += num; + count += num; + for (i = 0; i < num; i++) + rte_pktmbuf_free(buf[i]); + num = rte_distributor_get_pkt(d, + id, buf, buf, num); + total += num; } - worker_stats[id].handled_packets++, count++; - rte_distributor_return_pkt(d, id, pkt); + worker_stats[id].handled_packets += num; + count += num; + returned = rte_distributor_return_pkt(d, id, buf, num); if (id == 0) { /* for worker zero, allow it to restart to pick up last packet @@ -326,13 +369,18 @@ handle_work_for_shutdown_test(void *arg) */ while (zero_quit) usleep(100); - pkt = rte_distributor_get_pkt(d, id, NULL); + + num = rte_distributor_get_pkt(d, + id, buf, buf, num); + while (!quit) { worker_stats[id].handled_packets++, count++; rte_pktmbuf_free(pkt); - pkt = rte_distributor_get_pkt(d, id, NULL); + num = rte_distributor_get_pkt(d, id, buf, buf, num); } - rte_distributor_return_pkt(d, id, pkt); + returned = rte_distributor_return_pkt(d, + id, buf, num); + printf("Num returned = %d\n", returned); } return 0; } @@ -344,26 +392,32 @@ 
handle_work_for_shutdown_test(void *arg) * library. */ static int -sanity_test_with_worker_shutdown(struct rte_distributor *d, +sanity_test_with_worker_shutdown(struct worker_params *wp, struct rte_mempool *p) { + struct rte_distributor *d = wp->dist; struct rte_mbuf *bufs[BURST]; unsigned i; printf("=== Sanity test of worker shutdown ===\n"); clear_packet_count(); + if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) { printf("line %d: Error getting mbufs from pool\n", __LINE__); return -1; } - /* now set all hash values in all buffers to zero, so all pkts go to the - * one worker thread */ + /* + * Now set all hash values in all buffers to same value so all + * pkts go to the one worker thread + */ for (i = 0; i < BURST; i++) - bufs[i]->hash.usr = 0; + bufs[i]->hash.usr = 1; rte_distributor_process(d, bufs, BURST); + rte_distributor_flush(d); + /* at this point, we will have processed some packets and have a full * backlog for the other ones at worker 0. */ @@ -374,7 +428,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d, return -1; } for (i = 0; i < BURST; i++) - bufs[i]->hash.usr = 0; + bufs[i]->hash.usr = 1; /* get worker zero to quit */ zero_quit = 1; @@ -382,6 +436,12 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d, /* flush the distributor */ rte_distributor_flush(d); + rte_delay_us(10000); + + for (i = 0; i < rte_lcore_count() - 1; i++) + printf("Worker %u handled %u packets\n", i, + worker_stats[i].handled_packets); + if (total_packet_count() != BURST * 2) { printf("Line %d: Error, not all packets flushed. " "Expected %u, got %u\n", @@ -389,10 +449,6 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d, return -1; } - for (i = 0; i < rte_lcore_count() - 1; i++) - printf("Worker %u handled %u packets\n", i, - worker_stats[i].handled_packets); - printf("Sanity test with worker shutdown passed\n\n"); return 0; } @@ -401,13 +457,14 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d, * one worker shuts down.. */ static int -test_flush_with_worker_shutdown(struct rte_distributor *d, +test_flush_with_worker_shutdown(struct worker_params *wp, struct rte_mempool *p) { + struct rte_distributor *d = wp->dist; struct rte_mbuf *bufs[BURST]; unsigned i; - printf("=== Test flush fn with worker shutdown ===\n"); + printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name); clear_packet_count(); if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) { @@ -431,7 +488,13 @@ test_flush_with_worker_shutdown(struct rte_distributor *d, /* flush the distributor */ rte_distributor_flush(d); + rte_delay_us(10000); + zero_quit = 0; + for (i = 0; i < rte_lcore_count() - 1; i++) + printf("Worker %u handled %u packets\n", i, + worker_stats[i].handled_packets); + if (total_packet_count() != BURST) { printf("Line %d: Error, not all packets flushed. 
" "Expected %u, got %u\n", @@ -439,10 +502,6 @@ test_flush_with_worker_shutdown(struct rte_distributor *d, return -1; } - for (i = 0; i < rte_lcore_count() - 1; i++) - printf("Worker %u handled %u packets\n", i, - worker_stats[i].handled_packets); - printf("Flush test with worker shutdown passed\n\n"); return 0; } @@ -451,15 +510,25 @@ static int test_error_distributor_create_name(void) { struct rte_distributor *d = NULL; + struct rte_distributor *db = NULL; char *name = NULL; d = rte_distributor_create(name, rte_socket_id(), - rte_lcore_count() - 1); + rte_lcore_count() - 1, + RTE_DIST_ALG_SINGLE); if (d != NULL || rte_errno != EINVAL) { printf("ERROR: No error on create() with NULL name param\n"); return -1; } + db = rte_distributor_create(name, rte_socket_id(), + rte_lcore_count() - 1, + RTE_DIST_ALG_BURST); + if (db != NULL || rte_errno != EINVAL) { + printf("ERROR: No error on create() with NULL param\n"); + return -1; + } + return 0; } @@ -467,21 +536,34 @@ int test_error_distributor_create_name(void) static int test_error_distributor_create_numworkers(void) { - struct rte_distributor *d = NULL; - d = rte_distributor_create("test_numworkers", rte_socket_id(), - RTE_MAX_LCORE + 10); - if (d != NULL || rte_errno != EINVAL) { + struct rte_distributor *ds = NULL; + struct rte_distributor *db = NULL; + + ds = rte_distributor_create("test_numworkers", rte_socket_id(), + RTE_MAX_LCORE + 10, + RTE_DIST_ALG_SINGLE); + if (ds != NULL || rte_errno != EINVAL) { printf("ERROR: No error on create() with num_workers > MAX\n"); return -1; } + + db = rte_distributor_create("test_numworkers", rte_socket_id(), + RTE_MAX_LCORE + 10, + RTE_DIST_ALG_BURST); + if (db != NULL || rte_errno != EINVAL) { + printf("ERROR: No error on create() num_workers > MAX\n"); + return -1; + } + return 0; } /* Useful function which ensures that all worker functions terminate */ static void -quit_workers(struct rte_distributor *d, struct rte_mempool *p) +quit_workers(struct worker_params *wp, struct rte_mempool *p) { + struct rte_distributor *d = wp->dist; const unsigned num_workers = rte_lcore_count() - 1; unsigned i; struct rte_mbuf *bufs[RTE_MAX_LCORE]; @@ -505,24 +587,42 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p) static int test_distributor(void) { - static struct rte_distributor *d; + static struct rte_distributor *ds; + static struct rte_distributor *db; + static struct rte_distributor *dist[2]; static struct rte_mempool *p; + int i; if (rte_lcore_count() < 2) { - printf("ERROR: not enough cores to test distributor\n"); - return -1; + printf("Not enough cores for distributor_autotest, expecting at least 2\n"); + return TEST_SKIPPED; } - if (d == NULL) { - d = rte_distributor_create("Test_distributor", rte_socket_id(), - rte_lcore_count() - 1); - if (d == NULL) { - printf("Error creating distributor\n"); + if (db == NULL) { + db = rte_distributor_create("Test_dist_burst", rte_socket_id(), + rte_lcore_count() - 1, + RTE_DIST_ALG_BURST); + if (db == NULL) { + printf("Error creating burst distributor\n"); return -1; } } else { - rte_distributor_flush(d); - rte_distributor_clear_returns(d); + rte_distributor_flush(db); + rte_distributor_clear_returns(db); + } + + if (ds == NULL) { + ds = rte_distributor_create("Test_dist_single", + rte_socket_id(), + rte_lcore_count() - 1, + RTE_DIST_ALG_SINGLE); + if (ds == NULL) { + printf("Error creating single distributor\n"); + return -1; + } + } else { + rte_distributor_flush(ds); + rte_distributor_clear_returns(ds); } const unsigned nb_bufs = (511 * 
rte_lcore_count()) < BIG_BATCH ? @@ -536,31 +636,52 @@ test_distributor(void) } } - rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER); - if (sanity_test(d, p) < 0) - goto err; - quit_workers(d, p); + dist[0] = ds; + dist[1] = db; - rte_eal_mp_remote_launch(handle_work_with_free_mbufs, d, SKIP_MASTER); - if (sanity_test_with_mbuf_alloc(d, p) < 0) - goto err; - quit_workers(d, p); + for (i = 0; i < 2; i++) { - if (rte_lcore_count() > 2) { - rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d, - SKIP_MASTER); - if (sanity_test_with_worker_shutdown(d, p) < 0) + worker_params.dist = dist[i]; + if (i) + strlcpy(worker_params.name, "burst", + sizeof(worker_params.name)); + else + strlcpy(worker_params.name, "single", + sizeof(worker_params.name)); + + rte_eal_mp_remote_launch(handle_work, + &worker_params, SKIP_MASTER); + if (sanity_test(&worker_params, p) < 0) goto err; - quit_workers(d, p); + quit_workers(&worker_params, p); - rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d, - SKIP_MASTER); - if (test_flush_with_worker_shutdown(d, p) < 0) + rte_eal_mp_remote_launch(handle_work_with_free_mbufs, + &worker_params, SKIP_MASTER); + if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0) goto err; - quit_workers(d, p); + quit_workers(&worker_params, p); + + if (rte_lcore_count() > 2) { + rte_eal_mp_remote_launch(handle_work_for_shutdown_test, + &worker_params, + SKIP_MASTER); + if (sanity_test_with_worker_shutdown(&worker_params, + p) < 0) + goto err; + quit_workers(&worker_params, p); + + rte_eal_mp_remote_launch(handle_work_for_shutdown_test, + &worker_params, + SKIP_MASTER); + if (test_flush_with_worker_shutdown(&worker_params, + p) < 0) + goto err; + quit_workers(&worker_params, p); + + } else { + printf("Too few cores to run worker shutdown test\n"); + } - } else { - printf("Not enough cores to run tests for worker shutdown\n"); } if (test_error_distributor_create_numworkers() == -1 || @@ -572,12 +693,8 @@ test_distributor(void) return 0; err: - quit_workers(d, p); + quit_workers(&worker_params, p); return -1; } -static struct test_command distributor_cmd = { - .command = "distributor_autotest", - .callback = test_distributor, -}; -REGISTER_TEST_COMMAND(distributor_cmd); +REGISTER_TEST_COMMAND(distributor_autotest, test_distributor);
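
Note: the sketch below is not part of the patch. It is a minimal illustration of the burst-mode worker loop that the reworked test exercises, using the same rte_distributor_get_pkt()/rte_distributor_return_pkt() calling convention seen in handle_work() above. Everything outside the rte_distributor_* API (function name, globals, the WORKER_BURST macro) is made up for the example, and the include list is an assumption, since the header names are missing from this copy of the diff. Treat it as a sketch of the pattern, not as canonical DPDK usage.

#include <rte_memory.h>		/* assumed: for __rte_cache_aligned */
#include <rte_mbuf.h>
#include <rte_distributor.h>

#define WORKER_BURST 8		/* same per-worker buffer depth as the test uses */

static volatile int quit;	/* set by the dispatching core to stop the workers */
static unsigned int worker_idx;	/* hands each launched worker a unique id */

/* Worker loop in the style of handle_work() above; packet processing and
 * error handling are omitted. It would be launched on each worker lcore,
 * e.g. with rte_eal_mp_remote_launch(burst_worker, d, SKIP_MASTER), where
 * d was created with RTE_DIST_ALG_BURST. */
static int
burst_worker(void *arg)
{
	struct rte_distributor *d = arg;
	struct rte_mbuf *buf[WORKER_BURST] __rte_cache_aligned;
	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
	unsigned int num = 0;
	unsigned int i;

	for (i = 0; i < WORKER_BURST; i++)
		buf[i] = NULL;

	/* hand back the previous burst (none yet) and fetch up to WORKER_BURST pkts */
	num = rte_distributor_get_pkt(d, id, buf, buf, num);
	while (!quit) {
		/* ... work on buf[0]..buf[num - 1] here ... */
		num = rte_distributor_get_pkt(d, id, buf, buf, num);
	}
	/* give the final burst back to the distributor before exiting */
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}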