1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2020 Marvell International Ltd.
12 #include <rte_common.h>
13 #include <rte_cycles.h>
14 #include <rte_errno.h>
15 #ifdef RTE_EXEC_ENV_WINDOWS
17 test_graph_perf_func(void)
19 printf("graph_perf not supported on Windows, skipping test\n");
25 #include <rte_graph.h>
26 #include <rte_graph_worker.h>
27 #include <rte_lcore.h>
28 #include <rte_malloc.h>
31 #define TEST_GRAPH_PERF_MZ "graph_perf_data"
32 #define TEST_GRAPH_SRC_NAME "test_graph_perf_source"
33 #define TEST_GRAPH_SRC_BRST_ONE_NAME "test_graph_perf_source_one"
34 #define TEST_GRAPH_WRK_NAME "test_graph_perf_worker"
35 #define TEST_GRAPH_SNK_NAME "test_graph_perf_sink"
37 #define SOURCES(map) RTE_DIM(map)
38 #define STAGES(map) RTE_DIM(map)
39 #define NODES_PER_STAGE(map) RTE_DIM(map[0])
40 #define SINKS(map) RTE_DIM(map[0])
42 #define MAX_EDGES_PER_NODE 7
44 struct test_node_data {
47 uint8_t next_nodes[MAX_EDGES_PER_NODE];
48 uint8_t next_percentage[MAX_EDGES_PER_NODE];
51 struct test_graph_perf {
54 struct test_node_data *node_data;
57 struct graph_lcore_data {
62 static struct test_node_data *
63 graph_get_node_data(struct test_graph_perf *graph_data, rte_node_t id)
65 struct test_node_data *node_data = NULL;
68 for (i = 0; i < graph_data->nb_nodes; i++)
69 if (graph_data->node_data[i].node_id == id) {
70 node_data = &graph_data->node_data[i];
78 test_node_ctx_init(const struct rte_graph *graph, struct rte_node *node)
80 struct test_graph_perf *graph_data;
81 struct test_node_data *node_data;
82 const struct rte_memzone *mz;
83 rte_node_t nid = node->id;
89 mz = rte_memzone_lookup(TEST_GRAPH_PERF_MZ);
92 graph_data = mz->addr;
93 node_data = graph_get_node_data(graph_data, nid);
94 node->ctx[0] = node->nb_edges;
95 for (i = 0; i < node->nb_edges && !node_data->is_sink; i++, edge++) {
96 node->ctx[i + 1] = edge;
97 node->ctx[i + 9] = node_data->next_percentage[i];
103 /* Source node function */
105 test_perf_node_worker_source(struct rte_graph *graph, struct rte_node *node,
106 void **objs, uint16_t nb_objs)
112 RTE_SET_USED(nb_objs);
114 /* Create a proportional stream for every next */
115 for (i = 0; i < node->ctx[0]; i++) {
116 count = (node->ctx[i + 9] * RTE_GRAPH_BURST_SIZE) / 100;
117 rte_node_next_stream_get(graph, node, node->ctx[i + 1], count);
118 rte_node_next_stream_put(graph, node, node->ctx[i + 1], count);
121 return RTE_GRAPH_BURST_SIZE;
124 static struct rte_node_register test_graph_perf_source = {
125 .name = TEST_GRAPH_SRC_NAME,
126 .process = test_perf_node_worker_source,
127 .flags = RTE_NODE_SOURCE_F,
128 .init = test_node_ctx_init,
131 RTE_NODE_REGISTER(test_graph_perf_source);
134 test_perf_node_worker_source_burst_one(struct rte_graph *graph,
135 struct rte_node *node, void **objs,
142 RTE_SET_USED(nb_objs);
144 /* Create a proportional stream for every next */
145 for (i = 0; i < node->ctx[0]; i++) {
146 count = (node->ctx[i + 9]) / 100;
147 rte_node_next_stream_get(graph, node, node->ctx[i + 1], count);
148 rte_node_next_stream_put(graph, node, node->ctx[i + 1], count);
154 static struct rte_node_register test_graph_perf_source_burst_one = {
155 .name = TEST_GRAPH_SRC_BRST_ONE_NAME,
156 .process = test_perf_node_worker_source_burst_one,
157 .flags = RTE_NODE_SOURCE_F,
158 .init = test_node_ctx_init,
161 RTE_NODE_REGISTER(test_graph_perf_source_burst_one);
163 /* Worker node function */
165 test_perf_node_worker(struct rte_graph *graph, struct rte_node *node,
166 void **objs, uint16_t nb_objs)
173 /* Move stream for single next node */
174 if (node->ctx[0] == 1) {
175 rte_node_next_stream_move(graph, node, node->ctx[1]);
179 /* Enqueue objects to next nodes proportionally */
180 for (i = 0; i < node->ctx[0]; i++) {
181 next = node->ctx[i + 1];
182 count = (node->ctx[i + 9] * nb_objs) / 100;
185 switch (count & (4 - 1)) {
187 rte_node_enqueue_x4(graph, node, next, objs[0],
188 objs[1], objs[2], objs[3]);
193 rte_node_enqueue_x1(graph, node, next, objs[0]);
198 rte_node_enqueue_x2(graph, node, next, objs[0],
204 rte_node_enqueue_x2(graph, node, next, objs[0],
206 rte_node_enqueue_x1(graph, node, next, objs[0]);
215 rte_node_enqueue(graph, node, next, objs, nb_objs - enq);
220 static struct rte_node_register test_graph_perf_worker = {
221 .name = TEST_GRAPH_WRK_NAME,
222 .process = test_perf_node_worker,
223 .init = test_node_ctx_init,
226 RTE_NODE_REGISTER(test_graph_perf_worker);
/* Last node in graph a.k.a sink node: consumes objects without forwarding. */
static uint16_t
test_perf_node_sink(struct rte_graph *graph, struct rte_node *node, void **objs,
		    uint16_t nb_objs)
{
	RTE_SET_USED(graph);
	RTE_SET_USED(node);
	RTE_SET_USED(objs);
	RTE_SET_USED(nb_objs);

	return nb_objs;
}
241 static struct rte_node_register test_graph_perf_sink = {
242 .name = TEST_GRAPH_SNK_NAME,
243 .process = test_perf_node_sink,
244 .init = test_node_ctx_init,
247 RTE_NODE_REGISTER(test_graph_perf_sink);
250 graph_perf_setup(void)
252 if (rte_lcore_count() < 2) {
253 printf("Test requires at least 2 lcores\n");
261 graph_perf_teardown(void)
265 static inline rte_node_t
266 graph_node_get(const char *pname, char *nname)
268 rte_node_t pnode_id = rte_node_from_name(pname);
269 char lookup_name[RTE_NODE_NAMESIZE];
272 snprintf(lookup_name, RTE_NODE_NAMESIZE, "%s-%s", pname, nname);
273 node_id = rte_node_from_name(lookup_name);
275 if (node_id != RTE_NODE_ID_INVALID) {
276 if (rte_node_edge_count(node_id))
277 rte_node_edge_shrink(node_id, 0);
281 return rte_node_clone(pnode_id, nname);
285 graph_node_count_edges(uint32_t stage, uint16_t node, uint16_t nodes_per_stage,
286 uint8_t edge_map[][nodes_per_stage][nodes_per_stage],
287 char *ename[], struct test_node_data *node_data,
288 rte_node_t **node_map)
290 uint8_t total_percent = 0;
294 for (i = 0; i < nodes_per_stage && edges < MAX_EDGES_PER_NODE; i++) {
295 if (edge_map[stage + 1][i][node]) {
296 ename[edges] = malloc(sizeof(char) * RTE_NODE_NAMESIZE);
297 snprintf(ename[edges], RTE_NODE_NAMESIZE, "%s",
298 rte_node_id_to_name(node_map[stage + 1][i]));
299 node_data->next_nodes[edges] = node_map[stage + 1][i];
300 node_data->next_percentage[edges] =
301 edge_map[stage + 1][i][node];
303 total_percent += edge_map[stage + 1][i][node];
307 if (edges >= MAX_EDGES_PER_NODE || (edges && total_percent != 100)) {
308 for (i = 0; i < edges; i++)
310 return RTE_EDGE_ID_INVALID;
317 graph_init(const char *gname, uint8_t nb_srcs, uint8_t nb_sinks,
318 uint32_t stages, uint16_t nodes_per_stage,
319 uint8_t src_map[][nodes_per_stage], uint8_t snk_map[][nb_sinks],
320 uint8_t edge_map[][nodes_per_stage][nodes_per_stage],
323 struct test_graph_perf *graph_data;
324 char nname[RTE_NODE_NAMESIZE / 2];
325 struct test_node_data *node_data;
326 char *ename[nodes_per_stage];
327 struct rte_graph_param gconf;
328 const struct rte_memzone *mz;
329 uint8_t total_percent = 0;
330 rte_node_t *src_nodes;
331 rte_node_t *snk_nodes;
332 rte_node_t **node_map;
333 char **node_patterns;
334 rte_graph_t graph_id;
339 mz = rte_memzone_reserve(TEST_GRAPH_PERF_MZ,
340 sizeof(struct test_graph_perf), 0, 0);
342 printf("Failed to allocate graph common memory\n");
346 graph_data = mz->addr;
347 graph_data->nb_nodes = 0;
348 graph_data->node_data =
349 malloc(sizeof(struct test_node_data) *
350 (nb_srcs + nb_sinks + stages * nodes_per_stage));
351 if (graph_data->node_data == NULL) {
352 printf("Failed to reserve memzone for graph data\n");
356 node_patterns = malloc(sizeof(char *) *
357 (nb_srcs + nb_sinks + stages * nodes_per_stage));
358 if (node_patterns == NULL) {
359 printf("Failed to reserve memory for node patterns\n");
363 src_nodes = malloc(sizeof(rte_node_t) * nb_srcs);
364 if (src_nodes == NULL) {
365 printf("Failed to reserve memory for src nodes\n");
369 snk_nodes = malloc(sizeof(rte_node_t) * nb_sinks);
370 if (snk_nodes == NULL) {
371 printf("Failed to reserve memory for snk nodes\n");
375 node_map = malloc(sizeof(rte_node_t *) * stages +
376 sizeof(rte_node_t) * nodes_per_stage * stages);
377 if (node_map == NULL) {
378 printf("Failed to reserve memory for node map\n");
382 /* Setup the Graph */
383 for (i = 0; i < stages; i++) {
385 (rte_node_t *)(node_map + stages) + nodes_per_stage * i;
386 for (j = 0; j < nodes_per_stage; j++) {
388 for (k = 0; k < nodes_per_stage; k++)
389 total_percent += edge_map[i][j][k];
392 node_patterns[graph_data->nb_nodes] =
393 malloc(RTE_NODE_NAMESIZE);
394 if (node_patterns[graph_data->nb_nodes] == NULL) {
395 printf("Failed to create memory for pattern\n");
396 goto pattern_name_free;
399 /* Clone a worker node */
400 snprintf(nname, sizeof(nname), "%d-%d", i, j);
402 graph_node_get(TEST_GRAPH_WRK_NAME, nname);
403 if (node_map[i][j] == RTE_NODE_ID_INVALID) {
404 printf("Failed to create node[%s]\n", nname);
405 graph_data->nb_nodes++;
406 goto pattern_name_free;
408 snprintf(node_patterns[graph_data->nb_nodes],
409 RTE_NODE_NAMESIZE, "%s",
410 rte_node_id_to_name(node_map[i][j]));
412 &graph_data->node_data[graph_data->nb_nodes];
413 node_data->node_id = node_map[i][j];
414 node_data->is_sink = false;
415 graph_data->nb_nodes++;
419 for (i = 0; i < stages - 1; i++) {
420 for (j = 0; j < nodes_per_stage; j++) {
421 /* Count edges i.e connections of worker node to next */
423 graph_get_node_data(graph_data, node_map[i][j]);
424 edges = graph_node_count_edges(i, j, nodes_per_stage,
426 node_data, node_map);
427 if (edges == RTE_EDGE_ID_INVALID) {
428 printf("Invalid edge configuration\n");
429 goto pattern_name_free;
434 /* Connect a node in stage 'i' to nodes
435 * in stage 'i + 1' with edges.
437 count = rte_node_edge_update(
439 (const char **)(uintptr_t)ename, edges);
440 for (k = 0; k < edges; k++)
442 if (count != edges) {
443 printf("Couldn't add edges %d %d\n", edges,
445 goto pattern_name_free;
450 /* Setup Source nodes */
451 for (i = 0; i < nb_srcs; i++) {
454 node_patterns[graph_data->nb_nodes] = malloc(RTE_NODE_NAMESIZE);
455 if (node_patterns[graph_data->nb_nodes] == NULL) {
456 printf("Failed to create memory for pattern\n");
457 goto pattern_name_free;
459 /* Clone a source node */
460 snprintf(nname, sizeof(nname), "%d", i);
462 graph_node_get(burst_one ? TEST_GRAPH_SRC_BRST_ONE_NAME
463 : TEST_GRAPH_SRC_NAME,
465 if (src_nodes[i] == RTE_NODE_ID_INVALID) {
466 printf("Failed to create node[%s]\n", nname);
467 graph_data->nb_nodes++;
468 goto pattern_name_free;
470 snprintf(node_patterns[graph_data->nb_nodes], RTE_NODE_NAMESIZE,
471 "%s", rte_node_id_to_name(src_nodes[i]));
472 node_data = &graph_data->node_data[graph_data->nb_nodes];
473 node_data->node_id = src_nodes[i];
474 node_data->is_sink = false;
475 graph_data->nb_nodes++;
477 /* Prepare next node list to connect to */
478 for (j = 0; j < nodes_per_stage; j++) {
481 ename[edges] = malloc(sizeof(char) * RTE_NODE_NAMESIZE);
482 snprintf(ename[edges], RTE_NODE_NAMESIZE, "%s",
483 rte_node_id_to_name(node_map[0][j]));
484 node_data->next_nodes[edges] = node_map[0][j];
485 node_data->next_percentage[edges] = src_map[i][j];
487 total_percent += src_map[i][j];
492 if (edges >= MAX_EDGES_PER_NODE || total_percent != 100) {
493 printf("Invalid edge configuration\n");
494 for (j = 0; j < edges; j++)
496 goto pattern_name_free;
499 /* Connect to list of next nodes using edges */
500 count = rte_node_edge_update(src_nodes[i], 0,
501 (const char **)(uintptr_t)ename,
503 for (k = 0; k < edges; k++)
505 if (count != edges) {
506 printf("Couldn't add edges %d %d\n", edges, count);
507 goto pattern_name_free;
511 /* Setup Sink nodes */
512 for (i = 0; i < nb_sinks; i++) {
513 node_patterns[graph_data->nb_nodes] = malloc(RTE_NODE_NAMESIZE);
514 if (node_patterns[graph_data->nb_nodes] == NULL) {
515 printf("Failed to create memory for pattern\n");
516 goto pattern_name_free;
519 /* Clone a sink node */
520 snprintf(nname, sizeof(nname), "%d", i);
521 snk_nodes[i] = graph_node_get(TEST_GRAPH_SNK_NAME, nname);
522 if (snk_nodes[i] == RTE_NODE_ID_INVALID) {
523 printf("Failed to create node[%s]\n", nname);
524 graph_data->nb_nodes++;
525 goto pattern_name_free;
527 snprintf(node_patterns[graph_data->nb_nodes], RTE_NODE_NAMESIZE,
528 "%s", rte_node_id_to_name(snk_nodes[i]));
529 node_data = &graph_data->node_data[graph_data->nb_nodes];
530 node_data->node_id = snk_nodes[i];
531 node_data->is_sink = true;
532 graph_data->nb_nodes++;
535 /* Connect last stage worker nodes to sink nodes */
536 for (i = 0; i < nodes_per_stage; i++) {
539 node_data = graph_get_node_data(graph_data,
540 node_map[stages - 1][i]);
541 /* Prepare list of sink nodes to connect to */
542 for (j = 0; j < nb_sinks; j++) {
545 ename[edges] = malloc(sizeof(char) * RTE_NODE_NAMESIZE);
546 snprintf(ename[edges], RTE_NODE_NAMESIZE, "%s",
547 rte_node_id_to_name(snk_nodes[j]));
548 node_data->next_nodes[edges] = snk_nodes[j];
549 node_data->next_percentage[edges] = snk_map[i][j];
551 total_percent += snk_map[i][j];
555 if (edges >= MAX_EDGES_PER_NODE || total_percent != 100) {
556 printf("Invalid edge configuration\n");
557 for (j = 0; j < edges; j++)
559 goto pattern_name_free;
562 /* Connect a worker node to a list of sink nodes */
563 count = rte_node_edge_update(node_map[stages - 1][i], 0,
564 (const char **)(uintptr_t)ename,
566 for (k = 0; k < edges; k++)
568 if (count != edges) {
569 printf("Couldn't add edges %d %d\n", edges, count);
570 goto pattern_name_free;
575 gconf.socket_id = SOCKET_ID_ANY;
576 gconf.nb_node_patterns = graph_data->nb_nodes;
577 gconf.node_patterns = (const char **)(uintptr_t)node_patterns;
579 graph_id = rte_graph_create(gname, &gconf);
580 if (graph_id == RTE_GRAPH_ID_INVALID) {
581 printf("Graph creation failed with error = %d\n", rte_errno);
582 goto pattern_name_free;
584 graph_data->graph_id = graph_id;
587 for (i = 0; i < graph_data->nb_nodes; i++)
588 free(node_patterns[i]);
596 for (i = 0; i < graph_data->nb_nodes; i++)
597 free(node_patterns[i]);
605 free(graph_data->node_data);
607 rte_memzone_free(mz);
611 /* Worker thread function */
613 _graph_perf_wrapper(void *args)
615 struct graph_lcore_data *data = args;
616 struct rte_graph *graph;
619 graph = rte_graph_lookup(rte_graph_id_to_name(data->graph_id));
621 /* Graph walk until done */
623 rte_graph_walk(graph);
629 measure_perf_get(rte_graph_t graph_id)
631 const char *pattern = rte_graph_id_to_name(graph_id);
632 uint32_t lcore_id = rte_get_next_lcore(-1, 1, 0);
633 struct rte_graph_cluster_stats_param param;
634 struct rte_graph_cluster_stats *stats;
635 struct graph_lcore_data *data;
637 data = rte_zmalloc("Graph_perf", sizeof(struct graph_lcore_data),
638 RTE_CACHE_LINE_SIZE);
639 data->graph_id = graph_id;
642 /* Run graph worker thread function */
643 rte_eal_remote_launch(_graph_perf_wrapper, data, lcore_id);
645 /* Collect stats for few msecs */
646 if (rte_graph_has_stats_feature()) {
647 memset(¶m, 0, sizeof(param));
649 param.socket_id = SOCKET_ID_ANY;
650 param.graph_patterns = &pattern;
651 param.nb_graph_patterns = 1;
653 stats = rte_graph_cluster_stats_create(¶m);
655 printf("Failed to create stats\n");
660 rte_graph_cluster_stats_get(stats, true);
662 rte_graph_cluster_stats_get(stats, false);
663 rte_graph_cluster_stats_destroy(stats);
668 rte_eal_wait_lcore(lcore_id);
676 const struct rte_memzone *mz = rte_memzone_lookup(TEST_GRAPH_PERF_MZ);
677 struct test_graph_perf *graph_data;
681 graph_data = mz->addr;
683 rte_graph_destroy(graph_data->graph_id);
684 free(graph_data->node_data);
685 rte_memzone_free(rte_memzone_lookup(TEST_GRAPH_PERF_MZ));
691 const struct rte_memzone *mz;
692 struct test_graph_perf *graph_data;
694 mz = rte_memzone_lookup(TEST_GRAPH_PERF_MZ);
697 graph_data = mz->addr;
699 return measure_perf_get(graph_data->graph_id);
/* Named test-case entry points — each topology is built by its matching
 * graph_init_* setup callback; the measurement itself is identical.
 */
static inline int
graph_hr_4s_1n_1src_1snk(void)
{
	return measure_perf();
}

static inline int
graph_hr_4s_1n_1src_1snk_brst_one(void)
{
	return measure_perf();
}

static inline int
graph_hr_4s_1n_2src_1snk(void)
{
	return measure_perf();
}

static inline int
graph_hr_4s_1n_1src_2snk(void)
{
	return measure_perf();
}

static inline int
graph_tree_4s_4n_1src_4snk(void)
{
	return measure_perf();
}

static inline int
graph_reverse_tree_3s_4n_1src_1snk(void)
{
	return measure_perf();
}

static inline int
graph_parallel_tree_5s_4n_4src_4snk(void)
{
	return measure_perf();
}
/* Human-readable-style chain: 4 stages, 1 node per stage, 1 src, 1 snk. */
static int
graph_init_hr(void)
{
	uint8_t edge_map[][1][1] = {
		{ {0} },	/* stage 0 receives from the source, not edge_map */
		{ {100} },
		{ {100} },
		{ {100} },
	};
	uint8_t src_map[][1] = { {100} };
	uint8_t snk_map[][1] = { {100} };

	return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 0);
}
/* Same chain topology as graph_init_hr, but the source emits one object
 * per walk (burst_one = 1).
 */
static int
graph_init_hr_brst_one(void)
{
	uint8_t edge_map[][1][1] = {
		{ {0} },	/* stage 0 receives from the source, not edge_map */
		{ {100} },
		{ {100} },
		{ {100} },
	};
	uint8_t src_map[][1] = { {100} };
	uint8_t snk_map[][1] = { {100} };

	return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 1);
}
/* Chain topology fed by two sources, each sending 100% to stage 0. */
static int
graph_init_hr_multi_src(void)
{
	uint8_t edge_map[][1][1] = {
		{ {0} },	/* stage 0 receives from the sources, not edge_map */
		{ {100} },
		{ {100} },
		{ {100} },
	};
	uint8_t src_map[][1] = {
		{100},
		{100},
	};
	uint8_t snk_map[][1] = { {100} };

	return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 0);
}
/* Chain topology draining into two sinks, 50% of traffic to each. */
static int
graph_init_hr_multi_snk(void)
{
	uint8_t edge_map[][1][1] = {
		{ {0} },	/* stage 0 receives from the source, not edge_map */
		{ {100} },
		{ {100} },
		{ {100} },
	};
	uint8_t src_map[][1] = { {100} };
	uint8_t snk_map[][2] = { {50, 50} };

	return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 0);
}
/* Full-split tree: 1 src -> 1 -> 2 -> 4 worker nodes -> 4 sinks.
 * NOTE(review): edge_map rows reconstructed from a truncated listing —
 * verify the percentages against upstream test_graph_perf.c.
 */
static int
graph_init_tree(void)
{
	uint8_t edge_map[][4][4] = {
		{
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
		},
		{
			{100, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
		},
		{
			{50, 0, 0, 0},
			{50, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
		},
		{
			{50, 0, 0, 0},
			{50, 0, 0, 0},
			{0, 50, 0, 0},
			{0, 50, 0, 0},
		},
	};
	uint8_t src_map[][4] = { {100, 0, 0, 0} };
	uint8_t snk_map[][4] = {
		{100, 0, 0, 0},
		{0, 100, 0, 0},
		{0, 0, 100, 0},
		{0, 0, 0, 100},
	};

	return graph_init("graph_full_split", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 0);
}
/* Reverse tree: 1 src fans out to 4 nodes, stages merge 4 -> 3 -> 2,
 * and the two surviving last-stage nodes drain into a single sink.
 * NOTE(review): edge_map rows reconstructed from a truncated listing —
 * verify the percentages against upstream test_graph_perf.c.
 */
static int
graph_init_reverse_tree(void)
{
	uint8_t edge_map[][4][4] = {
		{
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
		},
		{
			{100, 100, 0, 0},
			{0, 0, 100, 0},
			{0, 0, 0, 100},
			{0, 0, 0, 0},
		},
		{
			{100, 100, 0, 0},
			{0, 0, 100, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
		},
	};
	uint8_t src_map[][4] = { {25, 25, 25, 25} };
	uint8_t snk_map[][1] = { {100}, {100}, {0}, {0} };

	return graph_init("graph_full_split", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 0);
}
/* Parallel chains: 4 independent src -> 5-stage chain -> snk pipelines,
 * wired with identity maps so no traffic crosses between chains.
 * NOTE(review): edge_map rows reconstructed from a truncated listing —
 * verify the percentages against upstream test_graph_perf.c.
 */
static int
graph_init_parallel_tree(void)
{
	uint8_t edge_map[][4][4] = {
		{
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
			{0, 0, 0, 0},
		},
		{
			{100, 0, 0, 0},
			{0, 100, 0, 0},
			{0, 0, 100, 0},
			{0, 0, 0, 100},
		},
		{
			{100, 0, 0, 0},
			{0, 100, 0, 0},
			{0, 0, 100, 0},
			{0, 0, 0, 100},
		},
		{
			{100, 0, 0, 0},
			{0, 100, 0, 0},
			{0, 0, 100, 0},
			{0, 0, 0, 100},
		},
		{
			{100, 0, 0, 0},
			{0, 100, 0, 0},
			{0, 0, 100, 0},
			{0, 0, 0, 100},
		},
	};
	uint8_t src_map[][4] = {
		{100, 0, 0, 0},
		{0, 100, 0, 0},
		{0, 0, 100, 0},
		{0, 0, 0, 100},
	};
	uint8_t snk_map[][4] = {
		{100, 0, 0, 0},
		{0, 100, 0, 0},
		{0, 0, 100, 0},
		{0, 0, 0, 100},
	};

	return graph_init("graph_parallel", SOURCES(src_map), SINKS(snk_map),
			  STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
			  snk_map, edge_map, 0);
}
982 /** Graph Creation cheat sheet
983 * edge_map -> dictates graph flow from worker stage 0 to worker stage n-1.
984 * src_map -> dictates source nodes enqueue percentage to worker stage 0.
985 * snk_map -> dictates stage n-1 enqueue percentage to sink.
988 * edge_map[<nb_stages>][<nodes_per_stg>][<nodes_in_nxt_stg = nodes_per_stg>]
989 * src_map[<nb_sources>][<nodes_in_stage0 = nodes_per_stage>]
990 * snk_map[<nodes_in_stage(n-1) = nodes_per_stage>][<nb_sinks>]
992 * The last array dictates the percentage of received objs to enqueue to next
995 * Note: edge_map[][0][] will always be unused as it will receive from source
999 * http://bit.ly/2PqbqOy
1000 * Each stage(n) connects to all nodes in the next stage in decreasing
1002 * Since we can't resize the edge_map dynamically we get away by creating
1003 * dummy nodes and assigning 0 percentages.
1004 * Max nodes across all stages = 4
1009 * edge_map[][4][4] = {
1010 * // Nodes per stage
1016 * }, // This will be unused.
1018 * // Nodes enabled in current stage + prev stage enq %
1031 * Above, each stage tells how much it should receive from previous except
1034 * src_map[][4] = { {25, 25, 25, 25} };
1035 * Here, we tell each source the % it has to send to stage_0 nodes. In
1036 * case we want 2 source node we can declare as
1037 * src_map[][4] = { {25, 25, 25, 25}, {25, 25, 25, 25} };
1039 * snk_map[][1] = { {100}, {100}, {0}, {0} }
1040 * Here, we tell stage - 1 nodes how much to enqueue to sink_0.
1041 * If we have 2 sinks we can do as follows
1042 * snk_map[][2] = { {50, 50}, {50, 50}, {0, 0}, {0, 0} }
1045 static struct unit_test_suite graph_perf_testsuite = {
1046 .suite_name = "Graph library performance test suite",
1047 .setup = graph_perf_setup,
1048 .teardown = graph_perf_teardown,
1049 .unit_test_cases = {
1050 TEST_CASE_ST(graph_init_hr, graph_fini,
1051 graph_hr_4s_1n_1src_1snk),
1052 TEST_CASE_ST(graph_init_hr_brst_one, graph_fini,
1053 graph_hr_4s_1n_1src_1snk_brst_one),
1054 TEST_CASE_ST(graph_init_hr_multi_src, graph_fini,
1055 graph_hr_4s_1n_2src_1snk),
1056 TEST_CASE_ST(graph_init_hr_multi_snk, graph_fini,
1057 graph_hr_4s_1n_1src_2snk),
1058 TEST_CASE_ST(graph_init_tree, graph_fini,
1059 graph_tree_4s_4n_1src_4snk),
1060 TEST_CASE_ST(graph_init_reverse_tree, graph_fini,
1061 graph_reverse_tree_3s_4n_1src_1snk),
1062 TEST_CASE_ST(graph_init_parallel_tree, graph_fini,
1063 graph_parallel_tree_5s_4n_4src_4snk),
1064 TEST_CASES_END(), /**< NULL terminate unit test array */
1069 test_graph_perf_func(void)
1071 return unit_test_suite_runner(&graph_perf_testsuite);
1074 #endif /* !RTE_EXEC_ENV_WINDOWS */
1076 REGISTER_TEST_COMMAND(graph_perf_autotest, test_graph_perf_func);