1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2020 Marvell International Ltd.
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_errno.h>
12 #include <rte_graph.h>
13 #include <rte_graph_worker.h>
14 #include <rte_lcore.h>
15 #include <rte_malloc.h>
20 #define TEST_GRAPH_PERF_MZ "graph_perf_data"
21 #define TEST_GRAPH_SRC_NAME "test_graph_perf_source"
22 #define TEST_GRAPH_SRC_BRST_ONE_NAME "test_graph_perf_source_one"
23 #define TEST_GRAPH_WRK_NAME "test_graph_perf_worker"
24 #define TEST_GRAPH_SNK_NAME "test_graph_perf_sink"
26 #define SOURCES(map) RTE_DIM(map)
27 #define STAGES(map) RTE_DIM(map)
28 #define NODES_PER_STAGE(map) RTE_DIM(map[0])
29 #define SINKS(map) RTE_DIM(map[0])
31 #define MAX_EDGES_PER_NODE 7
/* Per-node test configuration built by graph_init() and consumed by
 * test_node_ctx_init(). NOTE(review): id/sink fields elided in this view.
 */
33 struct test_node_data {
/* Destination node ids and the percentage of traffic sent to each;
 * percentages over the used edges are expected to sum to 100.
 */
36 uint8_t next_nodes[MAX_EDGES_PER_NODE];
37 uint8_t next_percentage[MAX_EDGES_PER_NODE];
/* Shared test state stored in the TEST_GRAPH_PERF_MZ memzone so node init
 * callbacks can find their configuration.
 */
40 struct test_graph_perf {
/* Heap-allocated array of per-node configs, one entry per created node. */
43 struct test_node_data *node_data;
/* Arguments handed to the worker lcore that runs the graph walk
 * (fields elided in this view; _graph_perf_wrapper() reads ->graph_id).
 */
46 struct graph_lcore_data {
/* Linear search of graph_data->node_data for the entry whose node_id
 * matches 'id'; node_data stays NULL (and is presumably returned as such)
 * when the id is absent. NOTE(review): break/return lines elided.
 */
51 static struct test_node_data *
52 graph_get_node_data(struct test_graph_perf *graph_data, rte_node_t id)
54 struct test_node_data *node_data = NULL;
57 for (i = 0; i < graph_data->nb_nodes; i++)
58 if (graph_data->node_data[i].node_id == id) {
59 node_data = &graph_data->node_data[i];
67 test_node_ctx_init(const struct rte_graph *graph, struct rte_node *node)
69 struct test_graph_perf *graph_data;
70 struct test_node_data *node_data;
71 const struct rte_memzone *mz;
72 rte_node_t nid = node->id;
78 mz = rte_memzone_lookup(TEST_GRAPH_PERF_MZ);
79 graph_data = mz->addr;
80 node_data = graph_get_node_data(graph_data, nid);
81 node->ctx[0] = node->nb_edges;
82 for (i = 0; i < node->nb_edges && !node_data->is_sink; i++, edge++) {
83 node->ctx[i + 1] = edge;
84 node->ctx[i + 9] = node_data->next_percentage[i];
90 /* Source node function */
/* Emits RTE_GRAPH_BURST_SIZE objects per walk, split across the next
 * nodes in proportion to the percentages cached in ctx[] by
 * test_node_ctx_init().
 */
92 test_perf_node_worker_source(struct rte_graph *graph, struct rte_node *node,
93 void **objs, uint16_t nb_objs)
99 RTE_SET_USED(nb_objs);
101 /* Create a proportional stream for every next */
102 for (i = 0; i < node->ctx[0]; i++) {
/* ctx[i + 9] holds the percentage for edge ctx[i + 1] */
103 count = (node->ctx[i + 9] * RTE_GRAPH_BURST_SIZE) / 100;
104 rte_node_next_stream_get(graph, node, node->ctx[i + 1], count);
105 rte_node_next_stream_put(graph, node, node->ctx[i + 1], count);
108 return RTE_GRAPH_BURST_SIZE;
/* Registration for the full-burst source node. */
111 static struct rte_node_register test_graph_perf_source = {
112 .name = TEST_GRAPH_SRC_NAME,
113 .process = test_perf_node_worker_source,
114 .flags = RTE_NODE_SOURCE_F,
115 .init = test_node_ctx_init,
118 RTE_NODE_REGISTER(test_graph_perf_source);
/* Source variant that emits a minimal burst: percentage / 100 yields a
 * single object on a 100% edge (hence "burst one"), exercising the
 * per-object overhead path.
 */
121 test_perf_node_worker_source_burst_one(struct rte_graph *graph,
122 struct rte_node *node, void **objs,
129 RTE_SET_USED(nb_objs);
131 /* Create a proportional stream for every next */
132 for (i = 0; i < node->ctx[0]; i++) {
133 count = (node->ctx[i + 9]) / 100;
134 rte_node_next_stream_get(graph, node, node->ctx[i + 1], count);
135 rte_node_next_stream_put(graph, node, node->ctx[i + 1], count);
/* Registration for the burst-of-one source node. */
141 static struct rte_node_register test_graph_perf_source_burst_one = {
142 .name = TEST_GRAPH_SRC_BRST_ONE_NAME,
143 .process = test_perf_node_worker_source_burst_one,
144 .flags = RTE_NODE_SOURCE_F,
145 .init = test_node_ctx_init,
148 RTE_NODE_REGISTER(test_graph_perf_source_burst_one);
150 /* Worker node function */
/* Forwards the received burst: a single next node gets the whole stream
 * moved wholesale; otherwise the burst is split across next nodes by the
 * cached percentages, using 4-way unrolled enqueues with the remainder
 * handled via switch (count & 3). NOTE(review): several switch arms,
 * objs pointer advances and the final return are elided in this view.
 */
152 test_perf_node_worker(struct rte_graph *graph, struct rte_node *node,
153 void **objs, uint16_t nb_objs)
160 /* Move stream for single next node */
161 if (node->ctx[0] == 1) {
162 rte_node_next_stream_move(graph, node, node->ctx[1]);
166 /* Enqueue objects to next nodes proportionally */
167 for (i = 0; i < node->ctx[0]; i++) {
168 next = node->ctx[i + 1];
169 count = (node->ctx[i + 9] * nb_objs) / 100;
/* Remainder dispatch after the unrolled x4 bulk loop */
172 switch (count & (4 - 1)) {
174 rte_node_enqueue_x4(graph, node, next, objs[0],
175 objs[1], objs[2], objs[3]);
180 rte_node_enqueue_x1(graph, node, next, objs[0]);
185 rte_node_enqueue_x2(graph, node, next, objs[0],
191 rte_node_enqueue_x2(graph, node, next, objs[0],
193 rte_node_enqueue_x1(graph, node, next, objs[0]);
/* Flush whatever is left of the burst on the last edge */
202 rte_node_enqueue(graph, node, next, objs, nb_objs - enq);
/* Registration for the intermediate (worker) node. */
207 static struct rte_node_register test_graph_perf_worker = {
208 .name = TEST_GRAPH_WRK_NAME,
209 .process = test_perf_node_worker,
210 .init = test_node_ctx_init,
213 RTE_NODE_REGISTER(test_graph_perf_worker);
215 /* Last node in graph a.k.a sink node */
/* Terminal node: consumes the burst without forwarding (body elided;
 * visibly only marks nb_objs as used).
 */
217 test_perf_node_sink(struct rte_graph *graph, struct rte_node *node, void **objs,
223 RTE_SET_USED(nb_objs);
/* Registration for the sink node. */
228 static struct rte_node_register test_graph_perf_sink = {
229 .name = TEST_GRAPH_SNK_NAME,
230 .process = test_perf_node_sink,
231 .init = test_node_ctx_init,
234 RTE_NODE_REGISTER(test_graph_perf_sink);
/* Suite setup: the measurement needs one main lcore plus at least one
 * worker lcore to run the graph walk on.
 */
237 graph_perf_setup(void)
239 if (rte_lcore_count() < 2) {
240 printf("Test requires at least 2 lcores\n");
/* Suite teardown (body elided in this view). */
248 graph_perf_teardown(void)
/* Resolve the node id for clone "<pname>-<nname>": if a clone of that
 * name already exists, reuse it after shrinking any stale edges
 * (presumably returning node_id in the elided lines); otherwise clone
 * the parent node under the requested name.
 */
252 static inline rte_node_t
253 graph_node_get(const char *pname, char *nname)
255 rte_node_t pnode_id = rte_node_from_name(pname);
256 char lookup_name[RTE_NODE_NAMESIZE];
259 snprintf(lookup_name, RTE_NODE_NAMESIZE, "%s-%s", pname, nname);
260 node_id = rte_node_from_name(lookup_name);
262 if (node_id != RTE_NODE_ID_INVALID) {
263 if (rte_node_edge_count(node_id))
264 rte_node_edge_shrink(node_id, 0);
268 return rte_node_clone(pnode_id, nname);
/* Collect the next-stage connections for worker (stage, node): scan
 * column 'node' of edge_map[stage + 1], recording the destination name
 * (heap copy in ename[]), node id and percentage in node_data.
 * Returns the edge count, or RTE_EDGE_ID_INVALID when the edge budget is
 * exhausted or the used percentages do not sum to 100 (allocated names
 * are freed in the elided error-loop body).
 */
272 graph_node_count_edges(uint32_t stage, uint16_t node, uint16_t nodes_per_stage,
273 uint8_t edge_map[][nodes_per_stage][nodes_per_stage],
274 char *ename[], struct test_node_data *node_data,
275 rte_node_t **node_map)
277 uint8_t total_percent = 0;
281 for (i = 0; i < nodes_per_stage && edges < MAX_EDGES_PER_NODE; i++) {
/* Non-zero map entry == an edge with that enqueue percentage */
282 if (edge_map[stage + 1][i][node]) {
283 ename[edges] = malloc(sizeof(char) * RTE_NODE_NAMESIZE);
284 snprintf(ename[edges], RTE_NODE_NAMESIZE, "%s",
285 rte_node_id_to_name(node_map[stage + 1][i]));
286 node_data->next_nodes[edges] = node_map[stage + 1][i];
287 node_data->next_percentage[edges] =
288 edge_map[stage + 1][i][node];
290 total_percent += edge_map[stage + 1][i][node];
294 if (edges >= MAX_EDGES_PER_NODE || (edges && total_percent != 100)) {
295 for (i = 0; i < edges; i++)
297 return RTE_EDGE_ID_INVALID;
/*
 * Build the test graph 'gname': reserve the shared memzone, clone
 * nb_srcs source nodes, stages x nodes_per_stage worker nodes and
 * nb_sinks sink nodes, wire them according to src_map / edge_map /
 * snk_map (per-edge enqueue percentages, each used row summing to 100),
 * record every node's config in graph_data->node_data, then create the
 * rte_graph from the collected node-name patterns.
 * NOTE(review): many lines (local declarations, closing braces, the
 * goto cleanup labels and free paths) are elided in this view, so the
 * cleanup ordering shown is only partial.
 */
304 graph_init(const char *gname, uint8_t nb_srcs, uint8_t nb_sinks,
305 uint32_t stages, uint16_t nodes_per_stage,
306 uint8_t src_map[][nodes_per_stage], uint8_t snk_map[][nb_sinks],
307 uint8_t edge_map[][nodes_per_stage][nodes_per_stage],
310 struct test_graph_perf *graph_data;
311 char nname[RTE_NODE_NAMESIZE / 2];
312 struct test_node_data *node_data;
313 char *ename[nodes_per_stage];
314 struct rte_graph_param gconf;
315 const struct rte_memzone *mz;
316 uint8_t total_percent = 0;
317 rte_node_t *src_nodes;
318 rte_node_t *snk_nodes;
319 rte_node_t **node_map;
320 char **node_patterns;
321 rte_graph_t graph_id;
/* Shared memzone lets node init callbacks see this config */
326 mz = rte_memzone_reserve(TEST_GRAPH_PERF_MZ,
327 sizeof(struct test_graph_perf), 0, 0);
329 printf("Failed to allocate graph common memory\n");
333 graph_data = mz->addr;
334 graph_data->nb_nodes = 0;
335 graph_data->node_data =
336 malloc(sizeof(struct test_node_data) *
337 (nb_srcs + nb_sinks + stages * nodes_per_stage));
338 if (graph_data->node_data == NULL) {
339 printf("Failed to reserve memzone for graph data\n");
343 node_patterns = malloc(sizeof(char *) *
344 (nb_srcs + nb_sinks + stages * nodes_per_stage));
345 if (node_patterns == NULL) {
346 printf("Failed to reserve memory for node patterns\n");
350 src_nodes = malloc(sizeof(rte_node_t) * nb_srcs);
351 if (src_nodes == NULL) {
352 printf("Failed to reserve memory for src nodes\n");
356 snk_nodes = malloc(sizeof(rte_node_t) * nb_sinks);
357 if (snk_nodes == NULL) {
358 printf("Failed to reserve memory for snk nodes\n");
/* Single allocation: 'stages' row pointers followed by the 2-D id array */
362 node_map = malloc(sizeof(rte_node_t *) * stages +
363 sizeof(rte_node_t) * nodes_per_stage * stages);
364 if (node_map == NULL) {
365 printf("Failed to reserve memory for node map\n");
369 /* Setup the Graph */
370 for (i = 0; i < stages; i++) {
/* Point row i into the flat id array that follows the pointers */
372 (rte_node_t *)(node_map + stages) + nodes_per_stage * i;
373 for (j = 0; j < nodes_per_stage; j++) {
375 for (k = 0; k < nodes_per_stage; k++)
376 total_percent += edge_map[i][j][k];
379 node_patterns[graph_data->nb_nodes] =
380 malloc(RTE_NODE_NAMESIZE);
381 if (node_patterns[graph_data->nb_nodes] == NULL) {
382 printf("Failed to create memory for pattern\n");
383 goto pattern_name_free;
386 /* Clone a worker node */
387 snprintf(nname, sizeof(nname), "%d-%d", i, j);
389 graph_node_get(TEST_GRAPH_WRK_NAME, nname);
390 if (node_map[i][j] == RTE_NODE_ID_INVALID) {
391 printf("Failed to create node[%s]\n", nname);
392 graph_data->nb_nodes++;
393 goto pattern_name_free;
395 snprintf(node_patterns[graph_data->nb_nodes],
396 RTE_NODE_NAMESIZE, "%s",
397 rte_node_id_to_name(node_map[i][j]));
399 &graph_data->node_data[graph_data->nb_nodes];
400 node_data->node_id = node_map[i][j];
401 node_data->is_sink = false;
402 graph_data->nb_nodes++;
/* Wire worker stage i -> stage i+1 (last stage connects to sinks below) */
406 for (i = 0; i < stages - 1; i++) {
407 for (j = 0; j < nodes_per_stage; j++) {
408 /* Count edges i.e connections of worker node to next */
410 graph_get_node_data(graph_data, node_map[i][j]);
411 edges = graph_node_count_edges(i, j, nodes_per_stage,
413 node_data, node_map);
414 if (edges == RTE_EDGE_ID_INVALID) {
415 printf("Invalid edge configuration\n");
416 goto pattern_name_free;
421 /* Connect a node in stage 'i' to nodes
422 * in stage 'i + 1' with edges.
424 count = rte_node_edge_update(
426 (const char **)(uintptr_t)ename, edges);
427 for (k = 0; k < edges; k++)
429 if (count != edges) {
430 printf("Couldn't add edges %d %d\n", edges,
432 goto pattern_name_free;
437 /* Setup Source nodes */
438 for (i = 0; i < nb_srcs; i++) {
441 node_patterns[graph_data->nb_nodes] = malloc(RTE_NODE_NAMESIZE);
442 if (node_patterns[graph_data->nb_nodes] == NULL) {
443 printf("Failed to create memory for pattern\n");
444 goto pattern_name_free;
446 /* Clone a source node */
447 snprintf(nname, sizeof(nname), "%d", i);
449 graph_node_get(burst_one ? TEST_GRAPH_SRC_BRST_ONE_NAME
450 : TEST_GRAPH_SRC_NAME,
452 if (src_nodes[i] == RTE_NODE_ID_INVALID) {
453 printf("Failed to create node[%s]\n", nname);
454 graph_data->nb_nodes++;
455 goto pattern_name_free;
457 snprintf(node_patterns[graph_data->nb_nodes], RTE_NODE_NAMESIZE,
458 "%s", rte_node_id_to_name(src_nodes[i]));
459 node_data = &graph_data->node_data[graph_data->nb_nodes];
460 node_data->node_id = src_nodes[i];
461 node_data->is_sink = false;
462 graph_data->nb_nodes++;
464 /* Prepare next node list to connect to */
465 for (j = 0; j < nodes_per_stage; j++) {
468 ename[edges] = malloc(sizeof(char) * RTE_NODE_NAMESIZE);
469 snprintf(ename[edges], RTE_NODE_NAMESIZE, "%s",
470 rte_node_id_to_name(node_map[0][j]));
471 node_data->next_nodes[edges] = node_map[0][j];
472 node_data->next_percentage[edges] = src_map[i][j];
474 total_percent += src_map[i][j];
479 if (edges >= MAX_EDGES_PER_NODE || total_percent != 100) {
480 printf("Invalid edge configuration\n");
481 for (j = 0; j < edges; j++)
483 goto pattern_name_free;
486 /* Connect to list of next nodes using edges */
487 count = rte_node_edge_update(src_nodes[i], 0,
488 (const char **)(uintptr_t)ename,
490 for (k = 0; k < edges; k++)
492 if (count != edges) {
493 printf("Couldn't add edges %d %d\n", edges, count);
494 goto pattern_name_free;
498 /* Setup Sink nodes */
499 for (i = 0; i < nb_sinks; i++) {
500 node_patterns[graph_data->nb_nodes] = malloc(RTE_NODE_NAMESIZE);
501 if (node_patterns[graph_data->nb_nodes] == NULL) {
502 printf("Failed to create memory for pattern\n");
503 goto pattern_name_free;
506 /* Clone a sink node */
507 snprintf(nname, sizeof(nname), "%d", i);
508 snk_nodes[i] = graph_node_get(TEST_GRAPH_SNK_NAME, nname);
509 if (snk_nodes[i] == RTE_NODE_ID_INVALID) {
510 printf("Failed to create node[%s]\n", nname);
511 graph_data->nb_nodes++;
512 goto pattern_name_free;
514 snprintf(node_patterns[graph_data->nb_nodes], RTE_NODE_NAMESIZE,
515 "%s", rte_node_id_to_name(snk_nodes[i]));
516 node_data = &graph_data->node_data[graph_data->nb_nodes];
517 node_data->node_id = snk_nodes[i];
518 node_data->is_sink = true;
519 graph_data->nb_nodes++;
522 /* Connect last stage worker nodes to sink nodes */
523 for (i = 0; i < nodes_per_stage; i++) {
526 node_data = graph_get_node_data(graph_data,
527 node_map[stages - 1][i]);
528 /* Prepare list of sink nodes to connect to */
529 for (j = 0; j < nb_sinks; j++) {
532 ename[edges] = malloc(sizeof(char) * RTE_NODE_NAMESIZE);
533 snprintf(ename[edges], RTE_NODE_NAMESIZE, "%s",
534 rte_node_id_to_name(snk_nodes[j]));
535 node_data->next_nodes[edges] = snk_nodes[j];
536 node_data->next_percentage[edges] = snk_map[i][j];
538 total_percent += snk_map[i][j];
542 if (edges >= MAX_EDGES_PER_NODE || total_percent != 100) {
543 printf("Invalid edge configuration\n");
544 for (j = 0; j < edges; j++)
546 goto pattern_name_free;
549 /* Connect a worker node to a list of sink nodes */
550 count = rte_node_edge_update(node_map[stages - 1][i], 0,
551 (const char **)(uintptr_t)ename,
553 for (k = 0; k < edges; k++)
555 if (count != edges) {
556 printf("Couldn't add edges %d %d\n", edges, count);
557 goto pattern_name_free;
/* All nodes wired: create the graph from the collected name patterns */
562 gconf.socket_id = SOCKET_ID_ANY;
563 gconf.nb_node_patterns = graph_data->nb_nodes;
564 gconf.node_patterns = (const char **)(uintptr_t)node_patterns;
566 graph_id = rte_graph_create(gname, &gconf);
567 if (graph_id == RTE_GRAPH_ID_INVALID) {
568 printf("Graph creation failed with error = %d\n", rte_errno);
569 goto pattern_name_free;
571 graph_data->graph_id = graph_id;
/* Success path: pattern strings no longer needed once graph exists */
573 for (i = 0; i < graph_data->nb_nodes; i++)
574 free(node_patterns[i]);
/* Error path (labels elided): release patterns, config and memzone */
581 for (i = 0; i < graph_data->nb_nodes; i++)
582 free(node_patterns[i]);
590 free(graph_data->node_data);
592 rte_memzone_free(mz);
596 /* Worker thread function */
/* Lcore entry point: resolve this thread's rte_graph instance by name
 * and walk it repeatedly (the loop condition is elided in this view —
 * presumably it polls a done flag in graph_lcore_data; confirm against
 * measure_perf_get()).
 */
598 _graph_perf_wrapper(void *args)
600 struct graph_lcore_data *data = args;
601 struct rte_graph *graph;
/* Each lcore must look up its own graph object */
604 graph = rte_graph_lookup(rte_graph_id_to_name(data->graph_id));
606 /* Graph walk until done */
608 rte_graph_walk(graph);
614 measure_perf_get(rte_graph_t graph_id)
616 const char *pattern = rte_graph_id_to_name(graph_id);
617 uint32_t lcore_id = rte_get_next_lcore(-1, 1, 0);
618 struct rte_graph_cluster_stats_param param;
619 struct rte_graph_cluster_stats *stats;
620 struct graph_lcore_data *data;
622 data = rte_zmalloc("Graph_perf", sizeof(struct graph_lcore_data),
623 RTE_CACHE_LINE_SIZE);
624 data->graph_id = graph_id;
627 /* Run graph worker thread function */
628 rte_eal_remote_launch(_graph_perf_wrapper, data, lcore_id);
630 /* Collect stats for few msecs */
631 if (rte_graph_has_stats_feature()) {
632 memset(¶m, 0, sizeof(param));
634 param.socket_id = SOCKET_ID_ANY;
635 param.graph_patterns = &pattern;
636 param.nb_graph_patterns = 1;
638 stats = rte_graph_cluster_stats_create(¶m);
640 printf("Failed to create stats\n");
645 rte_graph_cluster_stats_get(stats, true);
647 rte_graph_cluster_stats_get(stats, false);
648 rte_graph_cluster_stats_destroy(stats);
653 rte_eal_wait_lcore(lcore_id);
/* NOTE(review): function signature elided — this is the per-test-case
 * teardown (graph_fini): destroy the graph, free the per-node config
 * array and release the shared memzone.
 */
661 const struct rte_memzone *mz = rte_memzone_lookup(TEST_GRAPH_PERF_MZ);
662 struct test_graph_perf *graph_data;
666 graph_data = mz->addr;
668 rte_graph_destroy(graph_data->graph_id);
669 free(graph_data->node_data);
670 rte_memzone_free(rte_memzone_lookup(TEST_GRAPH_PERF_MZ));
/* NOTE(review): function signature elided — this is measure_perf():
 * look up the shared memzone and run the measurement on the stored
 * graph id.
 */
676 const struct rte_memzone *mz;
677 struct test_graph_perf *graph_data;
679 mz = rte_memzone_lookup(TEST_GRAPH_PERF_MZ);
680 graph_data = mz->addr;
682 return measure_perf_get(graph_data->graph_id);
/* Hourglass: 4 stages, 1 node/stage, 1 source, 1 sink. */
686 graph_hr_4s_1n_1src_1snk(void)
688 return measure_perf();
/* Hourglass with burst-of-one source (per-object overhead path). */
692 graph_hr_4s_1n_1src_1snk_brst_one(void)
694 return measure_perf();
/* Hourglass: 4 stages, 1 node/stage, 2 sources, 1 sink. */
698 graph_hr_4s_1n_2src_1snk(void)
700 return measure_perf();
/* Hourglass: 4 stages, 1 node/stage, 1 source, 2 sinks. */
704 graph_hr_4s_1n_1src_2snk(void)
706 return measure_perf();
/* Fan-out tree: 4 stages, 4 nodes/stage, 1 source, 4 sinks. */
710 graph_tree_4s_4n_1src_4snk(void)
712 return measure_perf();
/* Fan-in (reverse) tree: 3 stages, 4 nodes/stage, 1 source, 1 sink. */
716 graph_reverse_tree_3s_4n_1src_1snk(void)
718 return measure_perf();
/* Parallel chains: 5 stages, 4 nodes/stage, 4 sources, 4 sinks. */
722 graph_parallel_tree_5s_4n_4src_4snk(void)
724 return measure_perf();
/* NOTE(review): signature elided — graph_init_hr(): single-path
 * hourglass, 100% of traffic through the one node per stage.
 */
736 uint8_t edge_map[][1][1] = {
742 uint8_t src_map[][1] = { {100} };
743 uint8_t snk_map[][1] = { {100} };
745 return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
746 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
747 snk_map, edge_map, 0);
/* Same hourglass topology as graph_init_hr() but with the burst-of-one
 * source node (final argument burst_one = 1).
 */
757 graph_init_hr_brst_one(void)
759 uint8_t edge_map[][1][1] = {
765 uint8_t src_map[][1] = { {100} };
766 uint8_t snk_map[][1] = { {100} };
768 return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
769 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
770 snk_map, edge_map, 1);
/* Hourglass with two source nodes feeding stage 0 (src_map rows elided
 * in this view).
 */
780 graph_init_hr_multi_src(void)
782 uint8_t edge_map[][1][1] = {
788 uint8_t src_map[][1] = {
791 uint8_t snk_map[][1] = { {100} };
793 return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
794 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
795 snk_map, edge_map, 0);
/* Hourglass with two sinks: the last worker splits 50/50 between them. */
805 graph_init_hr_multi_snk(void)
807 uint8_t edge_map[][1][1] = {
813 uint8_t src_map[][1] = { {100} };
814 uint8_t snk_map[][2] = { {50, 50} };
816 return graph_init("graph_hr", SOURCES(src_map), SINKS(snk_map),
817 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
818 snk_map, edge_map, 0);
/* Full fan-out tree: one source feeds node 0 of stage 0; each stage
 * splits toward all four next-stage nodes (edge_map rows elided).
 */
828 graph_init_tree(void)
830 uint8_t edge_map[][4][4] = {
856 uint8_t src_map[][4] = { {100, 0, 0, 0} };
857 uint8_t snk_map[][4] = {
864 return graph_init("graph_full_split", SOURCES(src_map), SINKS(snk_map),
865 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
866 snk_map, edge_map, 0);
/* Fan-in tree: the source spreads 25% to each stage-0 node; flows merge
 * toward fewer nodes per stage (edge_map rows elided), ending in one
 * used sink.
 */
876 graph_init_reverse_tree(void)
878 uint8_t edge_map[][4][4] = {
898 uint8_t src_map[][4] = { {25, 25, 25, 25} };
899 uint8_t snk_map[][1] = { {100}, {100}, {0}, {0} };
901 return graph_init("graph_full_split", SOURCES(src_map), SINKS(snk_map),
902 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
903 snk_map, edge_map, 0);
/* Four independent parallel chains: each source feeds its own column of
 * workers through every stage into its own sink (map rows elided).
 */
913 graph_init_parallel_tree(void)
915 uint8_t edge_map[][4][4] = {
947 uint8_t src_map[][4] = {
953 uint8_t snk_map[][4] = {
960 return graph_init("graph_parallel", SOURCES(src_map), SINKS(snk_map),
961 STAGES(edge_map), NODES_PER_STAGE(edge_map), src_map,
962 snk_map, edge_map, 0);
965 /** Graph Creation cheat sheet
966 * edge_map -> dictates graph flow from worker stage 0 to worker stage n-1.
967 * src_map -> dictates source nodes enqueue percentage to worker stage 0.
968 * snk_map -> dictates stage n-1 enqueue percentage to sink.
971 * edge_map[<nb_stages>][<nodes_per_stg>][<nodes_in_nxt_stg = nodes_per_stg>]
972 * src_map[<nb_sources>][<nodes_in_stage0 = nodes_per_stage>]
973 * snk_map[<nodes_in_stage(n-1) = nodes_per_stage>][<nb_sinks>]
975 * The last array dictates the percentage of received objs to enqueue to next
978 * Note: edge_map[][0][] will always be unused as it will receive from source
982 * http://bit.ly/2PqbqOy
983 * Each stage(n) connects to all nodes in the next stage in decreasing
985 * Since we can't resize the edge_map dynamically we get away by creating
986 * dummy nodes and assigning 0 percentages.
987 * Max nodes across all stages = 4
992 * edge_map[][4][4] = {
999 * }, // This will be unused.
1001 * // Nodes enabled in current stage + prev stage enq %
1014 * Above, each stage tells how much it should receive from previous except
1017 * src_map[][4] = { {25, 25, 25, 25} };
1018 * Here, we tell each source the % it has to send to stage_0 nodes. In
1019 * case we want 2 source node we can declare as
1020 * src_map[][4] = { {25, 25, 25, 25}, {25, 25, 25, 25} };
1022 * snk_map[][1] = { {100}, {100}, {0}, {0} }
1023 * Here, we tell stage - 1 nodes how much to enqueue to sink_0.
1024 * If we have 2 sinks we can do as follows
1025 * snk_map[][2] = { {50, 50}, {50, 50}, {0, 0}, {0, 0} }
/* Test suite: each case builds its topology via the graph_init_* setup
 * hook, runs the measurement, then tears down through graph_fini.
 */
1028 static struct unit_test_suite graph_perf_testsuite = {
1029 .suite_name = "Graph library performance test suite",
1030 .setup = graph_perf_setup,
1031 .teardown = graph_perf_teardown,
1032 .unit_test_cases = {
1033 TEST_CASE_ST(graph_init_hr, graph_fini,
1034 graph_hr_4s_1n_1src_1snk),
1035 TEST_CASE_ST(graph_init_hr_brst_one, graph_fini,
1036 graph_hr_4s_1n_1src_1snk_brst_one),
1037 TEST_CASE_ST(graph_init_hr_multi_src, graph_fini,
1038 graph_hr_4s_1n_2src_1snk),
1039 TEST_CASE_ST(graph_init_hr_multi_snk, graph_fini,
1040 graph_hr_4s_1n_1src_2snk),
1041 TEST_CASE_ST(graph_init_tree, graph_fini,
1042 graph_tree_4s_4n_1src_4snk),
1043 TEST_CASE_ST(graph_init_reverse_tree, graph_fini,
1044 graph_reverse_tree_3s_4n_1src_1snk),
1045 TEST_CASE_ST(graph_init_parallel_tree, graph_fini,
1046 graph_parallel_tree_5s_4n_4src_4snk),
1047 TEST_CASES_END(), /**< NULL terminate unit test array */
/* Entry point registered with the test framework: run the whole suite. */
1052 test_graph_perf_func(void)
1054 return unit_test_suite_runner(&graph_perf_testsuite);
1057 REGISTER_TEST_COMMAND(graph_perf_autotest, test_graph_perf_func);