1 #! /usr/bin/env python3
2 # SPDX-License-Identifier: BSD-3-Clause
3 # Copyright(c) 2021 Intel Corporation
"""
Script to automate running crypto performance tests for a range of test
cases as configured in the JSON file specified by the user.
The results are processed and output into various graphs in PDF files.
Currently, throughput and latency tests are supported.
"""
import glob
import json
import os
import shutil
import subprocess
from argparse import ArgumentDefaultsHelpFormatter
from argparse import ArgumentParser

import img2pdf
import pandas as pd
import plotly.express as px
23 SCRIPT_PATH = os.path.dirname(__file__) + "/"
24 GRAPH_DIR = "temp_graphs"
28 """Grapher object containing all graphing functions. """
29 def __init__(self, config, suite, graph_path):
31 self.graph_path = graph_path
36 self.data = pd.DataFrame()
38 def save_graph(self, fig, subdir):
40 Update figure layout to increase readability, output to JPG file.
42 path = os.path.join(self.graph_path, subdir, "")
43 if not os.path.exists(path):
45 fig.update_layout(font_size=30, title_x=0.5, title_font={"size": 25},
46 margin={'t': 300, 'l': 150, 'r': 150, 'b': 150})
47 fig.write_image(path + "%d.jpg" % self.graph_num)
49 def boxplot_graph(self, x_axis_label, burst, buffer):
50 """Plot a boxplot graph for the given parameters."""
51 fig = px.box(self.data, x=x_axis_label,
52 title="Config: " + self.config + "<br>Test Suite: " +
53 self.suite + "<br>" + self.test +
54 "<br>(Outliers Included)<br>Burst Size: " + burst +
55 ", Buffer Size: " + buffer,
56 height=1400, width=2400)
57 self.save_graph(fig, x_axis_label.replace(' ', '_'))
60 def grouped_graph(self, y_axis_label, x_axis_label, color_label):
61 """Plot a grouped barchart using the given parameters."""
62 if (self.data[y_axis_label] == 0).all():
64 fig = px.bar(self.data, x=x_axis_label, color=color_label,
66 title="Config: " + self.config + "<br>Test Suite: " +
67 self.suite + "<br>" + self.test + "<br>"
68 + y_axis_label + " for each " + x_axis_label +
69 "/" + color_label, barmode="group", height=1400,
71 fig.update_xaxes(type='category')
72 self.save_graph(fig, y_axis_label.replace(' ', '_'))
75 def histogram_graph(self, x_axis_label, burst, buffer):
76 """Plot a histogram graph using the given parameters."""
77 quart1 = self.data[x_axis_label].quantile(0.25)
78 quart3 = self.data[x_axis_label].quantile(0.75)
79 inter_quart_range = quart3 - quart1
80 data_out = self.data[~((self.data[x_axis_label] <
81 (quart1 - 1.5 * inter_quart_range)) |
82 (self.data[x_axis_label] >
83 (quart3 + 1.5 * inter_quart_range)))]
84 fig = px.histogram(data_out, x=x_axis_label,
85 title="Config: " + self.config + "<br>Test Suite: "
86 + self.suite + "<br>" + self.test
87 + "<br>(Outliers removed using Interquartile Range)"
88 + "<br>Burst Size: " + burst + ", Buffer Size: " +
89 buffer, height=1400, width=2400)
90 max_val = data_out[x_axis_label].max()
91 min_val = data_out[x_axis_label].min()
92 fig.update_traces(xbins=dict(
95 size=(max_val - min_val) / 200
97 self.save_graph(fig, x_axis_label.replace(' ', '_'))
101 def cleanup_throughput_datatypes(data):
102 """Cleanup data types of throughput test results dataframe. """
103 data.columns = data.columns.str.replace('/', ' ')
104 data.columns = data.columns.str.strip()
105 data['Burst Size'] = data['Burst Size'].astype('category')
106 data['Buffer Size(B)'] = data['Buffer Size(B)'].astype('category')
107 data['Failed Enq'] = data['Failed Enq'].astype('int')
108 data['Throughput(Gbps)'] = data['Throughput(Gbps)'].astype('float')
109 data['Ops(Millions)'] = data['Ops(Millions)'].astype('float')
110 data['Cycles Buf'] = data['Cycles Buf'].astype('float')
114 def cleanup_latency_datatypes(data):
115 """Cleanup data types of latency test results dataframe. """
116 data.columns = data.columns.str.strip()
117 data = data[['Burst Size', 'Buffer Size', 'time (us)']].copy()
118 data['Burst Size'] = data['Burst Size'].astype('category')
119 data['Buffer Size'] = data['Buffer Size'].astype('category')
120 data['time (us)'] = data['time (us)'].astype('float')
124 def process_test_results(grapher, data):
126 Process results from the test case,
127 calling graph functions to output graph images.
129 if grapher.ptest == "throughput":
130 grapher.data = cleanup_throughput_datatypes(data)
131 for y_label in ["Throughput(Gbps)", "Ops(Millions)",
132 "Cycles Buf", "Failed Enq"]:
133 grapher.grouped_graph(y_label, "Buffer Size(B)",
135 elif grapher.ptest == "latency":
136 clean_data = cleanup_latency_datatypes(data)
137 for (burst, buffer), group in clean_data.groupby(['Burst Size',
140 grapher.histogram_graph("time (us)", burst, buffer)
141 grapher.boxplot_graph("time (us)", burst, buffer)
143 print("Invalid ptest")
147 def create_results_pdf(graph_path, pdf_path):
148 """Output results graphs to PDFs."""
149 if not os.path.exists(pdf_path):
150 os.makedirs(pdf_path)
151 for _, dirs, _ in os.walk(graph_path):
153 graphs = sorted(glob.glob(os.path.join(graph_path, sub, "*.jpg")),
154 key=(lambda x: int((x.rsplit('/', 1)[1])
157 with open(pdf_path + "%s_results.pdf" % sub, "wb") as pdf_file:
158 pdf_file.write(img2pdf.convert(graphs))
161 def run_test(test_cmd, test, grapher, params, verbose):
162 """Run performance test app for the given test case parameters."""
163 process = subprocess.Popen(["stdbuf", "-oL", test_cmd] + params,
164 universal_newlines=True,
165 stdout=subprocess.PIPE,
166 stderr=subprocess.STDOUT)
169 print("\n\tOutput for " + test + ":")
170 while process.poll() is None:
171 line = process.stdout.readline().strip()
175 print("\t\t>>" + line)
177 if line.replace(' ', '').startswith('#lcore'):
178 columns = line[1:].split(',')
179 elif line[0].isdigit():
180 line = line.replace(';', ',')
181 rows.append(line.split(','))
185 if process.poll() != 0 or not columns or not rows:
186 print("\n\t" + test + ": FAIL")
188 data = pd.DataFrame(rows, columns=columns)
190 process_test_results(grapher, data)
191 print("\n\t" + test + ": OK")
195 def parse_parameters(config_parameters):
196 """Convert the JSON config to list of strings."""
198 for (key, val) in config_parameters:
199 if isinstance(val, bool):
200 params.append("--" + key if val is True else "")
202 params.append("-" + key)
205 params.append("--" + key + "=" + val)
209 def run_test_suite(test_cmd, suite_config, verbose):
210 """Parse test cases for the test suite and run each test."""
211 print("\nRunning Test Suite: " + suite_config['suite'])
212 graph_path = os.path.join(suite_config['output_path'], GRAPH_DIR,
213 suite_config['suite'], "")
214 grapher = Grapher(suite_config['config_name'], suite_config['suite'],
216 test_cases = suite_config['test_cases']
217 if 'default' not in test_cases:
218 print("Test Suite must contain default case, skipping")
221 default_params = parse_parameters(test_cases['default']['eal'].items())
222 default_params.append("--")
223 default_params += parse_parameters(test_cases['default']['app'].items())
225 if 'ptest' not in test_cases['default']['app']:
226 print("Test Suite must contain default ptest value, skipping")
228 grapher.ptest = test_cases['default']['app']['ptest']
230 for (test, params) in {k: v for (k, v) in test_cases.items() if
231 k != "default"}.items():
232 extra_params = parse_parameters(params.items())
233 run_test(test_cmd, test, grapher, default_params + extra_params,
236 create_results_pdf(graph_path, os.path.join(suite_config['output_path'],
237 suite_config['suite'], ""))
241 """Parse command-line arguments passed to script."""
242 parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
243 parser.add_argument('config_path', type=str,
244 help="Path to JSON configuration file")
245 parser.add_argument('-t', '--test-suites', nargs='+', default=["all"],
246 help="List of test suites to run")
247 parser.add_argument('-v', '--verbose', action='store_true',
248 help="""Display perf test app output.
249 Not recommended for latency tests.""")
250 parser.add_argument('-f', '--file-path',
251 default=shutil.which('dpdk-test-crypto-perf'),
252 help="Path for perf test app")
253 parser.add_argument('-o', '--output-path', default=SCRIPT_PATH,
254 help="Path to store output directories")
255 args = parser.parse_args()
256 return (args.file_path, args.test_suites, args.config_path,
257 args.output_path, args.verbose)
262 Load JSON config and call relevant functions to run chosen test suites.
264 test_cmd, test_suites, config_file, output_path, verbose = parse_args()
265 if test_cmd is None or not os.path.isfile(test_cmd):
266 print("Invalid filepath for perf test app!")
269 with open(config_file) as conf:
270 test_suite_ops = json.load(conf)
271 config_name = os.path.splitext(config_file)[0]
272 if '/' in config_name:
273 config_name = config_name.rsplit('/', 1)[1]
274 output_path = os.path.join(output_path, config_name, "")
275 print("Using config: " + config_file)
276 except OSError as err:
277 print("Error with JSON file path: " + err.strerror)
279 except json.decoder.JSONDecodeError as err:
280 print("Error loading JSON config: " + err.msg)
283 if test_suites != ["all"]:
285 for (suite, test_cases) in {k: v for (k, v) in test_suite_ops.items()
286 if k in test_suites}.items():
287 suite_list.append(suite)
288 suite_config = {'config_name': config_name, 'suite': suite,
289 'test_cases': test_cases,
290 'output_path': output_path}
291 run_test_suite(test_cmd, suite_config, verbose)
293 print("No valid test suites chosen!")
296 for (suite, test_cases) in test_suite_ops.items():
297 suite_config = {'config_name': config_name, 'suite': suite,
298 'test_cases': test_cases,
299 'output_path': output_path}
300 run_test_suite(test_cmd, suite_config, verbose)
302 graph_path = os.path.join(output_path, GRAPH_DIR, "")
303 if os.path.exists(graph_path):
304 shutil.rmtree(graph_path)
307 if __name__ == "__main__":