diff --git a/scripts/ci/json_report_parser.py b/scripts/ci/json_report_parser.py
index 8809c145388e2e762586d09fc2a6b7a371ee7b83..7403dfd3130380af606e9245c571aa446d450726 100755
--- a/scripts/ci/json_report_parser.py
+++ b/scripts/ci/json_report_parser.py
@@ -80,16 +80,16 @@ def json_report_parser(path_to_test_dir, json_file, time_threshold=1.0):
         ofile.write("  </head>\n")
         ofile.write("  <body>\n")
         ofile.write("    <table>\n")
-        ofile.write("      <tr><th>Test name</th><th>Min time [s]</th><th>Max time [s]</th><th>Mean time [s]</th></tr>\n")
+        ofile.write("      <tr><th>Test name</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th></tr>\n")
         json_data = None
         with open(json_file, "r") as json_file:
             json_data = load(json_file)
         for benchmark in json_data['benchmarks']:
             if float(benchmark['stats']['mean']) > time_threshold:
-                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(benchmark['name'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)), benchmark['stats']['min'], benchmark['stats']['max'], benchmark['stats']['mean']))
-                above_treshold.append((benchmark['name'], "{:.4f}".format(benchmark['stats']['mean']), get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic))))
+                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(benchmark['name'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)), benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
+                above_treshold.append((benchmark['name'], "{:.4f}".format(benchmark['stats']['mean'] * 1000), get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic))))
             else:
-                ofile.write("      <tr><td>{}</td><td>{:.4f}</td><td>{:.4f}</td><td>{:.4f}</td></tr>\n".format(benchmark['name'], benchmark['stats']['min'], benchmark['stats']['max'], benchmark['stats']['mean']))
+                ofile.write("      <tr><td>{}</td><td>{:.4f}</td><td>{:.4f}</td><td>{:.4f}</td></tr>\n".format(benchmark['name'], benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
         ofile.write("    </table>\n")
         ofile.write("  </body>\n")
         ofile.write("</html>\n")
diff --git a/scripts/ci/start_api_benchmark.py b/scripts/ci/start_api_benchmark.py
index cd14aa5706a290b06450054da9b9924e9caddbeb..92fb6e73d6107beeea3aa40b167f1ac5975831a1 100755
--- a/scripts/ci/start_api_benchmark.py
+++ b/scripts/ci/start_api_benchmark.py
@@ -101,8 +101,8 @@ if __name__ == "__main__":
     if failed:
         from prettytable import PrettyTable
         summary = PrettyTable()
-        print("########## Test failed with following tests above {}s threshold ##########".format(args.time_threshold))
-        summary.field_names = ['Test name', 'Mean time [s]', 'Call parameters']
+        print("########## Test failed with following tests above {}ms threshold ##########".format(args.time_threshold * 1000))
+        summary.field_names = ['Test name', 'Mean time [ms]', 'Call parameters']
         for entry in failed:
             summary.add_row(entry)
         print(summary)
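
For context, the same conversion is applied at every changed call site: pytest-benchmark stores its 'stats' values in seconds, so each value is multiplied by 1000 for display while the threshold comparison itself stays in seconds. The standalone sketch below illustrates that logic under the JSON layout implied by the diff; it is not part of either script, and the file name "benchmark.json" and the summarize helper are hypothetical.

    # Minimal sketch (assumption: pytest-benchmark JSON with a top-level
    # 'benchmarks' list, each entry carrying 'name' and 'stats' in seconds).
    from json import load

    TIME_THRESHOLD_S = 1.0  # threshold kept in seconds, as in the scripts

    def summarize(json_path):
        with open(json_path, "r") as fh:
            report = load(fh)

        slow = []
        for benchmark in report["benchmarks"]:
            mean_s = float(benchmark["stats"]["mean"])
            if mean_s > TIME_THRESHOLD_S:
                # convert seconds to milliseconds only for presentation
                slow.append((benchmark["name"], "{:.4f}".format(mean_s * 1000)))
        return slow

    if __name__ == "__main__":
        for name, mean_ms in summarize("benchmark.json"):
            print("{} exceeded threshold: mean {} ms".format(name, mean_ms))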