diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
index eecb7c851cc34c7745561275c5f700f2df35e0e3..2427c20a7685aa7336cd62bf8b0df4e3c1361a82 100644
--- a/.gitlab-ci.yaml
+++ b/.gitlab-ci.yaml
@@ -291,18 +291,9 @@ api_smoketest_benchmark:
     - hivemind
 
   script:
-    - tox -e benchmark -- localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern
+    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
 
   artifacts:
     when: always
     paths:
-      - tavern_report_benchmark_bridge_api_patterns.html
-      - tavern_report_benchmark_bridge_api_negative.html
-      - tavern_report_benchmark_condenser_api_patterns.html
-      - tavern_report_benchmark_condenser_api_negative.html
-      - tavern_report_benchmark_database_api_patterns.html
-      - tavern_report_benchmark_database_api_negative.html
-      - tavern_report_benchmark_follow_api_patterns.html
-      - tavern_report_benchmark_follow_api_negative.html
-      - tavern_report_benchmark_tags_api_patterns.html
-      - tavern_report_benchmark_tags_api_negative.html
+      - tavern_benchmarks_report.html
diff --git a/scripts/ci_start_api_benchmarks.sh b/scripts/ci_start_api_benchmarks.sh
new file mode 100755
index 0000000000000000000000000000000000000000..361629b8dbe9e714e295cc4b7a2ab695445663af
--- /dev/null
+++ b/scripts/ci_start_api_benchmarks.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+pip3 install tox --user
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+export TAVERN_DISABLE_COMPARATOR=true
+
+echo "Attempting to start benchmarks on hivemind instance listening on: $HIVEMIND_ADDRESS port: $HIVEMIND_PORT"
+
+ITERATIONS=$3
+
+for (( i=0; i<$ITERATIONS; i++ ))
+do
+  echo "About to run iteration $i"
+  tox -e tavern-benchmark -- -W ignore::pytest.PytestDeprecationWarning -n auto --junitxml=../../../../benchmarks-$i.xml
+  echo "Done!"
+done
+./scripts/xml_report_parser.py . ./tests/tests_api/hivemind/tavern
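+
+# Usage: ci_start_api_benchmarks.sh <address> <port> <iterations>
+# Each tox run writes a benchmarks-<i>.xml junit report at the repository root;
+# xml_report_parser.py then aggregates them into tavern_benchmarks_report.html.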
diff --git a/scripts/xml_report_parser.py b/scripts/xml_report_parser.py
new file mode 100755
index 0000000000000000000000000000000000000000..f229e9fc8ebf2c115f51a7255dfc000f31bc657b
--- /dev/null
+++ b/scripts/xml_report_parser.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python3
+import os
+
+from xml.dom import minidom
+
+def process_file_name(file_name, tavern_root_dir):
+    tavern_root_dir_dot = tavern_root_dir.replace("/", ".")
+    file_name_dot = file_name.replace("/", ".")
+    return file_name_dot.replace(tavern_root_dir_dot, "").lstrip(".")
+
+def get_requests_from_yaml(tavern_root_dir):
+    from fnmatch import fnmatch
+    import yaml
+    from json import dumps
+    ret = {}
+    pattern = "*.tavern.yaml"
+    for path, subdirs, files in os.walk(tavern_root_dir):
+        for name in files:
+            if fnmatch(name, pattern):
+                test_file = os.path.join(path, name)
+                yaml_document = None
+                with open(test_file, "r") as yaml_file:
+                    yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
+                if "stages" in yaml_document:
+                    if "request" in yaml_document["stages"][0]:
+                        json_parameters = yaml_document["stages"][0]["request"].get("json", None)
+                        assert json_parameters is not None, "Unable to find json parameters in request"
+                        ret[process_file_name(test_file, tavern_root_dir)] = dumps(json_parameters)
+    return ret
+
+def parse_xml_files(root_dir):
+    ret = {}
+    print("Scanning path: {}".format(root_dir))
+    for name in os.listdir(root_dir):
+        file_path = os.path.join(root_dir, name)
+        if os.path.isfile(file_path) and name.startswith("benchmarks") and file_path.endswith(".xml"):
+            print("Processing file: {}".format(file_path))
+            xmldoc = minidom.parse(file_path)
+            test_cases = xmldoc.getElementsByTagName('testcase')
+            for test_case in test_cases:
+                test_name = test_case.attributes['classname'].value
+                test_time = float(test_case.attributes['time'].value)
+                if test_name in ret:
+                    ret[test_name].append(test_time)
+                else:
+                    ret[test_name] = [test_time]
+    return ret
+
+if __name__ == "__main__":
+    import argparse
+    from statistics import mean
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("xml_report_dir", type=str, help="Path to benchmark xml reports")
+    parser.add_argument("tavern_root_dir", type=str, help="Path to tavern tests root dir")
+    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution; tests whose execution time exceeds it will be marked in red.")
+    args = parser.parse_args()
+
+    assert os.path.exists(args.xml_report_dir), "Please provide a valid xml report path"
+    assert os.path.exists(args.tavern_root_dir), "Please provide a valid tavern path"
+
+    report_data = parse_xml_files(args.xml_report_dir)
+    request_data = get_requests_from_yaml(args.tavern_root_dir)
+
+    html_file = "tavern_benchmarks_report.html"
+    above_threshold = []
+    with open(html_file, "w") as ofile:
+        ofile.write("<html>\n")
+        ofile.write("  <head>\n")
+        ofile.write("    <style>\n")
+        ofile.write("      table, th, td {\n")
+        ofile.write("        border: 1px solid black;\n")
+        ofile.write("        border-collapse: collapse;\n")
+        ofile.write("      }\n")
+        ofile.write("      th, td {\n")
+        ofile.write("        padding: 15px;\n")
+        ofile.write("      }\n")
+        ofile.write("    </style>\n")
+        ofile.write("  </head>\n")
+        ofile.write("  <body>\n")
+        ofile.write("    <table>\n")
+        ofile.write("      <tr><th>Test name</th><th>Min time [s]</th><th>Max time [s]</th><th>Mean time [s]</th></tr>\n")
+        for name, data in report_data.items():
+            dmin = min(data)
+            dmax = max(data)
+            dmean = mean(data)
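+            # Tests whose mean time exceeds --time-threshold are rendered with a
+            # red cell and collected for the console summary printed at the end.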
+            if dmean > args.time_threshold:
+                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(name, request_data[name], dmin, dmax, dmean))
+                above_threshold.append((name, "{:.4f}".format(dmean), request_data[name]))
+            else:
+                ofile.write("      <tr><td>{}</td><td>{:.4f}</td><td>{:.4f}</td><td>{:.4f}</td></tr>\n".format(name, dmin, dmax, dmean))
+        ofile.write("    </table>\n")
+        ofile.write("  </body>\n")
+        ofile.write("</html>\n")
+
+    if above_threshold:
+        from prettytable import PrettyTable
+        summary = PrettyTable()
+        print("########## Test failed: the following tests exceeded the {}s threshold ##########".format(args.time_threshold))
+        summary.field_names = ['Test name', 'Mean time [s]', 'Call parameters']
+        for entry in above_threshold:
+            summary.add_row(entry)
+        print(summary)
+        exit(2)
+    exit(0)
diff --git a/tests/tests_api b/tests/tests_api
index 4640946229bce481c03f64bea01d97588340a213..1ae2a4b1367139e7954b31596eaa4bf65e6aa68e 160000
--- a/tests/tests_api
+++ b/tests/tests_api
@@ -1 +1 @@
-Subproject commit 4640946229bce481c03f64bea01d97588340a213
+Subproject commit 1ae2a4b1367139e7954b31596eaa4bf65e6aa68e
diff --git a/tox.ini b/tox.ini
index 2e3d7fb2bd9522c1d33d1c08e177d34d0c8005dc..b18444609752007d4657a825fb0f2efcdd4a1c44 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py36, tavern, benchmark
+envlist = py36, tavern, benchmark, tavern-benchmark
 skipsdist = true
 
 [testenv]
@@ -34,3 +34,18 @@
 deps =
     jsondiff
 commands = pytest {posargs}
+
+[testenv:tavern-benchmark]
+setenv =
+    {[testenv:tavern]setenv}
+
+passenv =
+    {[testenv:tavern]passenv}
+    TAVERN_DISABLE_COMPARATOR
+
+changedir = tests/tests_api/hivemind/tavern
+
+deps =
+    {[testenv:tavern]deps}
+
+commands = pytest --durations=0 {posargs}
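
A rough way to exercise the new pipeline outside CI, assuming a hivemind
instance is already serving HTTP (the port 8080 below is illustrative; the CI
job above passes $HIVEMIND_HTTP_PORT and an iteration count of 5):

    # from the repository root
    ./scripts/ci_start_api_benchmarks.sh localhost 8080 5
    # on success, tavern_benchmarks_report.html is written to the current
    # directory; exit code 2 means at least one test's mean time exceeded
    # the default 1.0 s threshold of xml_report_parser.py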