Commit 2b34d1e3 authored by Dariusz Kędzierski

Rebase to current develop

parents ebdc2e99 a2f4070e
2 merge requests: !456 Release candidate v1 24, !246 API tests execution time reports
@@ -7,6 +7,7 @@ stages:
  - data-supply
  - deploy
  - e2e-test
  - benchmark-tests
  - post-deploy
variables:
@@ -242,7 +243,7 @@ follow_api_smoketest:
  artifacts:
    reports:
      junit: api_smoketest_follow_api.xml
      junit: api_smoketest.xml
follow_api_smoketest_negative:
  <<: *common_api_smoketest_job
@@ -270,6 +271,36 @@ tags_api_smoketest_negative:
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
api_smoketest_benchmark:
  stage: benchmark-tests
  environment: hive-4.pl.syncad.com
  needs:
    - job: hivemind_start_server
      artifacts: true
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: on_success
  tags:
    - hivemind
  script:
    - tox -e benchmark -- localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern
  artifacts:
    reports:
      junit: api_smoketest_tags_api_negative.xml
    when: always
    paths:
      - tavern_report_benchmark_bridge_api_patterns.html
      - tavern_report_benchmark_bridge_api_negative.html
      - tavern_report_benchmark_condenser_api_patterns.html
      - tavern_report_benchmark_condenser_api_negative.html
      - tavern_report_benchmark_database_api_patterns.html
      - tavern_report_benchmark_database_api_negative.html
      - tavern_report_benchmark_follow_api_patterns.html
      - tavern_report_benchmark_follow_api_negative.html
      - tavern_report_benchmark_tags_api_patterns.html
      - tavern_report_benchmark_tags_api_negative.html
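The tavern_report_*.html artifact names above follow the naming convention of json_report_parser.py further down in this commit, which derives the HTML report name from the benchmark JSON file it parses. A minimal sketch of that convention (the helper name and the example group are illustrative, not part of the commit):

import os

def report_name(benchmark_json):
    # json_report_parser.py builds the report name as "tavern_report_" + <json basename> + ".html"
    base, _ = os.path.splitext(benchmark_json)
    return "tavern_report_" + base + ".html"

print(report_name("benchmark_bridge_api_patterns.json"))
# -> tavern_report_benchmark_bridge_api_patterns.html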
#!/usr/bin/python3
# benchmark_generator.py: generates a pytest-benchmark module from tavern *.tavern.yaml test files
from json import dumps

def make_benchmark_header():
    # common preamble shared by every generated benchmark module
    return """from requests import post
from json import dumps

def send_rpc_query(address, data):
    response = post(address, data=data)
    response_json = response.json()
    return response_json
"""

def make_benchmark(test_name, address, test_payload):
    # one benchmark test function per tavern test file
    return """
def test_{}(benchmark):
    response_json = benchmark(send_rpc_query, "{}", dumps({}))
    error = response_json.get("error", None)
    result = response_json.get("result", None)
    assert error is not None or result is not None, "No error or result in response"
""".format(test_name, address, test_payload)

def get_request_from_yaml(path_to_yaml):
    import yaml
    yaml_document = None
    with open(path_to_yaml, "r") as yaml_file:
        yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
    if "stages" in yaml_document:
        if "request" in yaml_document["stages"][0]:
            json_parameters = yaml_document["stages"][0]["request"].get("json", None)
            assert json_parameters is not None, "Unable to find json parameters in request"
            return dumps(json_parameters)
    return None

def make_test_name_from_path(test_path):
    splited = test_path.split("/")
    return ("_".join(splited[-3:])).replace(".", "_").replace("-", "_")

def make_benchmark_test_file(file_name, address, tests_root_dir):
    import os
    from fnmatch import fnmatch
    # collect every tavern test file under the given root directory
    pattern = "*.tavern.yaml"
    test_files = []
    for path, subdirs, files in os.walk(tests_root_dir):
        for name in files:
            if fnmatch(name, pattern):
                test_files.append(os.path.join(path, name))
    with open(file_name, "w") as benchmarks_file:
        benchmarks_file.write(make_benchmark_header())
        for test_file in test_files:
            test_name = make_test_name_from_path(test_file)
            test_payload = get_request_from_yaml(test_file)
            benchmarks_file.write(make_benchmark(test_name, address, test_payload))
            benchmarks_file.write("\n")

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("path_to_test_dir", type=str, help="Path to test directory for given xml file")
    parser.add_argument("benchmark_test_file_name", type=str, help="Name of the generated test file")
    parser.add_argument("target_ip_address", type=str, help="Address of the hivemind")
    args = parser.parse_args()
    make_benchmark_test_file(args.benchmark_test_file_name, args.target_ip_address, args.path_to_test_dir)
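For illustration, a module generated by make_benchmark_test_file above looks roughly like the following; the test name, port, and request payload are hypothetical, but the shape follows make_benchmark_header() and make_benchmark():

from requests import post
from json import dumps

def send_rpc_query(address, data):
    response = post(address, data=data)
    response_json = response.json()
    return response_json

# one test per *.tavern.yaml file found under the given test directory
def test_bridge_api_patterns_get_profile_tavern_yaml(benchmark):
    response_json = benchmark(send_rpc_query, "http://localhost:8080",
                              dumps({"jsonrpc": "2.0", "id": 1, "method": "bridge.get_profile",
                                     "params": {"account": "hive"}}))
    error = response_json.get("error", None)
    result = response_json.get("result", None)
    assert error is not None or result is not None, "No error or result in response"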
#!/usr/bin/python3
# json_report_parser.py
""" Parse the json file generated by pytest-benchmark and create an html report file.
    For tests exceeding the expected threshold, print information to the console.
"""
import os
from sys import exit
from json import dumps, load

def get_request_from_yaml(path_to_yaml):
    """ Extract request parameters from the given yaml file
        Parameters:
        - path_to_yaml - path to yaml file
        Returns:
        - string with request parameters
    """
    import yaml
    yaml_document = None
    with open(path_to_yaml, "r") as yaml_file:
        yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
    if "stages" in yaml_document:
        if "request" in yaml_document["stages"][0]:
            json_parameters = yaml_document["stages"][0]["request"].get("json", None)
            assert json_parameters is not None, "Unable to find json parameters in request"
            return dumps(json_parameters)
    return ""

def make_class_path_dict(root_dir):
    """ Scan root_dir for files matching the tavern pattern and construct a dictionary
        whose keys are the paths with ., - and / characters replaced and whose values are the file paths
        Parameters:
        - root_dir - dir to scan for files
        Returns:
        - dict class_name -> path
    """
    from fnmatch import fnmatch
    pattern = "*.tavern.yaml"
    ret = {}
    for path, _, files in os.walk(root_dir):
        for name in files:
            if fnmatch(name, pattern):
                test_path = os.path.join(path, name)
                ret[test_path.replace(".", "_").replace("-", "_").replace("/", "_")] = test_path
    return ret

def class_to_path(class_name, class_to_path_dic):
    """ Return the path to the test file based on the class name
        Parameters:
        - class_name - test to find,
        - class_to_path_dic - dict with class -> path key/values
        Returns:
        - path to test file
    """
    from fnmatch import fnmatch
    for c, p in class_to_path_dic.items():
        if fnmatch(c, "*" + class_name):
            return p
    return None

def json_report_parser(path_to_test_dir, json_file, time_threshold=1.0):
    above_threshold = []
    html_file, _ = os.path.splitext(json_file)
    html_file = "tavern_report_" + html_file + ".html"
    class_to_path_dic = make_class_path_dict(path_to_test_dir)
    with open(html_file, "w") as ofile:
        ofile.write("<html>\n")
        ofile.write("  <head>\n")
        ofile.write("    <style>\n")
        ofile.write("      table, th, td {\n")
        ofile.write("        border: 1px solid black;\n")
        ofile.write("        border-collapse: collapse;\n")
        ofile.write("      }\n")
        ofile.write("      th, td {\n")
        ofile.write("        padding: 15px;\n")
        ofile.write("      }\n")
        ofile.write("    </style>\n")
        ofile.write("  </head>\n")
        ofile.write("  <body>\n")
        ofile.write("    <table>\n")
        ofile.write("      <tr><th>Test name</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th></tr>\n")
        json_data = None
        with open(json_file, "r") as json_file:
            json_data = load(json_file)
        for benchmark in json_data['benchmarks']:
            if float(benchmark['stats']['mean']) > time_threshold:
                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(benchmark['name'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)), benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
                above_threshold.append((benchmark['name'], "{:.4f}".format(benchmark['stats']['mean'] * 1000), get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic))))
            else:
                ofile.write("      <tr><td>{}</td><td>{:.4f}</td><td>{:.4f}</td><td>{:.4f}</td></tr>\n".format(benchmark['name'], benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
        ofile.write("    </table>\n")
        ofile.write("  </body>\n")
        ofile.write("</html>\n")
    return above_threshold

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("path_to_test_dir", type=str, help="Path to test directory for given json benchmark file")
    parser.add_argument("json_file", type=str, help="Path to benchmark json file")
    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time; tests with execution time greater than the threshold will be marked in red.")
    args = parser.parse_args()
    if not json_report_parser(args.path_to_test_dir, args.json_file, args.time_threshold):
        exit(1)
    exit(0)
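A minimal usage sketch of the parser above (the directory and file names are illustrative, chosen to match the benchmark flow in this commit): it writes a tavern_report_*.html file derived from the JSON file name and returns (test name, mean time, request parameters) tuples for tests whose mean time exceeded the threshold.

from json_report_parser import json_report_parser

# assuming pytest-benchmark already produced benchmark_bridge_api_patterns.json in the working directory
slow_tests = json_report_parser("tests/tests_api/hivemind/tavern/bridge_api_patterns",
                                "benchmark_bridge_api_patterns.json",
                                time_threshold=1.0)
for name, mean_ms, params in slow_tests:
    print("{} averaged {} ms, request: {}".format(name, mean_ms, params))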
@@ -9,5 +9,4 @@ echo "Starting tests on hivemind server running on ${HIVEMIND_ADDRESS}:${HIVEMIN
echo "Selected test group (if empty all will be executed): $3"
tox -- -W ignore::pytest.PytestDeprecationWarning -n auto --durations=0 \
  --junitxml=../../../../$4 $3
tox -e tavern -- -W ignore::pytest.PytestDeprecationWarning -n auto --junitxml=../../../../$4 $3
#!/usr/bin/python3
# scripts/ci/start_api_benchmark.py: generates benchmark modules, runs them several times and reports the results
import os
import subprocess
from sys import exit
from json import load, dump

from benchmark_generator import make_benchmark_test_file
from json_report_parser import json_report_parser

def get_test_directories(tests_root_dir):
    ret = []
    for name in os.listdir(tests_root_dir):
        dir_path = os.path.join(tests_root_dir, name)
        if os.path.isdir(dir_path):
            ret.append(dir_path)
    return ret

def find_data_in_benchmarks(name, json_data):
    for benchmark in json_data['benchmarks']:
        if benchmark['name'] == name:
            return (benchmark['stats']['min'], benchmark['stats']['max'], benchmark['stats']['mean'])
    return (None, None, None)

def join_benchmark_data(file_name, json_files):
    # merge per-run results: smallest min, largest max, mean of the per-run means
    from statistics import mean
    jsons = []
    for json_file in json_files:
        with open(json_file, "r") as src:
            jsons.append(load(src))
    for benchmark in jsons[0]['benchmarks']:
        bmin = []
        bmax = []
        bmean = []
        for j in jsons:
            data = find_data_in_benchmarks(benchmark['name'], j)
            if data[0] is not None:
                bmin.append(data[0])
            if data[1] is not None:
                bmax.append(data[1])
            if data[2] is not None:
                bmean.append(data[2])
        benchmark['stats']['min'] = min(bmin)
        benchmark['stats']['max'] = max(bmax)
        benchmark['stats']['mean'] = mean(bmean)
    with open("{}.json".format(file_name), "w") as out:
        dump(jsons[0], out)

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("hivemind_address", type=str, help="Address of hivemind instance")
    parser.add_argument("hivemind_port", type=int, help="Port of hivemind instance")
    parser.add_argument("tests_root_dir", type=str, help="Path to tests root dir")
    parser.add_argument("--benchmark-runs", type=int, default=3, help="How many benchmark runs")
    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time; tests with execution time greater than the threshold will be marked in red.")
    args = parser.parse_args()

    assert os.path.exists(args.tests_root_dir), "Directory does not exist"
    assert args.benchmark_runs > 0, "Benchmark runs option has to be a positive number"

    hivemind_url = "http://{}:{}".format(args.hivemind_address, args.hivemind_port)
    test_directories = get_test_directories(args.tests_root_dir)

    # generate one pytest-benchmark module per test group directory
    benchmarks_files = []
    for test_directory in test_directories:
        benchmark_file_name = "benchmark_" + test_directory.split("/")[-1] + ".py"
        make_benchmark_test_file(benchmark_file_name, hivemind_url, test_directory)
        benchmarks_files.append(benchmark_file_name)

    # run every benchmark module --benchmark-runs times, keeping one json result per run
    benchmark_json_files = {}
    for run in range(args.benchmark_runs):
        for benchmark_file in benchmarks_files:
            name, ext = os.path.splitext(benchmark_file)
            json_file_name = "{}-{:03d}.json".format(name, run)
            cmd = [
                "pytest",
                "--benchmark-max-time=0.000001",
                "--benchmark-min-rounds=10",
                "--benchmark-json={}".format(json_file_name),
                benchmark_file
            ]
            if name in benchmark_json_files:
                benchmark_json_files[name].append(json_file_name)
            else:
                benchmark_json_files[name] = [json_file_name]
            ret = subprocess.run(cmd)
            if ret.returncode != 0:
                print("Error while running `{}`".format(' '.join(cmd)))
                exit(1)

    # merge the per-run json files into one json file per test group
    for name, json_files in benchmark_json_files.items():
        join_benchmark_data(name, json_files)

    # build the html reports and collect tests above the threshold
    failed = []
    for test_directory in test_directories:
        json_file_name = "benchmark_" + test_directory.split("/")[-1] + ".json"
        ret = json_report_parser(test_directory, json_file_name, args.time_threshold)
        if ret:
            failed.extend(ret)

    if failed:
        from prettytable import PrettyTable
        summary = PrettyTable()
        print("########## Test failed with following tests above {}ms threshold ##########".format(args.time_threshold * 1000))
        summary.field_names = ['Test name', 'Mean time [ms]', 'Call parameters']
        for entry in failed:
            summary.add_row(entry)
        print(summary)
        exit(2)
    exit(0)
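To make the aggregation in join_benchmark_data concrete: across the benchmark runs it keeps the smallest min, the largest max, and the arithmetic mean of the per-run means for every test. A small sketch with hypothetical per-run stats (values in seconds, as in the pytest-benchmark JSON):

from statistics import mean

# hypothetical stats for one test collected from three runs
runs = [
    {"min": 0.010, "max": 0.045, "mean": 0.020},
    {"min": 0.012, "max": 0.040, "mean": 0.022},
    {"min": 0.009, "max": 0.050, "mean": 0.021},
]
joined = {
    "min": min(r["min"] for r in runs),     # 0.009
    "max": max(r["max"] for r in runs),     # 0.050
    "mean": mean(r["mean"] for r in runs),  # 0.021
}
print(joined)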
@@ -9,4 +9,4 @@ echo Attempting to start tests on hivemind instance listening on: $HIVEMIND_ADDRE
echo "Selected test group (if empty all will be executed): $3"
tox -- -W ignore::pytest.PytestDeprecationWarning -n auto --durations=0 --junitxml=../../../../$4 $3
tox -e tavern -- -W ignore::pytest.PytestDeprecationWarning -n auto --junitxml=../../../../$4 $3
Subproject commit d8c41b9bbbe8b38744cfe1079e3ff72ce125a554
Subproject commit 8aac0997c0ad0ea62d8d3fe2a38e5f0d24951380
[tox]
envlist = py36
envlist = py36, tavern, benchmark
[testenv]
deps =
    pytest
[testenv:benchmark]
deps =
    {[testenv]deps}
    pytest-benchmark
    requests
    pyyaml
    prettytable
commands =
    python {toxinidir}/scripts/ci/start_api_benchmark.py {posargs}
[testenv:tavern]
setenv =
    PYTHONPATH = {toxinidir}/tests/tests_api/hivemind/tavern:{env:PYTHONPATH:}
@@ -12,7 +26,7 @@ passenv =
changedir = tests/tests_api/hivemind/tavern
deps =
    pytest
    {[testenv]deps}
    pytest-cov
    pytest-pylint
    pytest-asyncio
......