Skip to content
Snippets Groups Projects
Commit 1e166a3a authored by Dariusz Kędzierski's avatar Dariusz Kędzierski
Browse files

Rebase to current develop

parents 417cd793 f9e11c47
No related branches found
No related tags found
2 merge requests!456Release candidate v1 24,!246API tests execution time reports
......@@ -7,6 +7,7 @@ stages:
- data-supply
- deploy
- e2e-test
- benchmark-tests
- post-deploy
variables:
......@@ -274,3 +275,133 @@ tags_api_smoketest_negative:
reports:
junit: api_smoketest_tags_api_negative.xml
# Shared configuration for all API benchmark jobs below (reused via YAML anchor).
.benchmark_tests: &common_api_benchmarks
  stage: benchmark-tests
  environment: hive-4.pl.syncad.com
  # Benchmarks run against the hivemind server started by this job and
  # consume its artifacts (connection details, etc.).
  needs:
    - job: hivemind_start_server
      artifacts: true
  variables:
    # No checkout needed — the jobs only reuse artifacts from earlier stages.
    GIT_STRATEGY: none
  rules:
    # Run automatically for merge requests, manually for direct pushes,
    # otherwise when the preceding stages succeed.
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: on_success
  tags:
    - hivemind
# Benchmark the bridge_api positive pattern tests; report name must match
# what json_report_parser.py derives from the benchmark name argument.
bridge_api_smoketest_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/bridge_api_patterns/ api_benchmark_bridge
  artifacts:
    when: always
    paths:
      - tavern_benchmark_report_api_benchmark_bridge.html
# Benchmark the bridge_api negative pattern tests.
bridge_api_smoketest_negative_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/bridge_api_negative/ api_benchmark_bridge_negative
  artifacts:
    when: always
    paths:
      - tavern_benchmark_report_api_benchmark_bridge_negative.html
# Benchmark the condenser_api positive pattern tests.
condenser_api_smoketest_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/condenser_api_patterns/ api_benchmark_condenser
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_condenser.html
# Benchmark the condenser_api negative pattern tests.
condenser_api_smoketest_negative_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/condenser_api_negative/ api_benchmark_condenser_negative
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_condenser_negative.html
# Benchmark the database_api positive pattern tests.
database_api_smoketest_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/database_api_patterns/ api_benchmark_database
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_database.html
# Benchmark the database_api negative pattern tests.
database_api_smoketest_negative_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/database_api_negative/ api_benchmark_database_negative
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_database_negative.html
# Benchmark the follow_api positive pattern tests.
follow_api_smoketest_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/follow_api_patterns/ api_benchmark_follow
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_follow.html
# Benchmark the follow_api negative pattern tests.
follow_api_smoketest_negative_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/follow_api_negative/ api_benchmark_follow_negative
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_follow_negative.html
# Benchmark the tags_api positive pattern tests.
tags_api_smoketest_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/tags_api_patterns/ api_benchmark_tags
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_tags.html
# Benchmark the tags_api negative pattern tests.
tags_api_smoketest_negative_benchmark:
  <<: *common_api_benchmarks
  script:
    - scripts/ci_start_api_benchmark.sh localhost $HIVEMIND_HTTP_PORT tests/tests_api/hivemind/tavern/tags_api_negative/ api_benchmark_tags_negative
  artifacts:
    when: always
    paths:
      # Must match the file json_report_parser.py produces:
      # "tavern_benchmark_report_" + <benchmark name ($4)> + ".html"
      - tavern_benchmark_report_api_benchmark_tags_negative.html
#!/bin/bash
# Generate and run API benchmark tests against a hivemind server, then
# build an HTML report from the benchmark json output.
# $1 - server address
# $2 - server port
# $3 - path to test directory (tavern pattern tests to benchmark)
# $4 - name of the benchmark script file (without the .py extension)

set -e

echo "========================= BENCHMARKS ================================="
echo "Server address: $1"
echo "Server port: $2"
echo "Test directory to be processed: $3"
echo "Benchmark test file name: $4.py"

BASE_DIR=$(pwd)
echo "Script base dir is: $BASE_DIR"

pip3 install tox --user

echo "Creating benchmark test file as: $4.py"
# Quote paths/args so directories with spaces don't split into words.
"$BASE_DIR/tests/tests_api/hivemind/benchmarks/benchmark_generator.py" "$3" "$4.py" "http://$1:$2"

echo "Running benchmark tests on http://$1:$2"
tox -e benchmark -- --benchmark-json="$4.json" "$4.py"

echo "Creating html report from $4.json"
"$BASE_DIR/scripts/json_report_parser.py" "$3" "$4.json"
......@@ -9,4 +9,4 @@ echo Attempting to start tests on hivemind instance listeing on: $HIVEMIND_ADDRE
echo "Selected test group (if empty all will be executed): $3"
tox -- -W ignore::pytest.PytestDeprecationWarning -n auto --durations=0 --junitxml=../../../../$4 $3
tox -e tavern -- -W ignore::pytest.PytestDeprecationWarning -n auto --durations=0 --junitxml=../../../../$4 $3
#!/usr/bin/python3
import xml.dom.minidom
import os
from sys import exit
from json import dumps, load
def get_request_from_yaml(path_to_yaml):
    """Return the JSON request body of the first tavern stage in *path_to_yaml*.

    The file is parsed with ``yaml.BaseLoader`` so every scalar stays a plain
    string.  Returns ``""`` when the document has no ``stages`` section or the
    first stage has no ``request``; asserts when the request lacks a ``json``
    payload.
    """
    import yaml

    with open(path_to_yaml, "r") as source:
        document = yaml.load(source, Loader=yaml.BaseLoader)

    if "stages" not in document:
        return ""
    first_stage = document["stages"][0]
    if "request" not in first_stage:
        return ""
    json_parameters = first_stage["request"].get("json", None)
    assert json_parameters is not None, "Unable to find json parameters in request"
    return dumps(json_parameters)
def make_class_path_dict(root_dir):
    """Map benchmark class names to the tavern test files under *root_dir*.

    Keys are the full file paths with ``.``, ``-`` and ``/`` replaced by ``_``
    (matching how the generated benchmark test names are formed); values are
    the original file paths.
    """
    import os
    from fnmatch import fnmatch

    mapping = {}
    for current_dir, _subdirs, file_names in os.walk(root_dir):
        for file_name in file_names:
            if not fnmatch(file_name, "*.tavern.yaml"):
                continue
            full_path = os.path.join(current_dir, file_name)
            key = full_path.replace(".", "_").replace("-", "_").replace("/", "_")
            mapping[key] = full_path
    return mapping
def class_to_path(class_name, class_to_path_dic):
    """Resolve a benchmark class name back to its tavern test file path.

    Returns the path for the first key in *class_to_path_dic* that ends with
    *class_name* (fnmatch with a leading wildcard), or ``None`` when no key
    matches.
    """
    from fnmatch import fnmatch

    pattern = "*" + class_name
    for key, path in class_to_path_dic.items():
        if fnmatch(key, pattern):
            return path
    return None
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description="Create an HTML report from a pytest-benchmark json file.")
    parser.add_argument("path_to_test_dir", type=str,
                        help="Path to test directory for given json benchmark file")
    parser.add_argument("json_file", type=str, help="Path to benchmark json file")
    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0,
                        help="Time threshold for test execution time, tests with execution "
                             "time greater than threshold will be marked on red.")
    args = parser.parse_args()

    above_threshold = False
    # Name the report after the json file itself; take the basename first so a
    # json path with directory components does not yield a broken report path
    # like "tavern_benchmark_report_some/dir/name.html".
    base_name, _ = os.path.splitext(os.path.basename(args.json_file))
    html_file = "tavern_benchmark_report_" + base_name + ".html"
    class_to_path_dic = make_class_path_dict(args.path_to_test_dir)

    with open(html_file, "w") as ofile:
        ofile.write("<html>\n")
        ofile.write("  <head>\n")
        ofile.write("    <style>\n")
        ofile.write("      table, th, td {\n")
        ofile.write("        border: 1px solid black;\n")
        ofile.write("        border-collapse: collapse;\n")
        ofile.write("      }\n")
        ofile.write("      th, td {\n")
        ofile.write("        padding: 15px;\n")
        ofile.write("      }\n")
        ofile.write("    </style>\n")
        ofile.write("  </head>\n")
        ofile.write("  <body>\n")
        ofile.write("    <table>\n")
        ofile.write("      <tr><th>Test name</th><th>Time [s]</th></tr>\n")

        json_data = None
        with open(args.json_file, "r") as json_file:
            json_data = load(json_file)

        for benchmark in json_data['benchmarks']:
            if float(benchmark['stats']['mean']) > args.time_threshold:
                # Slow test: look up the tavern file matching the generated
                # test name (strip the "test_" prefix) and show its request
                # payload; mark the row red.
                ofile.write("      <tr><td>{}<br/>Parameters: {}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(
                    benchmark['name'],
                    get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)),
                    benchmark['stats']['mean']))
                above_threshold = True
            else:
                ofile.write("      <tr><td>{}</td><td>{:.4f}</td></tr>\n".format(
                    benchmark['name'], benchmark['stats']['mean']))

        ofile.write("    </table>\n")
        ofile.write("  </body>\n")
        ofile.write("</html>\n")

    # Non-zero exit marks the CI job as failed when any benchmark exceeded
    # the time threshold.
    if above_threshold:
        exit(1)
    exit(0)
Subproject commit 3d3daf0c67b9d429be51b2d66543a57c0f8fcf29
Subproject commit 542c3adadf6601b1cee9edc5e0350ed1009258eb
[tox]
envlist = py36, tavern, benchmark
[testenv]
deps =
pytest
[testenv:benchmark]
deps =
{[testenv]deps}
pytest-benchmark
requests
commands = pytest {posargs}
[testenv:tavern]
setenv =
PYTHONPATH = {toxinidir}/tests/tests_api/hivemind/tavern:{env:PYTHONPATH:}
......@@ -12,7 +24,7 @@ passenv =
changedir = tests/tests_api/hivemind/tavern
deps =
pytest
{[testenv]deps}
pytest-cov
pytest-pylint
pytest-asyncio
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment