Commit 5ce6f29f authored 4 years ago by Dariusz Kędzierski

Add info about failing benchmarks to the console

Parent: b90ae0da
Merge requests: !456 "Release candidate v1 24", !246 "API tests execution time reports"

Showing 2 changed files with 11 additions and 4 deletions:
scripts/ci_start_api_benchmark.sh (1 addition, 0 deletions)
scripts/json_report_parser.py (10 additions, 4 deletions)
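In short: json_report_parser.py previously only recorded a boolean flag when some benchmark exceeded the time threshold; it now collects each failing benchmark's name, mean time, and call parameters, prints them to the console as a PrettyTable before exiting with status 1, and the CI start script installs the new prettytable dependency.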
scripts/ci_start_api_benchmark.sh (+1, −0)

@@ -17,6 +17,7 @@ BASE_DIR=$(pwd)
 echo "Script base dir is: $BASE_DIR"
 pip3 install tox --user
+pip3 install prettytable --user
 echo "Creating benchmark test file as: $4.py"
 $BASE_DIR/tests/tests_api/hivemind/benchmarks/benchmark_generator.py $3 "$4.py" "http://$1:$2"
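The one-line shell change installs prettytable for the CI user; json_report_parser.py (next diff) now imports it to render the console summary, so without this line the benchmark job would fail on the new import.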
scripts/json_report_parser.py (+10, −4)

@@ -1,5 +1,4 @@
 #!/usr/bin/python3
-import xml.dom.minidom
 import os
 from sys import exit
 from json import dumps, load
@@ -40,7 +39,7 @@ def class_to_path(class_name, class_to_path_dic):
     return None

 if __name__ == '__main__':
-    above_treshold = False
+    above_treshold = []
     import argparse
     parser = argparse.ArgumentParser()
     parser.add_argument("path_to_test_dir", type=str, help="Path to test directory for given json benchmark file")
@@ -65,19 +64,26 @@ if __name__ == '__main__':
         ofile.write("</head>\n")
         ofile.write("<body>\n")
         ofile.write("<table>\n")
-        ofile.write("<tr><th>Test name</th><th>Time [s]</th></tr>\n")
+        ofile.write("<tr><th>Test name</th><th>Mean time [s]</th></tr>\n")
         json_data = None
         with open(args.json_file, "r") as json_file:
             json_data = load(json_file)
         for benchmark in json_data['benchmarks']:
             if float(benchmark['stats']['mean']) > args.time_threshold:
                 ofile.write("<tr><td>{}<br/>Parameters: {}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(benchmark['name'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)), benchmark['stats']['mean']))
-                above_treshold = True
+                above_treshold.append((benchmark['name'], benchmark['stats']['mean'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic))))
             else:
                 ofile.write("<tr><td>{}</td><td>{:.4f}</td></tr>\n".format(benchmark['name'], benchmark['stats']['mean']))
         ofile.write("</table>\n")
         ofile.write("</body>\n")
         ofile.write("</html>\n")
     if above_treshold:
+        from prettytable import PrettyTable
+        summary = PrettyTable()
+        print("########## Test failed with following tests above {}s threshold ##########".format(args.time_threshold))
+        summary.field_names = ['Test name', 'Mean time [s]', 'Call parameters']
+        for entry in above_treshold:
+            summary.add_row(entry)
+        print(summary)
         exit(1)
     exit(0)
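For illustration only (made-up test name and parameters, and a hypothetical 1.0 s threshold), the new console summary printed on a failing run looks roughly like this:

########## Test failed with following tests above 1.0s threshold ##########
+-----------------------+---------------+------------------------------------+
|       Test name       | Mean time [s] |          Call parameters           |
+-----------------------+---------------+------------------------------------+
| test_example_api_call |     2.3471    | {"method": "example.call", ...}    |
+-----------------------+---------------+------------------------------------+

Because exit(1) follows the table, CI marks the job failed while the generated HTML report still lists every benchmark, with the over-threshold rows highlighted in red.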