Commit a8b80be2 authored by Krzysztof Mochocki

Improve accessibility

parent 215faf4f
@@ -84,6 +84,9 @@ ENV ADDITIONAL_ARGS="--skip-version-check -i"
# path to directory where jmeter and the python benchmark script will put all their output
ENV JMETER_WORKDIR=${ROOT_DIR}/wdir
# set to 0 to generate junit report using m2u
ENV SKIP_JUNIT_TEST_REPORT=1
# verify that the setup is ready
RUN source ${VENV} && python3 -m pip list
RUN source ${VENV} && python3 benchmark.py -h
@@ -79,7 +79,6 @@ environment.add_argument('-l', '--list', dest='list_csv', **BOO
environment.add_argument('-r', '--root-dir', dest='root_dir', type=str, default=DEFAULT_ROOT_DIR, help=f'path to root directory of tests_api project [default={DEFAULT_ROOT_DIR}]')
environment.add_argument('-d', '--datadir', dest='datadir', type=str, default='./wdir', help='defines path to workdir (path to this dir will always be recreated) [default=./wdir]')
environment.add_argument('-j', '--jmeter', dest='jmeter', type=str, default='/usr/bin/jmeter', help='path to jmeter executable [default=/usr/bin/jmeter]')
environment.add_argument('-q', '--supr-errors', dest='supr_err', **BOOL_PARAM, help="if specified error messages of bad requests won't be printed")
environment.add_argument('--skip-version-check', dest='skip_version', **BOOL_PARAM, help='if specified, `hive_api.get_version` call will not be performed')
# benchmarking options
@@ -113,7 +112,6 @@ SKIP_VERSION : bool = args.skip_version
API_NAME : str = args.api
LOOP_COUNT : int = max(-1, args.loops)
IGNORE_BAD_REQ : bool = args.ignore_br
SUPR_ERRRORS : bool = args.supr_err
SCHEMA : str = args.schema
# print configuration
@@ -282,6 +280,9 @@ except Exception as e:
# processing output
# gathering data from CSV
input_csv_lines = CSV_PATH.read_text().strip("\n").splitlines()
len_input_csv_lines = len(input_csv_lines)
# read and organize output from JMETER
@dataclass
class jmeter_record:
@@ -291,6 +292,7 @@ class jmeter_record:
# process incoming data from JMETER
jmeter_output : Dict[str, List[jmeter_record]] = dict()
error_counter = 0
error_lines_in_csv: set[tuple[int, str]] = set()
with JMETER_REPORT_OUT_FILE.open('rt', encoding='utf-8') as in_file:
raw_line = in_file.readline()
headers_raw = raw_line.split(',')
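The hunk below checks line[success_idx] for each sample, so the position of the success column has to come from this header row. A minimal sketch of that lookup, not part of the diff, assuming JMeter's default CSV headers (which include a success column); the header string and the exact lookup are illustrative, the real one in benchmark.py is not shown here:

# sketch: resolve the success column from the header row of the JMeter CSV report
headers_raw = 'timeStamp,elapsed,label,responseCode,success,bytes\n'.split(',')
headers = [h.strip() for h in headers_raw]
success_idx = headers.index('success')  # column holding the 'true'/'false' result flag
print(success_idx)                      # -> 4 for this example header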
@@ -303,22 +305,19 @@ with JMETER_REPORT_OUT_FILE.open('rt', encoding='utf-8') as in_file:
def handle_error(msg : str):
global error_counter
error_counter += 1
if not SUPR_ERRRORS:
log.error('during analysis of jmeter output, found error in line: \n' + msg)
for count, raw_line in enumerate(in_file):
line = raw_line.split(',')
if line[success_idx] != 'true':
error_lines_in_csv.add(((count % len_input_csv_lines), raw_line))
if CSV_MODE == CSV.MODE.CL and ( jmeter_interrupt or LOOP_COUNT > 0 ):
if not IGNORE_BAD_REQ:
log.info(f'total amount of calls on {THREADS} threads: {count-1}')
break
else:
handle_error(raw_line)
error_counter += 1
else:
handle_error(raw_line)
error_counter += 1
if not IGNORE_BAD_REQ:
assert False, f'test failed, check logs in {DATADIR.as_posix()} for more information. Fail detected on line: `{raw_line}`'
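The added error_lines_in_csv entries are keyed by count % len_input_csv_lines, which maps a failed JMeter sample back to the input CSV line that produced it when the thread group loops over the same CSV file. A self-contained sketch of that mapping with illustrative data (the variable names mirror the diff, the payloads do not come from it):

# 3 input calls looped twice by jmeter -> 6 result rows
input_csv_lines = ['get_block|1', 'get_ops|2', 'get_account|3']
len_input_csv_lines = len(input_csv_lines)

error_lines_in_csv: set = set()
for count, success in enumerate([True, False, True, True, False, True]):
    if not success:
        # count % len_input_csv_lines points back at the originating csv line
        error_lines_in_csv.add((count % len_input_csv_lines, f'result row {count}'))

for idx, raw in sorted(error_lines_in_csv):
    print(f'{raw} failed; it was produced by input line: {input_csv_lines[idx]}')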
@@ -339,9 +338,9 @@ with JMETER_REPORT_OUT_FILE.open('rt', encoding='utf-8') as in_file:
if error_counter > 0:
log.error(f'Amount of invalid requests/total amount of requests: {error_counter}/{count + 1}')
log.info(f"total amount of calls: {count}")
log.info(f"total amount of calls: {count+1}")
# generate pretty table
table = PrettyTable(field_names=['Endpoint', 'Max [ms]', 'Min [ms]', 'Average [ms]', 'Median [ms]'])
table = PrettyTable(field_names=['Endpoint', 'Max [ms]', 'Min [ms]', 'Average [ms]', 'Median [ms]', "Count [-]"])
value_extr = lambda x: x.value
def median_on_sorted(iter: List[jmeter_record]):
length = len(iter)
@@ -366,10 +365,15 @@ for endpoint, values in jmeter_output.items():
int(vsorted[-1].value),
int(vsorted[0].value),
int(summ(vsorted)/len(vsorted)),
int(median_on_sorted(vsorted))
int(median_on_sorted(vsorted)),
int(len(values))
])
# formatting
table.align = 'c'
table.align[table.field_names[0]] = 'l'
log.info('\n' + f'{table}')
# printing invalid lines from csv
if len(error_lines_in_csv) > 0:
log.info("calls that did't succeed from input csv:\n\n" + "\n".join(f"{i[1]}{input_csv_lines[i[0]]}\n" for i in error_lines_in_csv))
@@ -19,9 +19,11 @@ source $VENV && python3 "$ROOT_DIR/benchmarks/benchmark.py" \
--call-style "$CALL_STYLE" \
$@; true
echo "generating JUNIT report"
$M2U --input "$JMETER_WORKDIR/raw_jmeter_report.xml" --output "$JMETER_WORKDIR/report.junit"; true
echo "generated output to: $JMETER_WORKDIR/report.junit"
if [ "$SKIP_JUNIT_TEST_REPORT" = "0" ]; then
echo "generating JUNIT report"
$M2U --input "$JMETER_WORKDIR/raw_jmeter_report.xml" --output "$JMETER_WORKDIR/report.junit"; true
echo "generated output to: $JMETER_WORKDIR/report.junit"
fi
if [ -n "${SERVE_PORT}" ]; then
echo "serving output from benchmarks on http://0.0.0.0:$SERVE_PORT"