diff --git a/benchmarks/Dockerfile.jmeter b/benchmarks/Dockerfile.jmeter
index 0b6924ebb6b91aaf793661bbcf4fe72a0d01540e..51b6d6130f61d7c0eab8bf424a3b2b9c60ac6dda 100644
--- a/benchmarks/Dockerfile.jmeter
+++ b/benchmarks/Dockerfile.jmeter
@@ -79,7 +79,7 @@ ENV ROOT_DIR="${WDIR}"
 ENV SERVE_PORT=""
 
 # additional arguments that will be passed to the benchmarking script
-ENV ADDITIONAL_ARGS="--skip-version-check"
+ENV ADDITIONAL_ARGS="--skip-version-check -i"
 
 # path to the directory where jmeter and the python benchmark script will put all its output
 ENV JMETER_WORKDIR=${ROOT_DIR}/wdir
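
Note: ADDITIONAL_ARGS is only a pass-through; the image hands its contents to the benchmarking script unchanged, so the new -i flag takes effect inside benchmark.py. A minimal Python sketch of that hand-off, assuming a shlex-style split of the variable and a hypothetical script path (neither is shown in this diff):

    import os
    import shlex
    import subprocess

    # ADDITIONAL_ARGS carries extra CLI flags for the benchmark script; split it
    # shell-style so quoted values survive, then append the tokens to the command.
    extra_args = shlex.split(os.environ.get('ADDITIONAL_ARGS', ''))
    cmd = ['python3', 'benchmark.py', *extra_args]  # script path is an assumption
    subprocess.run(cmd, check=True)
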
diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
index 09629e91eb259dfac6a3531280491ccc6dceb026..3fefa465746c02d8f63ad39dc15dc722432f0f5f 100755
--- a/benchmarks/benchmark.py
+++ b/benchmarks/benchmark.py
@@ -339,6 +339,7 @@ with JMETER_REPORT_OUT_FILE.open('rt', encoding='utf-8') as in_file:
 	if error_counter > 0:
 		log.error(f'Amount of invalid requests/total amount of requests: {error_counter}/{count + 1}')
 
+	log.info(f'Total number of calls: {count + 1}')
 # generate pretty table
 table = PrettyTable(field_names=['Endpoint', 'Max [ms]', 'Min [ms]', 'Average [ms]', 'Median [ms]'])
 value_extr = lambda x: x.value
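
For context, the new log line lands after the loop that walks the JMeter report and before the PrettyTable is built. A compact sketch of that reporting step, assuming the report is JMeter's CSV results format with its standard 'success' column (the column name and file handling here are assumptions, not taken from this diff):

    import csv
    import logging

    log = logging.getLogger(__name__)

    def summarize(report_path):
        """Count total and failed samples in a JMeter CSV report and log them."""
        error_counter = 0
        count = -1  # stays -1 for an empty report, so count + 1 == 0
        with open(report_path, 'rt', encoding='utf-8') as in_file:
            for count, row in enumerate(csv.DictReader(in_file)):
                # 'success' is JMeter's standard results column; assumed present here
                if row.get('success', 'true').lower() != 'true':
                    error_counter += 1
        if error_counter > 0:
            log.error(f'Amount of invalid requests/total amount of requests: {error_counter}/{count + 1}')
        log.info(f'Total number of calls: {count + 1}')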