stages:
  - benchmark
  - account history api
  - blocks api
  - hivemind

variables:
  CI_DEBUG_SERVICES: "true"
  BENCHMARK_ADDRESS: ""
  BENCHMARK_PORT: ""

Trigger any benchmark:
  stage: benchmark
  image: alpine:3.7

include:
  - local: '/ci-scripts/docker_based_benchmark_job.yaml'

.custom_benchmark:
  extends: .benchmark_api
  rules:
    - when: manual
  services:
    - name: registry.gitlab.syncad.com/hive/tests_api/benchmark_aio:latest
      alias: benchmark
  variables:
    # const:
    eROOT_DIR: "/builds/hive/tests_api"
    # required:
    eADDRESS: $BENCHMARK_ADDRESS
    ePORT: $BENCHMARK_PORT
    # optional:
    eJOBS: 1
    eLOOPS: 1
    # eAPI: account_history_api
    # eCSV: perf_60M_heavy.csv
    # eJOBS: 10
    # eLOOPS: 500
    # eCALL_STYLE: old-style
    # ePOSTGRES_URL: postgresql:///haf_block_log
    # ePOSTGRES_SCHEMA: hive
    # eSERVE_PORT: ""
  script:
    - ls -la /
    - find /builds -type d -maxdepth 4
  when: manual
  tags:
    - tests-api

######## VARIANTS ########
.api_account_history: &api_ah
  API: account_history_api

.api_block: &api_blocks
  API: blocks_api

.universal: &api_universal
  API: universal

.light_60M: &light_60M
  CSV: perf_60M_light.csv

.heavy_60M: &heavy_60M
  CSV: perf_60M_heavy.csv

.http: &http
  CALL_STYLE: old-style

.postgres: &postgres
  CALL_STYLE: postgres

######## STAGES ########
.account_history_api:
  extends: .custom_benchmark
  stage: account history api
  variables: *api_ah

.blocks_api:
  extends: .custom_benchmark
  stage: blocks api
  variables: *api_blocks

.hivemind:
  extends: .custom_benchmark
  stage: hivemind
  variables: *api_universal

### account history api ###
light 60M http account history api:
  extends: .account_history_api
  variables:
    CSV: perf_60M_light.csv
    CALL_STYLE: old-style

heavy 60M http account history api:
  extends: .account_history_api
  variables:
    CSV: perf_60M_heavy.csv
    CALL_STYLE: old-style

light 60M postgres account history api:
  extends: .account_history_api
  variables:
    CSV: perf_60M_light.csv
    CALL_STYLE: postgres

heavy 60M postgres account history api:
  extends: .account_history_api
  variables:
    CSV: perf_60M_heavy.csv
    CALL_STYLE: postgres

### blocks api ###
light 60M http blocks api:
  extends: .blocks_api
  variables:
    CSV: perf_60M_light.csv
    CALL_STYLE: old-style

heavy 60M http blocks api:
  extends: .blocks_api
  variables:
    CSV: perf_60M_heavy.csv
    CALL_STYLE: old-style

light 60M postgres blocks api:
  extends: .blocks_api
  variables:
    CSV: perf_60M_light.csv
    CALL_STYLE: postgres

heavy 60M postgres blocks api:
  extends: .blocks_api
  variables:
    CSV: perf_60M_heavy.csv
    CALL_STYLE: postgres

######## hivemind ########
warmup:
  extends: .hivemind
  variables:
    CSV: 2022_11_16_hivemind_60M_prod_jrpc.csv
    CALL_STYLE: old-style
@@ -3,27 +3,33 @@
FROM alpine as jmeter_dependencies
# install all required dependencies
RUN apk add git bash openjdk8 python3 py3-pip
RUN apk add git bash openjdk8 python3 py3-pip maven
RUN python3 -m pip install --upgrade pip && pip install prettytable requests
# base enviroments
# base environments
ENV WDIR /jmeter
ENV BENCHMARK_DIR ${WDIR}/benchmarks
# set working direcotry in container
# set working directory in container
WORKDIR ${WDIR}
# get required resources from current build dir
ADD . ${BENCHMARK_DIR}
# configure java
ENV JAVA_ARGS -Xms4g -Xmx4g
# configure jmeter
RUN bash ${BENCHMARK_DIR}/setup_jmeter.bash
ENV JAVA_ARGS -Xms4g -Xmx4g
ENV JMETER="${WDIR}/jmeter/apache/bin/jmeter"
# configure file/dir server
RUN git clone https://github.com/simon-budig/woof.git
ENV WOOF "${WDIR}/woof/woof"
# configure M2U
RUN bash ${BENCHMARK_DIR}/setup_m2u.bash
ENV M2U="java -jar ${WDIR}/m2u/target/m2u.jar"
################################################################
# this image contains set of rules to start benchmark tests
FROM jmeter_dependencies AS benchmark_aio
@@ -31,64 +37,47 @@ FROM jmeter_dependencies AS benchmark_aio
WORKDIR ${BENCHMARK_DIR}
# api to test
ARG API="account_history_api"
ENV eAPI ${API}
ENV API="account_history_api"
# input file to use for performance testing
ARG CSV="perf_60M_heavy.csv"
ENV eCSV ${CSV}
ENV CSV="perf_60M_heavy.csv"
# number of threads
ARG JOBS=10
ENV eJOBS ${JOBS}
ENV JOBS=10
# number of requests per thread (-1 for infinite)
ARG LOOPS=500
ENV eLOOPS ${LOOPS}
ENV LOOPS=500
# possible options: old-style, new-style, postgres
ARG CALL_STYLE="old-style"
ENV eCALL_STYLE ${CALL_STYLE}
ENV CALL_STYLE="old-style"
# address to test (defaults to the default docker host address)
ARG ADDRESS='172.17.0.1'
ENV eADDRESS ${ADDRESS}
ENV ADDRESS='172.17.0.1'
# port on which to perform the tests
ARG PORT=8090
ENV ePORT ${PORT}
ENV PORT=8090
# url to postgres database (required only if CALL_STYLE = postgres)
ARG POSTGRES_URL="postgresql:///haf_block_log"
ENV ePOSTGRES_URL ${POSTGRES_URL}
ENV POSTGRES_URL="postgresql:///haf_block_log"
# schema in which functions to test are (required only if CALL_STYLE = postgres)
ARG POSTGRES_SCHEMA="hive"
ENV ePOSTGRES_SCHEMA ${POSTGRES_SCHEMA}
ENV POSTGRES_SCHEMA="hive"
# path to root directory of tests_api project (can be set on CI to /build/path/to/hive/tests/tests_api)
ARG ROOT_DIR="${WDIR}"
ENV eROOT_DIR ${ROOT_DIR}
ENV ROOT_DIR="${WDIR}"
# if set, serve the workdir over HTTP on the specified port after benchmarking (one time only); remember to expose that port
ARG SERVE_PORT=""
ENV eSERVE_PORT ${SERVE_PORT}
ENV SERVE_PORT=""
# additional arguments that will be passed to the benchmarking script
ENV ADDITIONAL_ARGS=""
# path to the directory where jmeter and the python benchmark script will put all of their output
ENV JMETER_WORKDIR=${ROOT_DIR}/wdir
# verify that the setup is ready
RUN python3 benchmark.py -h
RUN python3 benchmark.py -n ${eAPI} -l
RUN python3 benchmark.py -n ${API} -l
# defines what to do when the container starts
ENTRYPOINT python3 benchmark.py \
-r $eROOT_DIR \
-d wdir \
-j $JMETER \
-p $ePORT \
-a $eADDRESS \
--postgres $ePOSTGRES_URL \
--postgres-schema $ePOSTGRES_SCHEMA \
-n $eAPI \
-c $eCSV \
-k $eLOOPS \
-t $eJOBS \
--call-style $eCALL_STYLE; bash -c "[[ ! -z ${eSERVE_PORT} ]] && python3 $WOOF -i 0.0.0.0 -p $eSERVE_PORT -Z -c 1 wdir"; echo "exitting"
ENTRYPOINT bash ${ROOT_DIR}/benchmarks/docker/entrypoint.bash $ADDITIONAL_ARGS
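The Dockerfile defines two stages, so intermediate images can also be built directly; a hypothetical pair of build commands (tag names are illustrative, only the `--target` stage names come from the file above):
```
# build only the dependency layer, e.g. to cache it in CI
docker build --target jmeter_dependencies -t jmeter_deps:local .
# build the full benchmark image
docker build --target benchmark_aio -t benchmark_aio:local .
```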
# Benchmarks
# Benchmark tests
This directory contains inputs and programs to benchmark any API.
It requires JMeter to work, which can be set up using the `./setup_jmeter.bash` script.
### Configuration
#### Locally
- install the Java 8 packages: `openjdk-8-jdk` and `openjdk-8-jre`
- download and set up jmeter
  - run the script `benchmarks/setup_jmeter.bash`
  - `source jmeter/activate` to get the JMETER env variable holding the path to the jmeter binary
  - (optional) `ln -s $JMETER /usr/bin/jmeter` to install it system-wide
- create and set up a virtual environment
  - `python -m venv .venv` (if this does not work, install the `python3-venv` package)
  - `source .venv/bin/activate` to activate the env (the `deactivate` command exits the virtual env)
  - (optional) `pip install --upgrade pip`
  - `pip install -r benchmarks/requirements.txt` to install all dependencies
- to confirm everything is properly set up, run `python benchmarks/benchmark.py --help`; it should print the help text and exit with code 0 (a condensed sketch of these steps follows below)
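A condensed sketch of the steps above, assuming they are run from the repository root:
```
# one-time local setup (illustrative sketch)
bash benchmarks/setup_jmeter.bash
source jmeter/activate                       # exports JMETER
python3 -m venv .venv && source .venv/bin/activate
pip install -r benchmarks/requirements.txt
python benchmarks/benchmark.py --help        # sanity check: prints help, exits 0
```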
#### Starting tests
#### Locally
To benchmark APIs you can use jmeter directly with the pre-configured `jmx` files (jmeter test plans) found in `./performance_data/<api name>/JMX/*.jmx`, or you can use the `./benchmark.py` script.
@@ -19,6 +39,41 @@ For the same test, but on a remote machine:
./benchmark.py -n blocks_api -p 8090 -c perf_5M_light.csv -a hive-6.pl.syncad.com
```
#### Docker
You can pull the Docker image (~208 MB) with:
```
docker pull registry.gitlab.syncad.com/hive/tests_api/benchmark_aio:latest
```
Here are some examples:
```
# run default benchmarks (account_history, 60M)
docker run hive/benchmark_aio
# run benchmarks for blocks_api against a 5M node
docker run -e eAPI=blocks_api -e eCSV=perf_5M_heavy.csv hive/benchmark_aio
# run benchmarks for blocks_api against a 5M node on port 8091
docker run -e eAPI=blocks_api -e eCSV=perf_5M_heavy.csv -e ePORT=8091 hive/benchmark_aio
# run default benchmarks against address 192.168.16.12, port 8091
docker run -e eADDRESS=192.168.16.12 -e ePORT=8091 hive/benchmark_aio
# run default benchmarks in an infinite loop on port 8091
docker run -e eLOOPS=-1 -e ePORT=8091 hive/benchmark_aio
# list available CSVs for blocks_api
docker run -e eAPI=blocks_api -e eADDITIONAL_ARGS="-l" hive/benchmark_aio
# run with a custom CSV from an attached volume
docker run -v my_dir:/inputs -e eCSV="/inputs/perf_60M_my_super_custom.csv" hive/benchmark_aio
```
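For the `postgres` call style, the connection variables listed in the CI template and Dockerfile also need to be set; a hypothetical invocation (the URL and schema shown are just the documented defaults):
```
# run benchmarks by querying SQL functions directly instead of over HTTP
docker run -e eCALL_STYLE=postgres \
  -e ePOSTGRES_URL="postgresql:///haf_block_log" \
  -e ePOSTGRES_SCHEMA=hive \
  hive/benchmark_aio
```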
### CSV management
To list the available CSV inputs, use the `-l` flag together with `-n <api name>` (`account_history_api` by default); the output looks like this:
```
@@ -61,5 +116,3 @@ If you want to add a new CSV, make sure to name the file properly:
```
<mode>_<amount of blocks>M_<tag 1>_<tag 2>_<tag N>.csv
```
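For example, `perf_60M_heavy.csv` follows this pattern: mode `perf`, a 60-million-block dataset, and a single tag, `heavy`.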
@@ -175,8 +175,8 @@ if CSV_FILENAME in AVAILA_CSV:
CSV_MODE: CSV.MODE = AVAILA_CSV[CSV_FILENAME][1]
CSV_PATH: Path = AVAILA_CSV[CSV_FILENAME][0]
else:
assert Path(CSV_FILENAME).exists()
CSV_PATH = CSV_FILENAME
CSV_PATH = Path(CSV_FILENAME)
assert CSV_PATH.exists()
CSV_MODE: CSV.MODE = (CSV.MODE.PERF if CSV_PATH.name.startswith('perf') else CSV.MODE.CL)
# process the postgresql connection string to meet jdbc requirements
#!/bin/bash
echo "setting up paths"
mkdir -p "$JMETER_WORKDIR"
echo "starting benchmarks"
python3 "$ROOT_DIR/benchmarks/benchmark.py" \
-r "$ROOT_DIR" \
-d "$JMETER_WORKDIR" \
-j "$JMETER" \
-p "$PORT" \
-a "$ADDRESS" \
--postgres "$POSTGRES_URL" \
--postgres-schema "$POSTGRES_SCHEMA" \
-n "$API" \
-c "$CSV" \
-k "$LOOPS" \
-t "$JOBS" \
--call-style "$CALL_STYLE" \
"$@"; true
echo "generating JUNIT report"
$M2U --input "$JMETER_WORKDIR/raw_jmeter_report.xml" --output "$JMETER_WORKDIR/report.junit"; true
echo "generated output to: $JMETER_WORKDIR/report.junit"
if [ -n "${SERVE_PORT}" ]; then
echo "serving output from benchmarks on http://0.0.0.0:$SERVE_PORT"
python3 "$WOOF" -i 0.0.0.0 -p "$SERVE_PORT" -Z -c 1 "$JMETER_WORKDIR"
fi
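When `SERVE_PORT` is set, the script serves the workdir once over HTTP after the run; a hypothetical way to use that from Docker (the port value is illustrative, and the published port must match):
```
# expose the one-shot result server on port 8000 after the benchmark run
docker run -e eSERVE_PORT=8000 -p 8000:8000 hive/benchmark_aio
```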
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193552')::VARCHAR, ('rushi1983')::VARCHAR, ('actifit-rushi1983-20240213t135440513z')::VARCHAR, (21)::SMALLINT, True, ('manuvert')::VARCHAR );
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193552')::VARCHAR, ('aljif7')::VARCHAR, ('actifit-aljif7-20240213t094009913z')::VARCHAR, (21)::SMALLINT, True, ('manuvert')::VARCHAR );
bridge_get_account_posts_by_payout|SELECT * FROM hivemind_app.bridge_get_account_posts_by_payout( ('w7ngc')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (20)::SMALLINT );
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193552')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (20)::SMALLINT, True, ('spectrumecons')::VARCHAR );
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193552')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (19)::SMALLINT, True, ('hive.blog')::VARCHAR );
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193084')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (21)::SMALLINT, True, ('philnews.xyz')::VARCHAR );
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193552')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (19)::SMALLINT, True, ('')::VARCHAR )
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-171330')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (20)::SMALLINT, True, ('hive.blog')::VARCHAR );
bridge_get_ranked_post_by_created_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_community( ('hive-193084')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (21)::SMALLINT, True, ('philnews.xyz')::VARCHAR );
bridge_get_ranked_post_by_created_for_tag|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_tag( ('fr')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (40)::SMALLINT, ('')::VARCHAR );
bridge_get_ranked_post_by_created_for_tag|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_tag( ('kr-life')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (8)::SMALLINT, ('')::VARCHAR );
bridge_get_ranked_post_by_muted_for_community|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_muted_for_community( ('hive-133872')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (20)::SMALLINT, ('blocktrades')::VARCHAR );
bridge_get_ranked_post_by_created_for_tag|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_tag( ('kr-life')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (8)::SMALLINT, ('')::VARCHAR );
bridge_get_ranked_post_by_created_for_tag|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_tag( ('kr-life')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (8)::SMALLINT, ('')::VARCHAR );
bridge_get_ranked_post_by_created_for_tag|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_tag( ('fr')::VARCHAR, ((''))::VARCHAR, ((''))::VARCHAR, ((100))::SMALLINT, ('')::VARCHAR );
bridge_get_account_posts_by_posts|SELECT * FROM hivemind_app.bridge_get_account_posts_by_posts( ('drotto')::VARCHAR, ('')::VARCHAR, ('')::VARCHAR, (20)::SMALLINT );
bridge_get_ranked_post_by_created_for_tag|SELECT * FROM hivemind_app.bridge_get_ranked_post_by_created_for_tag( ('ita')::VARCHAR, ((''))::VARCHAR, ((''))::VARCHAR, ((100))::SMALLINT, ('')::VARCHAR );
@@ -43,6 +43,7 @@ get_account_history|{"jsonrpc": "2.0", "method": "account_history_api.get_accoun
get_ops_in_block|{"jsonrpc": "2.0", "method": "account_history_api.get_ops_in_block", "params": {"block_num": 62739611, "only_virtual": true, "include_reversible": false}, "id": "get_ops_in_block( 62739611, true, false, true )"}
get_account_history|{"jsonrpc": "2.0", "method": "account_history_api.get_account_history", "params": {"operation_filter_low": 199284866418737180, "operation_filter_high": 0, "account": "starworld", "start": "9223372036854775807", "limit": 500, "include_reversible": false}, "id": "get_account_history( 199284866418737180, 0, 'starworld', 9223372036854775807 ::BIGINT, 500, false, true )"}
get_account_history|{"jsonrpc": "2.0", "method": "account_history_api.get_account_history", "params": {"operation_filter_low": 0, "operation_filter_high": 0, "account": "zuun.net", "start": "9223372036854775807", "limit": 1, "include_reversible": false}, "id": "get_account_history( 0, 0, 'zuun.net', 9223372036854775807 ::BIGINT, 1, false, false )"}
get_account_history|{"jsonrpc": "2.0", "method": "account_history_api.get_account_history", "params": {"operation_filter_low": 4, "account": "gtg", "start": -1, "limit": 250, "include_reversible": false}, "id": "get_account_history( 4, NULL, 'gtg', 9223372036854775807 ::BIGINT, 250, false, false )"}
get_transaction|{"jsonrpc": "2.0", "method": "account_history_api.get_transaction", "params": {"id": "60e8692950763aa65d94c470afa98ee7d7e62cf7", "include_reversible": false}, "id": "get_transaction( '60e8692950763aa65d94c470afa98ee7d7e62cf7', false, false )"}
get_transaction|{"jsonrpc": "2.0", "method": "account_history_api.get_transaction", "params": {"id": "sl_52a5bfcf9e6dc93ea14b0dde43e6f7a4", "include_reversible": true}, "id": "get_transaction( 'sl_52a5bfcf9e6dc93ea14b0dde43e6f7a4', true, false )"}
get_account_history|{"jsonrpc": "2.0", "method": "account_history_api.get_account_history", "params": {"operation_filter_low": 199284866418737180, "operation_filter_high": 0, "account": "catfacts", "start": "9223372036854775807", "limit": 500, "include_reversible": false}, "id": "get_account_history( 199284866418737180, 0, 'catfacts', 9223372036854775807 ::BIGINT, 500, false, true )"}
prettytable
requests
.benchmark_api:
  image:
    name: registry.gitlab.syncad.com/hive/tests_api/benchmark_aio@sha256:4acf4b83dce920ac2f49f2109f0f392d1df6d003c79f9e51592e2cd9f9775386
    entrypoint: [""]
  variables:
    ## required:
    eADDRESS: ""
    ePORT: ""
    ## optional:
    # eROOT_DIR: "/jmeter"
    # eAPI: account_history_api
    # eCSV: perf_60M_heavy.csv
    # eJOBS: 10
    # eLOOPS: 500
    # eCALL_STYLE: old-style
    # ePOSTGRES_URL: postgresql:///haf_block_log
    # ePOSTGRES_SCHEMA: hive
    # eSERVE_PORT: ""
    # eADDITIONAL_ARGS: ""
    ## config
    JMETER_WORKDIR: $eROOT_DIR/wdir
    ARTIFACTS_PATH: $CI_PROJECT_DIR/benchmark_artifacts
  script:
    - bash $eROOT_DIR/benchmarks/docker/entrypoint.bash $eADDITIONAL_ARGS
  after_script:
    - if [[ "$JMETER_WORKDIR" != "$ARTIFACTS_PATH" ]]; then cp -r $JMETER_WORKDIR $ARTIFACTS_PATH; fi
  artifacts:
    name: "benchmarks-$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
    reports:
      junit: "$ARTIFACTS_PATH/report.junit"
    paths:
      - "$ARTIFACTS_PATH"
    when: always
    expire_in: 6 hours
# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
[[package]]
name = "certifi"
version = "2023.7.22"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
{file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
]
[[package]]
name = "charset-normalizer"
version = "2.0.12"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
python-versions = ">=3.5.0"
files = [
{file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"},
{file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"},
]
[package.extras]
unicode-backport = ["unicodedata2"]
[[package]]
name = "deepdiff"
version = "6.3.0"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
category = "main"
optional = false
python-versions = ">=3.7"
files = [
{file = "deepdiff-6.3.0-py3-none-any.whl", hash = "sha256:15838bd1cbd046ce15ed0c41e837cd04aff6b3e169c5e06fca69d7aa11615ceb"},
{file = "deepdiff-6.3.0.tar.gz", hash = "sha256:6a3bf1e7228ac5c71ca2ec43505ca0a743ff54ec77aa08d7db22de6bc7b2b644"},
]
[package.dependencies]
ordered-set = ">=4.0.2,<4.2.0"
[package.extras]
cli = ["click (==8.1.3)", "pyyaml (==6.0)"]
optimize = ["orjson"]
[[package]]
name = "idna"
version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
category = "main"
optional = false
python-versions = ">=3.5"
files = [
{file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
{file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]
[[package]]
name = "ordered-set"
version = "4.1.0"
description = "An OrderedSet is a custom MutableSet that remembers its order, so that every"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
{file = "ordered-set-4.1.0.tar.gz", hash = "sha256:694a8e44c87657c59292ede72891eb91d34131f6531463aab3009191c77364a8"},
{file = "ordered_set-4.1.0-py3-none-any.whl", hash = "sha256:046e1132c71fcf3330438a539928932caf51ddbc582496833e23de611de14562"},
]
[package.extras]
dev = ["black", "mypy", "pytest"]
[[package]]
name = "prettytable"
version = "3.8.0"
description = "A simple Python library for easily displaying tabular data in a visually appealing ASCII table format"
category = "main"
optional = false
python-versions = ">=3.8"
files = [
{file = "prettytable-3.8.0-py3-none-any.whl", hash = "sha256:03481bca25ae0c28958c8cd6ac5165c159ce89f7ccde04d5c899b24b68bb13b7"},
{file = "prettytable-3.8.0.tar.gz", hash = "sha256:031eae6a9102017e8c7c7906460d150b7ed78b20fd1d8c8be4edaf88556c07ce"},
]
[package.dependencies]
wcwidth = "*"
[package.extras]
tests = ["pytest", "pytest-cov", "pytest-lazy-fixture"]
[[package]]
name = "requests"
version = "2.27.1"
description = "Python HTTP for Humans."
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
{file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
{file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
urllib3 = ">=1.21.1,<1.27"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<5)"]
[[package]]
name = "types-requests"
version = "2.31.0.2"
description = "Typing stubs for requests"
category = "main"
optional = false
python-versions = "*"
files = [
{file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"},
{file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"},
]
[package.dependencies]
types-urllib3 = "*"
[[package]]
name = "types-urllib3"
version = "1.26.25.14"
description = "Typing stubs for urllib3"
category = "main"
optional = false
python-versions = "*"
files = [
{file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"},
{file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"},
]
[[package]]
name = "urllib3"
version = "1.26.16"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
{file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"},
{file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "wcwidth"
version = "0.2.6"
description = "Measures the displayed width of unicode strings in a terminal"
category = "main"
optional = false
python-versions = "*"
files = [
{file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"},
{file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"},
]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "30c673ef21420a7741c3a59c66bb8f43c51bc065e0c053d98d83131bb75dc055"
[build-system]
requires = ["poetry-core==1.1.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "tests_api"
version = "0.0.0"
description = "A collective library containing tests for different APIs and tools for benchmarking"
authors = ["Krzysztof Mochocki"]
packages = [
{ include = "benchmarks" },
{ include = "validate_response" },
]
[tool.poetry.dependencies]
python = "^3.10"
prettytable = "3.8.0"
requests = "2.27.1"
types-requests = "2.31.0.2"
deepdiff = "6.3.0"
import os
import csv
from difflib import SequenceMatcher
from pathlib import Path
from time import perf_counter as perf
from typing import Any
import deepdiff
import re
@@ -15,7 +17,7 @@ class NoResultException(Exception):
# of result json when comparing with pattern - only one per test (ignore_tags should
# be a string denoting predefined situation), exclusive with regular (not predefined)
# tags (in normal case ignore_tags must be a list of tag specifiers)
predefined_ignore_tags = {
predefined_ignore_tags: dict[str, re.Pattern] = {
'<bridge post>' : re.compile(r"root\['post_id'\]"),
'<bridge posts>' : re.compile(r"root\[\d+\]\['post_id'\]"),
'<bridge discussion>' : re.compile(r"root\[.+\]\['post_id'\]"),
@@ -31,12 +33,13 @@ predefined_ignore_tags = {
'<database votes>' : re.compile(r"root\['votes'\]\[\d+\]\['id'\]"),
'<follow blog>' : re.compile(r"root\[\d+\]\['comment'\]\['post_id'\]"), # follow_api.get_blog
'<tags posts>' : re.compile(r"root\[\d+\]\['post_id'\]"),
'<tags post>' : re.compile(r"root\['post_id'\]") # tags_api.get_discussion
'<tags post>' : re.compile(r"root\['post_id'\]"), # tags_api.get_discussion
'<hafbe cache_update>' : re.compile(r"root\['votes_updated_at'\]") # witness api in haf_block_explorer
}
def get_overlap(s1, s2):
s = SequenceMatcher(None, s1, s2)
pos_a, pos_b, size = s.find_longest_match(0, len(s1), 0, len(s2))
return s1[pos_a:pos_a+size] if pos_b == 0 else ""
def json_pretty_string(json_obj):
@@ -90,15 +93,15 @@ def get_time(test_id):
def compare_response_with_pattern(response, method=None, directory=None, ignore_tags=None, error_response=False, benchmark_time_threshold=None, allow_null_response=False):
""" This method will compare response with pattern file """
test_fname, _ = os.getenv('PYTEST_CURRENT_TEST').split("::")
test_dir = os.getenv("TAVERN_DIR", "")
overlap = get_overlap(test_dir, test_fname)
test_fname = test_dir + "/" + test_fname.replace(overlap, "")
test_fname = test_fname.replace(TEST_FILE_EXT, "")
response_fname = test_fname + RESPONSE_FILE_EXT
pattern_fname = test_fname + PATTERN_FILE_EXT
tavern_disable_comparator = bool(os.getenv('TAVERN_DISABLE_COMPARATOR', False))
if os.path.exists(response_fname) and not tavern_disable_comparator:
@@ -119,7 +122,7 @@ def compare_response_with_pattern(response, method=None, directory=None, ignore_
if ignore_tags is not None:
assert isinstance(ignore_tags, list), "ignore_tags should be list of tags"
# disable comparison with pattern on demand and save
if tavern_disable_comparator:
if error is not None:
save_json(response_fname, error)
@@ -168,16 +171,16 @@ def compare_response_with_pattern(response, method=None, directory=None, ignore_
def has_valid_response(response, method=None, directory=None, error_response=False, response_fname=None, benchmark_time_threshold=None):
test_fname, _ = os.getenv('PYTEST_CURRENT_TEST').split("::")
test_dir = os.getenv("TAVERN_DIR", "")
overlap = get_overlap(test_dir, test_fname)
test_fname = test_dir + "/" + test_fname.replace(overlap, "")
test_fname = test_fname.replace(TEST_FILE_EXT, "")
response_fname = test_fname + RESPONSE_FILE_EXT
tavern_disable_comparator = bool(os.getenv('TAVERN_DISABLE_COMPARATOR', False))
if os.path.exists(response_fname) and not tavern_disable_comparator:
os.remove(response_fname)
@@ -191,7 +194,7 @@ def has_valid_response(response, method=None, directory=None, error_response=Fal
correct_response = result
# disable comparison with pattern on demand
# and save
if tavern_disable_comparator:
test_id = response_json.get("id", None)
if error is not None:
@@ -207,3 +210,31 @@ def has_valid_response(response, method=None, directory=None, error_response=Fal
if correct_response is None:
msg = "Error detected in response: result is null, json object was expected"
raise NoResultException(msg)
def compare_rest_response_with_pattern(response, method=None, directory=None, error_response: bool = False, ignore_tags: str | list[str] | list[re.Pattern] | None = None):
pytest_current_test = os.getenv('PYTEST_CURRENT_TEST')
assert pytest_current_test is not None, "Environment variable not set: PYTEST_CURRENT_TEST"
test_fname, _ = pytest_current_test.split("::")
test_dir = os.getenv("TAVERN_DIR", "")
overlap = get_overlap(test_dir, test_fname)
test_fname = test_dir + "/" + test_fname.replace(overlap, "")
test_fname = test_fname.replace(TEST_FILE_EXT, "")
response_fname = test_fname + RESPONSE_FILE_EXT
json_response: dict[str, Any] = response.json()
save_json(response_fname, json_response)
if error_response:
for required_key in ["code", "details", "hint", "message"]:
assert required_key in json_response, f"Response, marked as error, does not contain {required_key} key"
if isinstance(ignore_tags, str):
ignore_tags = [predefined_ignore_tags[ignore_tags]]
pattern = load_pattern(test_fname + PATTERN_FILE_EXT)
pattern_resp_diff = deepdiff.DeepDiff(pattern, json_response, exclude_regex_paths=ignore_tags)
if pattern_resp_diff:
raise PatternDiffException("Differences detected between response and pattern.")
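The comparator functions above resolve their pattern and response file locations from the `PYTEST_CURRENT_TEST` and `TAVERN_DIR` environment variables, and the JSON-RPC helpers additionally honor `TAVERN_DISABLE_COMPARATOR`; a hypothetical way to drive the suite (paths are illustrative):
```
# run the test suite so responses are saved instead of compared,
# e.g. to regenerate pattern candidates; any non-empty value enables it
export TAVERN_DIR=/path/to/tavern/tests
export TAVERN_DISABLE_COMPARATOR=1
pytest /path/to/tavern/tests
```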