diff --git a/.gitignore b/.gitignore
index 166a11ef61ea9a98ccefa11f53f0e68d899ed16b..6a1bcbf1f0bb64821739366cc9074d3b6720c5f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -144,3 +144,5 @@ Pipfile.lock
 pghero.yml
 *~
 .tmp
+
+.private
diff --git a/.gitlab-ci-docker.yaml b/.gitlab-ci-docker.yaml
deleted file mode 100644
index b849f7958a970061b1c3f283f434d4a1ee2f9b5c..0000000000000000000000000000000000000000
--- a/.gitlab-ci-docker.yaml
+++ /dev/null
@@ -1,422 +0,0 @@
-stages:
-  - build
-  - data-supply
-  - e2e-test
-
-variables:
-
-  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
-
-  # GIT_DEPTH: 10
-  GIT_DEPTH: 1
-
-  # GIT_STRATEGY: fetch # Noticed errors with that.
-  GIT_STRATEGY: clone
-  # GIT_STRATEGY: none
-
-  GIT_SUBMODULE_STRATEGY: recursive
-
-  PIPENV_VENV_IN_PROJECT: 1
-  PIPENV_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pipenv"
-  PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
-
-  POSTGRES_CLIENT_TOOLS_PATH: /usr/lib/postgresql
-
-  # POSTGRES_HOST: 172.17.0.1 # Host
-  # POSTGRES_HOST: postgres-10 # Docker service
-  POSTGRES_PORT: 5432
-
-  # Set on project level in Gitlab CI.
-  # We need create role and create db privileges.
-  # ADMIN_POSTGRES_USER: postgres
-  # ADMIN_POSTGRES_USER_PASSWORD: postgres
-
-  # Needed by old runner ssh-executor, probably.
-  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
-  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
-  POSTGRES_HOST_AUTH_METHOD: trust
-
-  HIVEMIND_DB_NAME: "hive_${CI_COMMIT_REF_SLUG}_pipeline_id_${CI_PIPELINE_ID}"
-  HIVEMIND_EXEC_NAME: $DB_NAME
-
-  # Set on project level in Gitlab CI.
-  # HIVEMIND_POSTGRES_USER: hivemind_ci
-
-  # Set on project level in Gitlab CI.
-  HIVEMIND_POSTGRES_USER_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
-
-  # Set on project level in Gitlab CI.
-  # HIVEMIND_HTTP_PORT: 18080
-
-  # Set on project level in Gitlab CI.
-  # HIVEMIND_MAX_BLOCK: 10001
-  # HIVEMIND_MAX_BLOCK: 5000001
-
-  # Set on project level in Gitlab CI.
-  # HIVEMIND_SOURCE_HIVED_URL: {"default":"http://hive-4.pl.syncad.com:8091"}
-  # HIVEMIND_SOURCE_HIVED_URL: {"default":"192.168.6.136:8091"}
-  # HIVEMIND_SOURCE_HIVED_URL: {"default":"http://172.17.0.1:8091"}
-
-
-.postgres-10: &postgres-10
-  name: hivemind/postgres:10
-  alias: db
-  command: [
-      "postgres",
-      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
-      "-c", "track_functions=pl",
-      "-c", "track_io_timing=on",
-      "-c", "track_activity_query_size=2048",
-      "-c", "pg_stat_statements.max=10000",
-      "-c", "pg_stat_statements.track=all",
-      "-c", "max_connections=100",
-      "-c", "shared_buffers=2GB",
-      "-c", "effective_cache_size=6GB",
-      "-c", "maintenance_work_mem=512MB",
-      "-c", "checkpoint_completion_target=0.9",
-      "-c", "wal_buffers=16MB",
-      "-c", "default_statistics_target=100",
-      "-c", "random_page_cost=1.1",
-      "-c", "effective_io_concurrency=200",
-      "-c", "work_mem=5242kB",
-      "-c", "min_wal_size=2GB",
-      "-c", "max_wal_size=8GB",
-      "-c", "max_worker_processes=4",
-      "-c", "max_parallel_workers_per_gather=2",
-      "-c", "max_parallel_workers=4",
-      ]
-
-.postgres-12: &postgres-12
-  name: hivemind/postgres:12
-  alias: db
-  command: [
-      "postgres",
-      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
-      "-c", "track_functions=pl",
-      "-c", "track_io_timing=on",
-      "-c", "track_activity_query_size=2048",
-      "-c", "pg_stat_statements.max=10000",
-      "-c", "pg_stat_statements.track=all",
-      "-c", "max_connections=100",
-      "-c", "shared_buffers=2GB",
-      "-c", "effective_cache_size=6GB",
-      "-c", "maintenance_work_mem=512MB",
-      "-c", "checkpoint_completion_target=0.9",
-      "-c", "wal_buffers=16MB",
-      "-c", "default_statistics_target=100",
-      "-c", "random_page_cost=1.1",
-      "-c", "effective_io_concurrency=200",
-      "-c", "work_mem=5242kB",
-      "-c", "min_wal_size=2GB",
-      "-c", "max_wal_size=8GB",
-      "-c", "max_worker_processes=4",
-      "-c", "max_parallel_workers_per_gather=2",
-      "-c", "max_parallel_workers=4",
-      ]
-
-.setup-pip: &setup-pip
-  - python -m venv .venv
-  - source .venv/bin/activate
-  - time pip install --upgrade pip setuptools wheel
-  - pip --version
-  - easy_install --version
-  - wheel version
-  - pipenv --version
-  - poetry --version
-  - time pip install --editable .
-
-.setup-setuptools: &setup-setuptools
-  - python -m venv .venv
-  - source .venv/bin/activate
-  - time pip install --upgrade pip setuptools wheel
-  - pip --version
-  - easy_install --version
-  - wheel version
-  - pipenv --version
-  - poetry --version
-  - time python setup.py develop
-
-# no virtual environment
-.setuptools: &setup-setuptools-no-venv
-  # setuptools will install all dependencies to this directory.
-  - export PYTHONUSERBASE=./local-site
-  - time pip install --upgrade pip setuptools wheel
-  - pip --version
-  - easy_install --version
-  - wheel version
-  - pipenv --version
-  - poetry --version
-  - mkdir -p `python -m site --user-site`
-  - python setup.py install --user --force
-  # we can probably also run via: ./hive/cli.py
-  - ln -sf ./local-site/bin/hive "$HIVEMIND_EXEC_NAME"
-
-.setup-pipenv: &setup-pipenv
-  ## Note, that Pipfile must exist.
-  ## `--sequential` is slower, but doesn't emit messages about errors
-  ## and need to repeat install.
-  ## - pipenv sync --dev --bare --sequential
-  ## It's faster than `--sequential`, but emits messages about errors
-  ## and a need to repeat install, sometimes. However seems these
-  ## errors are negligible.
-  - time pipenv sync --dev --bare
-  - source .venv/bin/activate
-  - pip --version
-  - easy_install --version
-  - wheel version
-  - pipenv --version
-  - poetry --version
-
-.set-variables: &set-variables
-  - whoami
-  # list all variables predefined by Gitlab CI
-  # - export
-  - echo "CI_PIPELINE_URL is $CI_PIPELINE_URL"
-  - echo "CI_PIPELINE_ID is $CI_PIPELINE_ID"
-  - echo "CI_COMMIT_SHORT_SHA is $CI_COMMIT_SHORT_SHA"
-  - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
-  - export HIVEMIND_DB_NAME=${HIVEMIND_DB_NAME//[^a-zA-Z0-9_]/_}
-  - echo "HIVEMIND_DB_NAME is $HIVEMIND_DB_NAME"
-  - export HIVEMIND_POSTGRESQL_CONNECTION_STRING=postgresql://${HIVEMIND_POSTGRES_USER}:${HIVEMIND_POSTGRES_USER_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${HIVEMIND_DB_NAME}
-
-.fetch-git-tags: &fetch-git-tags
-  # - git fetch --tags
-  - git tag -f ci_implicit_tag # Needed to build python package
-
-.start_timer: &start-timer
-  - ./scripts/ci/timer.sh start
-
-.stop-timer: &stop-timer
-  - ./scripts/ci/timer.sh check
-
-.hive-sync-script-common: &hive-sync-script-common
-  - ./scripts/ci/wait-for-postgres.sh ${POSTGRES_HOST} ${POSTGRES_PORT}
-  - export POSTGRES_MAJOR_VERSION=$(./scripts/ci/get-postgres-version.sh)
-  - ./scripts/ci/create-db.sh
-  - ./scripts/ci/hive-sync.sh
-  - ./scripts/ci/collect-db-stats.sh
-
-.default-rules: &default-rules
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
-      when: always
-    - if: '$CI_PIPELINE_SOURCE == "push"'
-      when: manual
-    - when: on_success
-
-default:
-  image: hivemind/python:3.6
-  # image: hivemind/python:3.8
-  interruptible: false
-  timeout: 2h
-  cache: &global-cache
-    # Per-branch caching. CI_COMMIT_REF_SLUG is the same thing.
-    # key: "$CI_COMMIT_REF_NAME"
-    # Per project caching – use any key. Change this key, if you need
-    # to clear cache
-    key: common-1
-    paths:
-      - .cache/
-      - .venv/
-      - .tox/
-  before_script:
-    - *start-timer
-    - *fetch-git-tags
-    - *set-variables
-    - *setup-pip
-  after_script:
-    - *stop-timer
-
-##### Jobs #####
-
-.build-egg:
-  stage: build
-  needs: []
-  script:
-    - python setup.py bdist_egg
-    - ls -l dist/*
-  artifacts:
-    paths:
-      - dist/
-    expire_in: 7 days
-  tags:
-    - hivemind-light-job
-
-.build-wheel:
-  stage: build
-  needs: []
-  script:
-    - python setup.py bdist_wheel
-    - ls -l dist/*
-  artifacts:
-    paths:
-      - dist/
-    expire_in: 7 days
-  tags:
-    - hivemind-light-job
-
-# Postgres shared
-hivemind-sync:
-  <<: *default-rules
-  stage: data-supply
-  needs: []
-  script:
-    - *hive-sync-script-common
-  artifacts:
-    paths:
-      - hivemind-sync.log
-      - pg-stats
-    expire_in: 7 days
-  tags:
-    - hivemind-heavy-job
-
-# Postgres as service
-.hivemind-sync:
-  <<: *default-rules
-  stage: data-supply
-  services:
-    - *postgres-10
-    # - *postgres-12
-  needs: []
-  script:
-    - *hive-sync-script-common
-    # - ./scripts/ci/dump-db.sh
-  artifacts:
-    paths:
-      - hivemind-sync.log
-      - pg-stats
-      - pg-dump-${HIVEMIND_DB_NAME}
-    expire_in: 7 hours
-  tags:
-    - hivemind-heavy-job
-
-.e2e-test-common:
-  rules:
-    - when: on_success
-  needs:
-    - job: hivemind-sync
-      artifacts: false
-  before_script:
-    - *start-timer
-    - *fetch-git-tags
-    - *set-variables
-    - *setup-pip
-    - ./scripts/ci/wait-for-postgres.sh ${POSTGRES_HOST} ${POSTGRES_PORT}
-    - ./scripts/ci/hive-server.sh start
-  after_script:
-    - ./scripts/ci/hive-server.sh stop
-    - *stop-timer
-  tags:
-    - hivemind-light-job
-
-bridge_api_smoketest:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      bridge_api_patterns/ api_smoketest_bridge.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_bridge.xml
-
-bridge_api_smoketest_negative:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      bridge_api_negative/ api_smoketest_bridge_negative.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_bridge_negative.xml
-
-condenser_api_smoketest:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      condenser_api_patterns/ api_smoketest_condenser_api.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_condenser_api.xml
-
-condenser_api_smoketest_negative:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      condenser_api_negative/ api_smoketest_condenser_api_negative.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_condenser_api_negative.xml
-
-database_api_smoketest:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      database_api_patterns/ api_smoketest_database_api.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_database_api.xml
-
-database_api_smoketest_negative:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      database_api_negative/ api_smoketest_database_api_negative.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_database_api_negative.xml
-
-follow_api_smoketest:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      follow_api_patterns/ api_smoketest_follow_api.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_follow_api.xml
-
-follow_api_smoketest_negative:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      follow_api_negative/ api_smoketest_follow_api_negative.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_follow_api_negative.xml
-
-tags_api_smoketest:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      tags_api_patterns/ api_smoketest_tags_api.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_tags_api.xml
-
-tags_api_smoketest_negative:
-  stage: e2e-test
-  extends: .e2e-test-common
-  script:
-    - >
-      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
-      tags_api_negative/ api_smoketest_tags_api_negative.xml
-  artifacts:
-    reports:
-      junit: api_smoketest_tags_api_negative.xml
diff --git a/.gitlab-ci-ssh.yaml b/.gitlab-ci-ssh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ad8fcd1906a9acbab88a109ba774317bdbe3182c
--- /dev/null
+++ b/.gitlab-ci-ssh.yaml
@@ -0,0 +1,265 @@
+stages:
+  - build
+  - test
+  - data-supply
+  - deploy
+  - e2e-test
+  - benchmark-tests
+  - post-deploy
+
+variables:
+  GIT_DEPTH: 1
+  LC_ALL: "C"
+  GIT_STRATEGY: clone
+  GIT_SUBMODULE_STRATEGY: recursive
+  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
+
+  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
+  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
+  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
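+  # Intended to give each concurrent job its own HTTP port. Assumption: the
+  # shell arithmetic is evaluated by the runner's shell (ssh executor), since
+  # Gitlab itself does not evaluate $(( )) in `variables:`.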
+  # Configured in Gitlab repository settings.
+  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
+  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  POSTGRES_HOST_AUTH_METHOD: trust
+  # The official way to provide a password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
+  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+
+default:
+  before_script:
+    - pwd
+    - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
+    - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
+    - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
+    - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+
+hivemind_build:
+  stage: build
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    - git fetch --tags
+    - git tag -f ci_implicit_tag
+    - echo $PYTHONUSERBASE
+    - "python3 setup.py bdist_egg"
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 1 week
+  tags:
+     - hivemind
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+
+hivemind_sync:
+  stage: data-supply
+  environment:
+      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
+  needs:
+    - job: hivemind_build
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    # WARNING!!! Temporarily hardcoded 5000017 instead of $HIVEMIND_MAX_BLOCK.
+    # Revert this change when $HIVEMIND_MAX_BLOCK is set to 5000017.
+    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" 5000017 $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hivemind-sync.log
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+hivemind_start_server:
+  stage: deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
+    on_stop: hivemind_stop_server
+  needs:
+    - job: hivemind_build
+      artifacts: true
+#    - job: hivemind_sync
+#      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hive_server.pid
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+hivemind_stop_server:
+  stage: post-deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    action: stop
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+  script:
+    - scripts/ci_stop_server.sh hive_server.pid
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  tags:
+     - hivemind
+  artifacts:
+    paths:
+      - hive_server.log
+
+.hivemind_start_api_smoketest: &common_api_smoketest_job
+  stage: e2e-test
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+bridge_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge.xml
+
+bridge_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge_negative.xml
+
+condenser_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api.xml
+
+condenser_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api_negative.xml
+
+database_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api.xml
+
+database_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api_negative.xml
+
+follow_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api.xml
+
+follow_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api_negative.xml
+
+tags_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_tags_api.xml
+
+tags_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
+
+mock_tests:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" mock_tests/ api_smoketest_mock_tests.xml
+
+api_smoketest_benchmark:
+  stage: benchmark-tests
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  allow_failure: true
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+  script:
+    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
+  artifacts:
+    when: always
+    paths:
+      - tavern_benchmarks_report.html
diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
index 52214acf6abdc1bc22c53df16c21e48a4b649827..032e216411fa67695dd55e088f76f099bff26bd6 100644
--- a/.gitlab-ci.yaml
+++ b/.gitlab-ci.yaml
@@ -1,306 +1,312 @@
-# https://hub.docker.com/r/library/python/tags/
-image: "python:3.7"
-
 stages:
-- build
-- test
-- data-supply
-- deploy
-- e2e-test
-- benchmark-tests
-- post-deploy
-
-variables:
-  GIT_DEPTH: 1
-  LC_ALL: "C"
-  GIT_STRATEGY: clone
-  GIT_SUBMODULE_STRATEGY: recursive
-  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
-
-  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
-  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
-  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
-  # Configured at gitlab repository settings side
-  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
-  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
-  POSTGRES_HOST_AUTH_METHOD: trust
-  # official way to provide password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
-  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
-
-before_script:
-  - pwd
-  - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
-  - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
-  - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
+  - build
+  - test
+  - data-supply
+  - deploy
+  - e2e-test
+  - benchmark-tests
+  - post-deploy
+
+.dk-setup-pip: &dk-setup-pip
+  - python -m venv .venv
+  - source .venv/bin/activate
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - time pip install --editable .[dev]
+
+.dk-setup-runner-env: &dk-setup-runner-env
+  # Set up the runner environment (mainly to connect to the correct postgres server).
+  - TMP_VAR=$(cat hive-sync-runner-id.txt 2>/dev/null || true); export HIVE_SYNC_RUNNER_ID=${TMP_VAR:-0}
+  - eval $(cat "$RUNNER_CONF" | ./scripts/ci/setup_env.py --current-runner-id=${CI_RUNNER_ID} --hive-sync-runner-id=${HIVE_SYNC_RUNNER_ID})
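+  # Assumed behavior of setup_env.py: it reads the runner configuration from
+  # $RUNNER_CONF and prints `export KEY=value` lines (e.g. RUNNER_POSTGRES_HOST,
+  # RUNNER_POSTGRES_PORT, RUNNER_TEST_JOBS), which the eval above loads into
+  # the job environment.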
+
+.dk-set-variables: &dk-set-variables
+  # - export # List all variables and their values set by Gitlab CI.
+  - whoami
+  - echo "CI_RUNNER_ID is $CI_RUNNER_ID"
+  - echo "CI_PIPELINE_URL is $CI_PIPELINE_URL"
+  - echo "CI_PIPELINE_ID is $CI_PIPELINE_ID"
+  - echo "CI_COMMIT_SHORT_SHA is $CI_COMMIT_SHORT_SHA"
   - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+  - export HIVEMIND_DB_NAME=${HIVEMIND_DB_NAME//[^a-zA-Z0-9_]/_}
+  - echo "HIVEMIND_DB_NAME is $HIVEMIND_DB_NAME"
 
-hivemind_build:
-  stage: build
-  script:
-    - pip3 install --user --upgrade pip setuptools
-    - git fetch --tags
-    - git tag -f ci_implicit_tag
-    - echo $PYTHONUSERBASE
-    - "python3 setup.py bdist_egg"
-    - ls -l dist/*
-  artifacts:
-    paths:
-      - dist/
-    expire_in: 1 week
+.dk-fetch-git-tags: &dk-fetch-git-tags
+  # - git fetch --tags # Looks to be unnecessary.
+  - git tag -f ci_implicit_tag # Needed to build python package
 
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
-      when: always
-    - when: always
-
-  tags:
-     - hivemind
+.dk-start-timer: &dk-start-timer
+  - ./scripts/ci/timer.sh start
 
-hivemind_sync:
-  stage: data-supply
+.dk-stop-timer: &dk-stop-timer
+  - ./scripts/ci/timer.sh check
 
-  environment:
-      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
-
-  needs:
-    - job: hivemind_build
-      artifacts: true
-  variables:
-    GIT_STRATEGY: none
-    PYTHONUSERBASE: ./local-site
-
-  script:
-    - pip3 install --user --upgrade pip setuptools
-    # WARNING!!! temporarily hardcoded 5000017 instead $HIVEMIND_MAX_BLOCK
-    # revert that change when $HIVEMIND_MAX_BLOCK will be set to 5000017
-    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" 5000017 $HIVEMIND_HTTP_PORT
-
-  artifacts:
-    paths:
-      - hivemind-sync.log
-
-    expire_in: 1 week
+.dk-hive-sync-script-common: &dk-hive-sync-script-common
+  - echo "${CI_RUNNER_ID}" > hive-sync-runner-id.txt
+  - ./scripts/ci/wait-for-postgres.sh "$RUNNER_POSTGRES_HOST" "$RUNNER_POSTGRES_PORT"
+  - export POSTGRES_MAJOR_VERSION=$(./scripts/ci/get-postgres-version.sh)
+  - ./scripts/ci/create-db.sh
+  - ./scripts/ci/hive-sync.sh
+  - ./scripts/ci/collect-db-stats.sh
 
+.dk-rules-for-sync: &dk-rules-for-sync
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
       when: always
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
       when: always
     - if: '$CI_PIPELINE_SOURCE == "push"'
       when: manual
-    - when: on_success
-
-  tags:
-     - hivemind
-
-hivemind_start_server:
-  stage: deploy
-  environment:
-    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
-    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
-    on_stop: hivemind_stop_server
-
-  needs:
-    - job: hivemind_build
-      artifacts: true
-#    - job: hivemind_sync
-#      artifacts: true
-  variables:
-    GIT_STRATEGY: none
-    PYTHONUSERBASE: ./local-site
-
-  script:
-    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
-
-  artifacts:
-    paths:
-      - hive_server.pid
-    expire_in: 1 week
+    - when: manual
 
+.dk-rules-for-test: &dk-rules-for-test
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
-      when: always
+      when: on_success
     - if: '$CI_PIPELINE_SOURCE == "push"'
-      when: manual
+      when: on_success
     - when: on_success
 
-  tags:
-     - hivemind
-
-hivemind_stop_server:
-  stage: post-deploy
-  environment:
-    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
-    action: stop
-
+.dk-default:
+  image: hivemind/python:3.6
+  interruptible: true
+  inherit:
+    default: false
+    variables: false
   variables:
-    GIT_STRATEGY: none
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - when: manual
+    GIT_DEPTH: 10
+    GIT_STRATEGY: fetch
+    GIT_SUBMODULE_STRATEGY: recursive
+    PIPENV_VENV_IN_PROJECT: 1
+    PIPENV_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pipenv"
+    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
+    POSTGRES_CLIENT_TOOLS_PATH: /usr/lib/postgresql
+    HIVEMIND_DB_NAME: "hive_${CI_COMMIT_REF_SLUG}"
+  cache: &dk-global-cache
+    # Per-branch caching: use key "$CI_COMMIT_REF_NAME"
+    # ($CI_COMMIT_REF_SLUG works the same way).
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any fixed key.
+    # Change this key if you need to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - *dk-start-timer
+    - *dk-fetch-git-tags
+    - *dk-set-variables
+    - *dk-setup-pip
+    - *dk-setup-runner-env
+  after_script:
+    - *dk-stop-timer
+
+##### Jobs #####
+
+dk-hivemind-sync:
+  # Postgres shared on the host.
+  extends: .dk-default
+  <<: *dk-rules-for-sync
+  stage: data-supply
+  needs: []
   script:
-    - scripts/ci_stop_server.sh hive_server.pid
-
-  needs:
-    - job: hivemind_start_server
-      artifacts: true
-
-  tags:
-     - hivemind
-
+    - *dk-hive-sync-script-common
   artifacts:
     paths:
-      - hive_server.log
+      - hivemind-sync.log
+      - pg-stats
+      - hive-sync-runner-id.txt
+    expire_in: 7 days
+  tags:
+    - hivemind-heavy-job
 
-.hivemind_start_api_smoketest: &common_api_smoketest_job
-  stage: e2e-test
-  environment: hive-4.pl.syncad.com
+.dk-test-common:
+  extends: .dk-default
+  <<: *dk-rules-for-test
   needs:
-    - job: hivemind_start_server
+    - job: dk-hivemind-sync
       artifacts: true
-
-  variables:
-    GIT_STRATEGY: none
-
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_PIPELINE_SOURCE == "push"'
-      when: manual
-    - when: on_success
-
+  allow_failure: false
+  before_script:
+    - *dk-start-timer
+    - *dk-fetch-git-tags
+    - *dk-set-variables
+    - *dk-setup-pip
+    - *dk-setup-runner-env
+    - ./scripts/ci/wait-for-postgres.sh "$RUNNER_POSTGRES_HOST" "$RUNNER_POSTGRES_PORT"
+    - ./scripts/ci/hive-server.sh start
+  after_script:
+    - *dk-stop-timer
   tags:
-     - hivemind
+    - hivemind-light-job
 
-bridge_api_smoketest:
-  <<: *common_api_smoketest_job
 
+dk-bridge_api_smoketest:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          bridge_api_patterns/ api_smoketest_bridge.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_bridge.xml
 
-bridge_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+dk-bridge_api_smoketest_negative:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          bridge_api_negative/ api_smoketest_bridge_negative.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_bridge_negative.xml
 
-condenser_api_smoketest:
-  <<: *common_api_smoketest_job
-
+dk-condenser_api_smoketest:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          condenser_api_patterns/ api_smoketest_condenser_api.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_condenser_api.xml
 
-condenser_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+dk-condenser_api_smoketest_negative:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          condenser_api_negative/ api_smoketest_condenser_api_negative.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_condenser_api_negative.xml
 
-database_api_smoketest:
-  <<: *common_api_smoketest_job
-
+dk-database_api_smoketest:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          database_api_patterns/ api_smoketest_database_api.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_database_api.xml
 
-database_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+dk-database_api_smoketest_negative:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          database_api_negative/ api_smoketest_database_api_negative.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_database_api_negative.xml
 
-follow_api_smoketest:
-  <<: *common_api_smoketest_job
-
+dk-follow_api_smoketest:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          follow_api_patterns/ api_smoketest_follow_api.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest.xml
 
-follow_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+dk-follow_api_smoketest_negative:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          follow_api_negative/ api_smoketest_follow_api_negative.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
       junit: api_smoketest_follow_api_negative.xml
 
-tags_api_smoketest:
-  <<: *common_api_smoketest_job
-
+dk-tags_api_smoketest:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
-
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          tags_api_patterns/ api_smoketest_tags_api.xml \
+          $RUNNER_TEST_JOBS
   artifacts:
+    when: always
     reports:
-      junit: api_smoketest_tags_api.xml
-
-tags_api_smoketest_negative:
-  <<: *common_api_smoketest_job
+      junit: api_smoketest_tags_api.xml
 
+dk-tags_api_smoketest_negative:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
-
-mock_tests:
-  <<: *common_api_smoketest_job
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          tags_api_negative/ api_smoketest_tags_api_negative.xml \
+          $RUNNER_TEST_JOBS
+  artifacts:
+    when: always
+    reports:
+      junit: api_smoketest_tags_api_negative.xml
 
+dk-mock_tests:
+  stage: e2e-test
+  extends: .dk-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" mock_tests/ api_smoketest_mock_tests.xml
+    - |
+      ./scripts/ci/start-api-smoketest.sh \
+          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
+          mock_tests/ api_smoketest_mock_tests.xml \
+          $RUNNER_TEST_JOBS
 
-api_smoketest_benchmark:
+dk-api-smoketest-benchmark:
   stage: benchmark-tests
-  environment: hive-4.pl.syncad.com
-  needs:
-    - job: hivemind_start_server
-      artifacts: true
-
+  extends: .dk-test-common
+  # Allowed to fail temporarily (any call taking longer than 1s counts as a failure).
   allow_failure: true
-
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_PIPELINE_SOURCE == "push"'
-      when: manual
-    - when: on_success
-
-  tags:
-     - hivemind
-
   script:
-    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
-
+    - |
+      ./scripts/ci/start-api-benchmarks.sh \
+          localhost $RUNNER_HIVEMIND_SERVER_HTTP_PORT 5 \
+          $RUNNER_TEST_JOBS
+    - ./scripts/xml_report_parser.py . ./tests/tests_api/hivemind/tavern
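+    # Assumption: xml_report_parser.py scans the junit output produced above
+    # and fails the job when any call exceeds the 1s threshold.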
   artifacts:
     when: always
     paths:
diff --git a/docker-compose-ci.yml b/docker-compose-ci.yml
index a944cc3d070b3abcc729fc4b99c43afa2eeaf7f0..3f731f8e3fe2c471ad5bcf6153b32832c8a772e2 100644
--- a/docker-compose-ci.yml
+++ b/docker-compose-ci.yml
@@ -1,32 +1,70 @@
-version: "3"
+version: "3.2"
 
 services:
 
-  python-3.6:
-    image: hivemind/python:3.6
+
+  python-3.6-dev:
+    image: hivemind/python:3.6-dev
     build:
       context: .
-      dockerfile: ./scripts/ci/python/3.6/Dockerfile
+      dockerfile: ./scripts/ci/python/3.6/dev.dockerfile
       args:
         - user=${USER}
-        - workdir=/home/${USER}
+        - workdir=/home/${USER}/hivemind
     user: ${USER}
-    shm_size: 0
-    # Below command makes your container running forever.
+    # security_opt:
+    #   # Significant performance boost (about 5%), but very insecure.
+    #   # See https://medium.com/better-programming/faster-python-in-docker-d1a71a9b9917
+    #   # See https://docs.docker.com/engine/security/seccomp/
+    #   - seccomp:unconfined
+    shm_size: 2g
     # command: ["tail", "-f", "/dev/null"]
+    volumes:
+      # Sockets of postgres servers running in docker containers.
+      - "postgres-10-run:/var/run/postgres-10"
+      - "postgres-12-run:/var/run/postgres-12"
+      # Sockets of postgres servers on the host.
+      - "/var/run/postgresql:/var/run/postgresql"
+      # For persisting python dependencies created in docker.
+      - "python-3.6-dev:/home/${USER}"
+      # Application stuff from host.
+      - "$PWD/hive:$PWD/hive"
+      - "$PWD/tests:$PWD/tests"
+      - "$PWD/hive.conf:$PWD/hive.conf"
+      - "$PWD/pyproject.toml:$PWD/pyproject.toml"
+      - "$PWD/README.md:$PWD/README.md"
+      - "$PWD/setup.cfg:$PWD/setup.cfg"
+      - "$PWD/setup.py:$PWD/setup.py"
+      - "$PWD/tox.ini:$PWD/tox.ini"
+
+
+  python-3.6:
+    image: hivemind/python:3.6
+    build:
+      context: .
+      dockerfile: ./scripts/ci/python/3.6/Dockerfile
+      args:
+        - user=worker
+    user: worker
+    shm_size: 2g
+    volumes:
+      # Sockets of postgres servers on the host.
+      - "/var/run/postgresql:/var/run/postgresql"
+
 
   python-3.8:
     image: hivemind/python:3.8
-    shm_size: 0
     build:
       context: .
       dockerfile: ./scripts/ci/python/3.8/Dockerfile
       args:
-        - user=${USER}
-        - workdir=/home/${USER}
-    user: ${USER}
-    # Below command makes your container running forever.
-    # command: ["tail", "-f", "/dev/null"]
+        - user=worker
+    user: worker
+    shm_size: 2g
+    volumes:
+      # Sockets of postgres servers on the host.
+      - "/var/run/postgresql:/var/run/postgresql"
+
 
   postgres-10:
     image: hivemind/postgres:10
@@ -37,35 +75,17 @@ services:
     environment:
       - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
     volumes:
-      - postgres-10-pgdata:/var/lib/postgresql/data
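+      # The tuning options previously passed as inline "-c" flags now live in
+      # the mounted config file (assumption: $POSTGRES_10_CONF_FILE points at
+      # a prepared postgresql.conf).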
+      - $PWD/$POSTGRES_10_CONF_FILE:/etc/postgresql/postgresql.conf:ro
+      - postgres-10-run:/var/run/postgresql
     ports:
       - "${POSTGRES_10_PUBLISHED_PORT}:5432"
-    shm_size: 0
+    shm_size: 12g
     command: [
       "postgres",
-      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
-      "-c", "track_functions=pl",
-      "-c", "track_io_timing=on",
-      "-c", "track_activity_query_size=2048",
-      "-c", "pg_stat_statements.max=10000",
-      "-c", "pg_stat_statements.track=all",
-      "-c", "max_connections=100",
-      "-c", "shared_buffers=12GB",
-      "-c", "effective_cache_size=36GB",
-      "-c", "maintenance_work_mem=2GB",
-      "-c", "checkpoint_completion_target=0.9",
-      "-c", "wal_buffers=16MB",
-      "-c", "default_statistics_target=100",
-      "-c", "random_page_cost=1.1",
-      "-c", "effective_io_concurrency=200",
-      "-c", "work_mem=31457kB",
-      "-c", "min_wal_size=2GB",
-      "-c", "max_wal_size=8GB",
-      "-c", "max_worker_processes=12",
-      "-c", "max_parallel_workers_per_gather=4",
-      "-c", "max_parallel_workers=12",
+      "-c", "config_file=/etc/postgresql/postgresql.conf"
     ]
 
+
   postgres-12:
     image: hivemind/postgres:12
     restart: unless-stopped
@@ -75,56 +95,38 @@ services:
     environment:
       - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
     volumes:
-      - postgres-12-pgdata:/var/lib/postgresql/data
+      - $PWD/$POSTGRES_12_CONF_FILE:/etc/postgresql/postgresql.conf:ro
+      - postgres-12-run:/var/run/postgresql
     ports:
       - "${POSTGRES_12_PUBLISHED_PORT}:5432"
-    shm_size: 0
-    # https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
+    shm_size: 12g
     command: [
       "postgres",
-      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
-      "-c", "track_functions=pl",
-      "-c", "track_io_timing=on",
-      "-c", "track_activity_query_size=2048",
-      "-c", "pg_stat_statements.max=10000",
-      "-c", "pg_stat_statements.track=all",
-      "-c", "max_connections=100",
-      "-c", "shared_buffers=12GB",
-      "-c", "effective_cache_size=36GB",
-      "-c", "maintenance_work_mem=2GB",
-      "-c", "checkpoint_completion_target=0.9",
-      "-c", "wal_buffers=16MB",
-      "-c", "default_statistics_target=100",
-      "-c", "random_page_cost=1.1",
-      "-c", "effective_io_concurrency=200",
-      "-c", "work_mem=31457kB",
-      "-c", "min_wal_size=2GB",
-      "-c", "max_wal_size=8GB",
-      "-c", "max_worker_processes=12",
-      "-c", "max_parallel_workers_per_gather=4",
-      "-c", "max_parallel_workers=12",
-      "-c", "max_parallel_maintenance_workers=4",
+      "-c", "config_file=/etc/postgresql/postgresql.conf"
     ]
 
+
   hived-node:
-    image: registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55
+    image: $HIVED_IMAGE
     restart: unless-stopped
-    # ports:
-    #   - "2001:2001"
-    #   - "8090:8090"
-    #   - "8091:8091"
-    shm_size: 0
+    ports:
+      - "$HIVED_PUBLISHED_WS_PORT:8090" # websocket
+      - "$HIVED_PUBLISHED_HTTP_PORT:8091"
+    shm_size: 12g
     entrypoint: /usr/local/hive/consensus/entrypoint.sh
-    command: >-
-      --replay-blockchain
-      --stop-replay-at-block 5000000
+    command: [
+      "--replay-blockchain",
+      "--stop-replay-at-block 5000000"
+    ]
     volumes:
       - $PWD/scripts/ci/hived-node/entrypoint.sh:/usr/local/hive/consensus/entrypoint.sh
       - $PWD/scripts/ci/hived-node/config.ini:/usr/local/hive/consensus/datadir/config.ini
       - ${HIVED_BLOCK_LOG_FILE}:/usr/local/hive/consensus/datadir/blockchain/block_log
       - hived-node-datadir:/usr/local/hive/consensus/datadir
 
+
 volumes:
-  postgres-10-pgdata:
-  postgres-12-pgdata:
+  postgres-10-run:
+  postgres-12-run:
   hived-node-datadir:
+  python-3.6-dev:
diff --git a/hive/cli.py b/hive/cli.py
index 6b8d467248f3f9cf773056658bdd92e2939d9b8c..1278641b612afd6990c454197941c7badae64019 100755
--- a/hive/cli.py
+++ b/hive/cli.py
@@ -21,13 +21,13 @@ def setup_logging(conf):
         fmt = '%(asctime)s.%(msecs)03d{} %(created).6f ' \
             '%(levelname)s - %(name)s - %(message)s'.format(timezone)
         logging.basicConfig(format=fmt, datefmt=datefmt)
-    if timestamp:
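+    # With `elif` the format options form a single chain, so only the first
+    # matching branch configures logging; previously the trailing `else` also
+    # fired whenever `epoch` was unset.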
+    elif timestamp:
         datefmt='%Y-%m-%d %H:%M:%S'
         timezone = time.strftime('%z')
         fmt = '%(asctime)s.%(msecs)03d{} ' \
             '%(levelname)s - %(name)s - %(message)s'.format(timezone)
         logging.basicConfig(format=fmt, datefmt=datefmt)
-    if epoch:
+    elif epoch:
         fmt = '%(created).6f %(levelname)s - %(name)s - %(message)s'
         logging.basicConfig(format=fmt)
     else:
diff --git a/scripts/ci/backup/.gitlab-ci-ssh.yaml b/scripts/ci/backup/.gitlab-ci-ssh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ad8fcd1906a9acbab88a109ba774317bdbe3182c
--- /dev/null
+++ b/scripts/ci/backup/.gitlab-ci-ssh.yaml
@@ -0,0 +1,265 @@
+stages:
+  - build
+  - test
+  - data-supply
+  - deploy
+  - e2e-test
+  - benchmark-tests
+  - post-deploy
+
+variables:
+  GIT_DEPTH: 1
+  LC_ALL: "C"
+  GIT_STRATEGY: clone
+  GIT_SUBMODULE_STRATEGY: recursive
+  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
+
+  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
+  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
+  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
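+  # Intended to give each concurrent job its own HTTP port. Assumption: the
+  # shell arithmetic is evaluated by the runner's shell (ssh executor), since
+  # Gitlab itself does not evaluate $(( )) in `variables:`.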
+  # Configured in Gitlab repository settings.
+  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
+  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  POSTGRES_HOST_AUTH_METHOD: trust
+  # The official way to provide a password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
+  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+
+default:
+  before_script:
+    - pwd
+    - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
+    - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
+    - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
+    - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+
+hivemind_build:
+  stage: build
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    - git fetch --tags
+    - git tag -f ci_implicit_tag
+    - echo $PYTHONUSERBASE
+    - "python3 setup.py bdist_egg"
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 1 week
+  tags:
+     - hivemind
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+
+hivemind_sync:
+  stage: data-supply
+  environment:
+      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
+  needs:
+    - job: hivemind_build
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    # WARNING!!! Temporarily hardcoded 5000017 instead of $HIVEMIND_MAX_BLOCK.
+    # Revert this change when $HIVEMIND_MAX_BLOCK is set to 5000017.
+    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" 5000017 $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hivemind-sync.log
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+hivemind_start_server:
+  stage: deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
+    on_stop: hivemind_stop_server
+  needs:
+    - job: hivemind_build
+      artifacts: true
+#    - job: hivemind_sync
+#      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+  script:
+    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
+  artifacts:
+    paths:
+      - hive_server.pid
+    expire_in: 1 week
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+hivemind_stop_server:
+  stage: post-deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    action: stop
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+  script:
+    - scripts/ci_stop_server.sh hive_server.pid
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  tags:
+     - hivemind
+  artifacts:
+    paths:
+      - hive_server.log
+
+.hivemind_start_api_smoketest: &common_api_smoketest_job
+  stage: e2e-test
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+
+bridge_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge.xml
+
+bridge_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge_negative.xml
+
+condenser_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api.xml
+
+condenser_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api_negative.xml
+
+database_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api.xml
+
+database_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api_negative.xml
+
+follow_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api.xml
+
+follow_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api_negative.xml
+
+tags_api_smoketest:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
+  artifacts:
+    reports:
+      junit: api_smoketest_tags_api.xml
+
+tags_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
+
+mock_tests:
+  <<: *common_api_smoketest_job
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" mock_tests/ api_smoketest_mock_tests.xml
+
+api_smoketest_benchmark:
+  stage: benchmark-tests
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+  allow_failure: true
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+  tags:
+     - hivemind
+  script:
+    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
+  artifacts:
+    when: always
+    paths:
+      - tavern_benchmarks_report.html
diff --git a/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-1.yaml b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..77413c9ebc7574fdd538a932964b2c9833482bbd
--- /dev/null
+++ b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-1.yaml
@@ -0,0 +1,42 @@
+stages:
+  - run
+
+variables:
+
+  GIT_DEPTH: 10
+
+  GIT_STRATEGY: fetch # It's quick, but we've sometimes noticed errors with it.
+  # GIT_STRATEGY: clone
+  # GIT_STRATEGY: none
+
+  GIT_SUBMODULE_STRATEGY: recursive
+  MY_VARIABLE: "bamboo"
+
+default:
+  image: hivemind/python:3.6
+  interruptible: false
+  cache: &global-cache
+    # Per-branch caching: use key "$CI_COMMIT_REF_NAME"
+    # ($CI_COMMIT_REF_SLUG works the same way).
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any fixed key.
+    # Change this key if you need to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - echo "I am before_script in child-1. MY_VARIABLE is $MY_VARIABLE"
+  after_script:
+    - echo "I am after_script in in child-1. MY_VARIABLE is $MY_VARIABLE"
+
+child-1-job:
+  stage: run
+  rules:
+    - when: manual
+  script:
+    - echo "I am script in child-1-job. MY_VARIABLE is $MY_VARIABLE"
+    - sleep 30
+    - exit 1
+  tags:
+    - hivemind-light-job
diff --git a/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-2.yaml b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..307f0f68ae46d93c61b42f787fa08fdacfa90521
--- /dev/null
+++ b/scripts/ci/child-pipelines/.gitlab-ci-child-pipeline-2.yaml
@@ -0,0 +1,38 @@
+stages:
+  - run
+
+variables:
+
+  GIT_DEPTH: 10
+
+  GIT_STRATEGY: fetch # It's quick, but we've sometimes noticed errors with it.
+  # GIT_STRATEGY: clone
+  # GIT_STRATEGY: none
+
+  GIT_SUBMODULE_STRATEGY: recursive
+  MY_VARIABLE: "bamboo"
+
+default:
+  image: hivemind/python:3.6
+  interruptible: false
+  cache: &global-cache
+    # Per-branch caching: use key "$CI_COMMIT_REF_NAME"
+    # ($CI_COMMIT_REF_SLUG works the same way).
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any fixed key.
+    # Change this key if you need to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - echo "I am before_script in child-2. MY_VARIABLE is $MY_VARIABLE"
+  after_script:
+    - echo "I am after_script in child-2. MY_VARIABLE is $MY_VARIABLE"
+
+child-2-job:
+  stage: run
+  script:
+    - echo "I am script in child-2-job. MY_VARIABLE is $MY_VARIABLE"
+  tags:
+    - hivemind-light-job
diff --git a/scripts/ci/child-pipelines/.gitlab-ci-dynamic.yaml b/scripts/ci/child-pipelines/.gitlab-ci-dynamic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c3e2d4d40e89b93be32f43f84b3f2b08c750044
--- /dev/null
+++ b/scripts/ci/child-pipelines/.gitlab-ci-dynamic.yaml
@@ -0,0 +1,33 @@
+# See https://gitlab.com/fgrimshaw/dynamic-ci
+# See https://gitlab.com/gitlab-org/gitlab/-/issues/212373
+
+# I tested this feature, but our current version of Gitlab, 13.2.2,
+# doesn't support it well. Child pipelines run with no problems,
+# but the UI displays wrong badges: for instance, a job was marked as
+# still running although it had finished. Also, jobs with the rule
+# "when: manual" were started without the user's permission.
+# We need to wait for better support in the Gitlab UI.
+
+stages:
+  - run
+
+variables:
+  GIT_STRATEGY: none
+
+trigger-child-1:
+  stage: run
+  rules:
+    - if: '$CI_COMMIT_MESSAGE =~ /child-1/'
+      when: always
+  trigger:
+    include: .gitlab-ci-child-pipeline-1.yaml
+    strategy: depend
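+    # `strategy: depend` makes this trigger job wait for the child pipeline
+    # and mirror its status.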
+
+trigger-child-2:
+  stage: run
+  rules:
+    - if: '$CI_COMMIT_MESSAGE =~ /child-2/'
+      when: always
+  trigger:
+    include: .gitlab-ci-child-pipeline-2.yaml
+    strategy: depend
diff --git a/scripts/ci/collect-db-stats.sh b/scripts/ci/collect-db-stats.sh
index 93bb1195fc0f8be03d681a078b0239fd24855850..f957af71f858f8260c43837a84910913ed2e0eb4 100755
--- a/scripts/ci/collect-db-stats.sh
+++ b/scripts/ci/collect-db-stats.sh
@@ -2,25 +2,27 @@
 
 set -euo pipefail
 
-collect_stats() {
+collect_db_stats() {
 
     echo "Collecting statistics from database ${HIVEMIND_DB_NAME}"
 
     mkdir -p pg-stats
     DIR=$PWD/pg-stats
 
-    PGPASSWORD=${POSTGRES_PASSWORD} psql \
-        --username "${POSTGRES_USER}" \
-        --host ${POSTGRES_HOST} \
-        --port ${POSTGRES_PORT} \
+    PGPASSWORD=${RUNNER_POSTGRES_APP_USER_PASSWORD} psql \
+        --username "${RUNNER_POSTGRES_APP_USER=}" \
+        --host ${RUNNER_POSTGRES_HOST} \
+        --port ${RUNNER_POSTGRES_PORT} \
         --dbname ${HIVEMIND_DB_NAME} << EOF
 \timing
 \copy (select * from pg_settings) to '$DIR/pg_settings.csv' WITH CSV HEADER
 \copy (select * from pg_stat_user_tables) to '$DIR/pg_stat_user_tables.csv' WITH CSV HEADER
 
 -- Disabled, because this table is too big.
---\copy (select * from pg_stat_statements) to '$DIR/pg_stat_statements.csv' WITH CSV HEADER
+-- \copy (select * from pg_stat_statements) to '$DIR/pg_stat_statements.csv' WITH CSV HEADER
 
+/*
+-- Disabled: this did not prove useful.
 -- See https://github.com/powa-team/pg_qualstats
 \echo pg_qualstats index advisor
 SELECT v
@@ -33,8 +35,9 @@ SELECT v
   FROM json_array_elements(
     pg_qualstats_index_advisor(min_filter => 50)->'unoptimised') v
   ORDER BY v::text COLLATE "C";
+*/
 EOF
 
 }
 
-collect_stats
+collect_db_stats
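
To run this script outside CI, only the RUNNER_POSTGRES_* variables and the
database name are needed; a sketch with placeholder values:

    # Placeholder credentials; substitute real values.
    export RUNNER_POSTGRES_APP_USER=hivemind_app
    export RUNNER_POSTGRES_APP_USER_PASSWORD=secret
    export RUNNER_POSTGRES_HOST=localhost
    export RUNNER_POSTGRES_PORT=5432
    export HIVEMIND_DB_NAME=hive_test
    ./scripts/ci/collect-db-stats.sh  # writes CSV files into ./pg-stats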
diff --git a/scripts/ci/create-db.sh b/scripts/ci/create-db.sh
index 8cc734f74e4cd8bb6ed04171dc47ab34fc46bed5..0a006f0bf93ec6eb0956dd49024095830c6e6b0d 100755
--- a/scripts/ci/create-db.sh
+++ b/scripts/ci/create-db.sh
@@ -2,46 +2,48 @@
 
 set -euo pipefail
 
-# TODO We have troubles with user, when postgresql is run from docker.
-# We need user name `postgres`, not other, I'm afraid.
-# ADMIN_POSTGRES_USER=postgres
-# ADMIN_POSTGRES_USER_PASSWORD=postgres
-
 create_db() {
 
-    echo "Creating user ${HIVEMIND_POSTGRES_USER} and database ${HIVEMIND_DB_NAME}, owned by this user"
+    echo "Creating user ${RUNNER_POSTGRES_APP_USER} and database ${HIVEMIND_DB_NAME}, owned by this user"
+
+    TEMPLATE="template_monitoring"
 
-    PGPASSWORD=${ADMIN_POSTGRES_USER_PASSWORD} psql \
-        --username "${ADMIN_POSTGRES_USER}" \
-        --host ${POSTGRES_HOST} \
-        --port ${POSTGRES_PORT} \
+    PGPASSWORD=${RUNNER_POSTGRES_ADMIN_USER_PASSWORD} psql \
+        --username "${RUNNER_POSTGRES_ADMIN_USER}" \
+        --host ${RUNNER_POSTGRES_HOST} \
+        --port ${RUNNER_POSTGRES_PORT} \
         --dbname postgres << EOF
 
-\echo Creating role ${HIVEMIND_POSTGRES_USER}
+\echo Creating role ${RUNNER_POSTGRES_APP_USER}
 
 DO \$$
 BEGIN
     IF EXISTS (SELECT * FROM pg_user
-            WHERE pg_user.usename = '${HIVEMIND_POSTGRES_USER}') THEN
-        raise warning 'Role % already exists', '${HIVEMIND_POSTGRES_USER}';
+            WHERE pg_user.usename = '${RUNNER_POSTGRES_APP_USER}') THEN
+        raise warning 'Role % already exists', '${RUNNER_POSTGRES_APP_USER}';
     ELSE
-        CREATE ROLE ${HIVEMIND_POSTGRES_USER}
-                WITH LOGIN PASSWORD '${HIVEMIND_POSTGRES_USER_PASSWORD}';
+        CREATE ROLE ${RUNNER_POSTGRES_APP_USER}
+                WITH LOGIN PASSWORD '${RUNNER_POSTGRES_APP_USER_PASSWORD}';
     END IF;
 END
 \$$;
 
-\echo Creating database ${HIVEMIND_DB_NAME}
+-- We drop the database to allow retrying the CI job.
+\echo Dropping database ${HIVEMIND_DB_NAME}
+DROP DATABASE IF EXISTS ${HIVEMIND_DB_NAME};
 
-CREATE DATABASE ${HIVEMIND_DB_NAME} TEMPLATE template_monitoring
-    OWNER ${HIVEMIND_POSTGRES_USER};
+\echo Creating database ${HIVEMIND_DB_NAME}
+CREATE DATABASE ${HIVEMIND_DB_NAME} TEMPLATE ${TEMPLATE}
+    OWNER ${RUNNER_POSTGRES_APP_USER};
 COMMENT ON DATABASE ${HIVEMIND_DB_NAME} IS
     'Database for Gitlab CI pipeline ${CI_PIPELINE_URL}, commit ${CI_COMMIT_SHORT_SHA}';
 
 \c ${HIVEMIND_DB_NAME}
 
+drop schema if exists hivemind_admin cascade;
+
 create schema hivemind_admin
-        authorization ${HIVEMIND_POSTGRES_USER};
+        authorization ${RUNNER_POSTGRES_APP_USER};
 
 CREATE SEQUENCE hivemind_admin.database_metadata_id_seq
     INCREMENT 1
@@ -63,10 +65,10 @@ CREATE TABLE hivemind_admin.database_metadata
 );
 
 alter sequence hivemind_admin.database_metadata_id_seq
-        OWNER TO ${HIVEMIND_POSTGRES_USER};
+        OWNER TO ${RUNNER_POSTGRES_APP_USER};
 
 alter table hivemind_admin.database_metadata
-        OWNER TO ${HIVEMIND_POSTGRES_USER};
+        OWNER TO ${RUNNER_POSTGRES_APP_USER};
 
 insert into hivemind_admin.database_metadata
     (database_name, ci_pipeline_url, ci_pipeline_id, commit_sha)
@@ -75,6 +77,8 @@ values (
     ${CI_PIPELINE_ID}, '${CI_COMMIT_SHORT_SHA}'
     );
 
+-- VACUUM VERBOSE ANALYZE;
+
 \q
 EOF
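
Because the DO block only warns when the role already exists and the database
is dropped before being recreated, the script is safe to run repeatedly, which
is what makes CI job retries work. A sketch, assuming the RUNNER_POSTGRES_*
and CI_* variables are exported:

    ./scripts/ci/create-db.sh
    ./scripts/ci/create-db.sh  # retry: role warning, database recreated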
 
diff --git a/scripts/ci/dump-db.sh b/scripts/ci/dump-db.sh
index e2e4764d0e790a731637ef6579783b5df45e7ed4..2b9f1c31e8e54d4db042dcc2ee54333c73262b60 100755
--- a/scripts/ci/dump-db.sh
+++ b/scripts/ci/dump-db.sh
@@ -2,26 +2,30 @@
 
 set -euo pipefail
 
-echo "Dumping database ${HIVEMIND_DB_NAME}"
+dump_db() {
+    echo "Dumping database ${HIVEMIND_DB_NAME}"
 
-export PGPASSWORD=${POSTGRES_PASSWORD}
-exec_path=$POSTGRES_CLIENT_TOOLS_PATH/$POSTGRES_MAJOR_VERSION/bin
+    export PGPASSWORD=${RUNNER_POSTGRES_APP_USER_PASSWORD}
+    exec_path=$POSTGRES_CLIENT_TOOLS_PATH/$POSTGRES_MAJOR_VERSION/bin
 
-echo "Using pg_dump version $($exec_path/pg_dump --version)"
+    echo "Using pg_dump version $($exec_path/pg_dump --version)"
 
-time $exec_path/pg_dump \
-    --username="${POSTGRES_USER}" \
-    --host="${POSTGRES_HOST}" \
-    --port="${POSTGRES_PORT}" \
-    --dbname="${HIVEMIND_DB_NAME}" \
-    --schema=public \
-    --format=directory \
-    --jobs=4 \
-    --compress=6 \
-    --quote-all-identifiers \
-    --lock-wait-timeout=30000 \
-    --no-privileges --no-acl \
-    --verbose \
-    --file="pg-dump-${HIVEMIND_DB_NAME}"
+    time $exec_path/pg_dump \
+        --username="${RUNNER_POSTGRES_APP_USER}" \
+        --host="${RUNNER_POSTGRES_HOST}" \
+        --port="${RUNNER_POSTGRES_PORT}" \
+        --dbname="${HIVEMIND_DB_NAME}" \
+        --schema=public \
+        --format=directory \
+        --jobs=4 \
+        --compress=6 \
+        --quote-all-identifiers \
+        --lock-wait-timeout=30000 \
+        --no-privileges --no-acl \
+        --verbose \
+        --file="pg-dump-${HIVEMIND_DB_NAME}"
 
-unset PGPASSWORD
+    unset PGPASSWORD
+}
+
+dump_db
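
The dump is written in directory format, so it can be restored in parallel
with pg_restore. A sketch, assuming an existing, empty target database
(the "_restored" name is hypothetical):

    PGPASSWORD=$RUNNER_POSTGRES_APP_USER_PASSWORD pg_restore \
        --username="$RUNNER_POSTGRES_APP_USER" \
        --host="$RUNNER_POSTGRES_HOST" \
        --port="$RUNNER_POSTGRES_PORT" \
        --dbname="${HIVEMIND_DB_NAME}_restored" \
        --jobs=4 \
        "pg-dump-${HIVEMIND_DB_NAME}"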
diff --git a/scripts/ci/get-postgres-version.sh b/scripts/ci/get-postgres-version.sh
index 47e42fda1179f05e01bc7104ec98bac453e1cbdc..4b0e05dd386785784b52f69ca391b36aed9a30b3 100755
--- a/scripts/ci/get-postgres-version.sh
+++ b/scripts/ci/get-postgres-version.sh
@@ -5,17 +5,16 @@
 set -euo pipefail
 
 get_postgres_version() {
-
+    # Get major version of postgres server.
     version=$(
-        PGPASSWORD=$POSTGRES_PASSWORD psql -X -A -t \
-            --username $POSTGRES_USER \
-            --host $POSTGRES_HOST \
-            --port ${POSTGRES_PORT} \
+        PGPASSWORD=$RUNNER_POSTGRES_APP_USER_PASSWORD psql -X -A -t \
+            --username $RUNNER_POSTGRES_APP_USER \
+            --host $RUNNER_POSTGRES_HOST \
+            --port ${RUNNER_POSTGRES_PORT} \
             --dbname postgres \
             -c "show server_version_num;"
         )
     echo $(echo $version | cut -c1-2)
-
 }
 
 get_postgres_version
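
On PostgreSQL 10 and later, server_version_num is a six-digit number whose
first two digits are the major version, which is what the cut call extracts:

    echo 100015 | cut -c1-2  # -> 10
    echo 120004 | cut -c1-2  # -> 12
    # Note: a pre-10 server (e.g. 90624) would yield "90";
    # the script assumes PostgreSQL 10+.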
diff --git a/scripts/ci/goodies/.gitlab-ci-goodies.yaml b/scripts/ci/goodies/.gitlab-ci-goodies.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..535d4e9542daff911c4baf56f3c76deeed946218
--- /dev/null
+++ b/scripts/ci/goodies/.gitlab-ci-goodies.yaml
@@ -0,0 +1,154 @@
+# Useful snippets for Gitlab CI, currently unused.
+
+.postgres-10: &postgres-10
+  name: hivemind/postgres:10
+  alias: db
+  command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=2GB",
+      "-c", "effective_cache_size=6GB",
+      "-c", "maintenance_work_mem=512MB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=5242kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=4",
+      "-c", "max_parallel_workers_per_gather=2",
+      "-c", "max_parallel_workers=4",
+      ]
+
+.postgres-12: &postgres-12
+  name: hivemind/postgres:12
+  alias: db
+  command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=2GB",
+      "-c", "effective_cache_size=6GB",
+      "-c", "maintenance_work_mem=512MB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=5242kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=4",
+      "-c", "max_parallel_workers_per_gather=2",
+      "-c", "max_parallel_workers=4",
+      ]
+
+.setup-setuptools: &setup-setuptools
+  - python -m venv .venv
+  - source .venv/bin/activate
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - time python setup.py develop
+
+.setup-setuptools-no-venv: &setup-setuptools-no-venv
+  # No virtual environment here.
+  # Setuptools will install all dependencies to PYTHONUSERBASE directory.
+  - export PYTHONUSERBASE=./local-site
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - mkdir -p `python -m site --user-site`
+  - python setup.py install --user --force
+  - ln -sf ./local-site/bin/hive "$HIVEMIND_EXEC_NAME"
+
+.setup-pipenv: &setup-pipenv
+  ## Note that Pipfile must exist.
+  ## `--sequential` is slower, but doesn't emit error messages
+  ## or require repeating the install:
+  ## - pipenv sync --dev --bare --sequential
+  ## The variant below is faster than `--sequential`, but sometimes
+  ## emits error messages and requires repeating the install.
+  ## These errors seem negligible, however.
+  - time pipenv sync --dev --bare
+  - source .venv/bin/activate
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+
+
+##### Jobs #####
+
+.build-egg:
+  stage: build
+  needs: []
+  script:
+    - python setup.py bdist_egg
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 7 days
+  tags:
+    - hivemind-light-job
+
+.build-wheel:
+  stage: build
+  needs: []
+  script:
+    - python setup.py bdist_wheel
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 7 days
+  tags:
+    - hivemind-light-job
+
+# Postgres as docker service
+.hivemind-sync-postgres-as-service:
+  # <<: *default-rules
+  stage: data-supply
+  services:
+    - *postgres-10
+    # - *postgres-12
+  needs: []
+  script:
+    # - *hive-sync-script-common
+    # - ./scripts/ci/dump-db.sh
+  artifacts:
+    paths:
+      - hivemind-sync.log
+      - pg-stats
+      - pg-dump-${HIVEMIND_DB_NAME}
+    expire_in: 7 hours
+  tags:
+    - hivemind-heavy-job
+
+# Test job doing nothing (for debugging CI)
+.just-a-test:
+  stage: e2e-test
+  extends: .e2e-test-common
+  script:
+    - echo "Run some tests"
diff --git a/scripts/ci/hive-server.sh b/scripts/ci/hive-server.sh
index 0499af8a9530e007689b0f00dd3a68742cd124a7..03ceca8a442640c8b17713020aed677be04dc590 100755
--- a/scripts/ci/hive-server.sh
+++ b/scripts/ci/hive-server.sh
@@ -4,17 +4,11 @@
 
 set -euo pipefail
 
+JOB=$1
 HIVEMIND_PID=0
 MERCY_KILL_TIMEOUT=5
 START_DELAY=5
 
-# For debug only!
-# HIVED_URL='{"default":"http://hived-node:8091"}'
-# HIVED_URL='{"default":"http://172.17.0.1:8091"}'
-# HIVED_URL='{"default":"http://127.0.0.1:8091"}'
-# HIVEMIND_HTTP_PORT="8080"
-# HIVEMIND_POSTGRESQL_CONNECTION_STRING="postgresql://syncad:devdev@localhost:5432/hive_test"
-
 check_pid() {
   if [ -f hive_server.pid ]; then
     HIVEMIND_PID=`cat hive_server.pid`
@@ -24,6 +18,7 @@ check_pid() {
       echo "Process pid $HIVEMIND_PID is running"
     else
       # Process is not running
+      echo "Process pid $HIVEMIND_PID is not running"
       rm hive_server.pid
       HIVEMIND_PID=0
     fi
@@ -33,7 +28,7 @@ check_pid() {
 }
 
 stop() {
-  if [ "$HIVEMIND_PID" -gt "0" ]; then
+  if [ "$HIVEMIND_PID" -gt 0 ]; then
     HIVEMIND_PID=`cat hive_server.pid`
 
     # Send INT signal and give it some time to stop.
@@ -52,22 +47,25 @@ stop() {
   fi
 }
 
-
 start() {
 
-  if [ "$HIVEMIND_PID" -gt "0" ]; then
+  if [ "$HIVEMIND_PID" -gt 0 ]; then
     echo "Hive server is already running (pid $HIVEMIND_PID)"
     exit 0
   fi
 
-  echo "Starting hive server on port ${HIVEMIND_HTTP_PORT}"
+  echo "Starting hive server on port ${RUNNER_HIVEMIND_SERVER_HTTP_PORT}"
+
+  USER=${RUNNER_POSTGRES_APP_USER}:${RUNNER_POSTGRES_APP_USER_PASSWORD}
+  OPTIONS="host=${RUNNER_POSTGRES_HOST}&port=${RUNNER_POSTGRES_PORT}"
+  DATABASE_URL="postgresql://${USER}@/${HIVEMIND_DB_NAME}?${OPTIONS}"
 
   hive server \
       --log-mask-sensitive-data \
       --pid-file hive_server.pid \
-      --http-server-port $HIVEMIND_HTTP_PORT \
-      --steemd-url "$HIVED_URL" \
-      --database-url "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" 2>&1 \
+      --http-server-port ${RUNNER_HIVEMIND_SERVER_HTTP_PORT} \
+      --steemd-url "${RUNNER_HIVED_URL}" \
+      --database-url "${DATABASE_URL}" 2>&1 \
       | tee -ia hivemind-server.log &
 
   HIVEMIND_PID=$!
@@ -81,11 +79,14 @@ start() {
       if ps -p $HIVEMIND_PID > /dev/null
       then
         echo "Hive server is running (pid $HIVEMIND_PID)"
+        # Write pid to file; sometimes the file contains a wrong pid otherwise.
+        echo $HIVEMIND_PID > hive_server.pid
         exit 0
       else
         # Check if process executed successfully or not.
         if wait $HIVEMIND_PID; then
           echo "Hive server has been started (pid $HIVEMIND_PID)"
+          echo $HIVEMIND_PID > hive_server.pid
           exit 0
         else
           RESULT=$?
@@ -107,5 +108,16 @@ start() {
 }
 
 
-check_pid
-"$1"
+main() {
+  check_pid
+  if [ "$JOB" = "start" ]; then
+    start
+  elif [ "$JOB" = "stop" ]; then
+    stop
+  else
+    echo "Invalid argument"
+    exit 1
+  fi
+}
+
+main
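
After this refactoring the script takes a single positional argument and
rejects anything other than the two known jobs:

    ./scripts/ci/hive-server.sh start  # starts server, writes hive_server.pid
    ./scripts/ci/hive-server.sh stop   # INT, then KILL after MERCY_KILL_TIMEOUT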
diff --git a/scripts/ci/hive-sync.sh b/scripts/ci/hive-sync.sh
index 6962980d0ffc435d8d63143958aaab25cd9dc771..7b3373a2d0e9cc9f67ec0c20bc6d3cbb0ab55a9b 100755
--- a/scripts/ci/hive-sync.sh
+++ b/scripts/ci/hive-sync.sh
@@ -2,25 +2,35 @@
 
 set -euo pipefail
 
-cat << EOF
-Starting hive sync using hived url: ${HIVED_URL}.
-Max sync block is: ${HIVEMIND_MAX_BLOCK}.
+# For debug only!
+# RUNNER_HIVEMIND_SYNC_MAX_BLOCK=10000
+# RUNNER_HIVED_URL='{"default":"http://hived-node:8091"}'
+# RUNNER_HIVED_URL='{"default":"http://172.17.0.1:8091"}'
+
+hive_sync() {
+    # Start hive sync process
+
+    cat << EOF
+Starting hive sync using hived url: ${RUNNER_HIVED_URL}.
+Max sync block is: ${RUNNER_HIVEMIND_SYNC_MAX_BLOCK}.
 EOF
 
-# For debug only!
-# HIVEMIND_MAX_BLOCK=10001
-# HIVED_URL='{"default":"http://hived-node:8091"}'
-# HIVED_URL='{"default":"http://172.17.0.1:8091"}'
-
-DATABASE_URL="postgresql://${HIVEMIND_POSTGRES_USER}:${HIVEMIND_POSTGRES_USER_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${HIVEMIND_DB_NAME}"
-
-hive sync \
-    --log-mask-sensitive-data \
-    --pid-file hive_sync.pid \
-    --test-max-block=${HIVEMIND_MAX_BLOCK} \
-    --exit-after-sync \
-    --test-profile=False \
-    --steemd-url "$HIVED_URL" \
-    --prometheus-port 11011 \
-    --database-url "$DATABASE_URL" \
-    2>&1 | tee -i hivemind-sync.log
+    USER=${RUNNER_POSTGRES_APP_USER}:${RUNNER_POSTGRES_APP_USER_PASSWORD}
+    OPTIONS="host=${RUNNER_POSTGRES_HOST}&port=${RUNNER_POSTGRES_PORT}"
+    DATABASE_URL="postgresql://${USER}@/${HIVEMIND_DB_NAME}?${OPTIONS}"
+
+    hive sync \
+        --log-mask-sensitive-data \
+        --pid-file hive_sync.pid \
+        --test-max-block=${RUNNER_HIVEMIND_SYNC_MAX_BLOCK} \
+        --exit-after-sync \
+        --test-profile=False \
+        --steemd-url "${RUNNER_HIVED_URL}" \
+        --prometheus-port 11011 \
+        --database-url "${DATABASE_URL}" \
+        --mock-block-data-path mock_data/block_data/follow_op/mock_block_data_follow.json \
+        2>&1 | tee -i hivemind-sync.log
+
+}
+
+hive_sync
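
Note the URL shape: host and port moved from the authority part into the
query string (postgresql://user:pass@/db?host=H&port=P), which libpq-style
URI parsers accept. The same URL can be smoke-tested with psql (values are
placeholders):

    DATABASE_URL="postgresql://hivemind_app:secret@/hive_test?host=localhost&port=5432"
    psql "$DATABASE_URL" -c 'select 1;'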
diff --git a/scripts/ci/hived-node/config.ini b/scripts/ci/hived-node/config.ini
index 1c1980279c5a2dfac05d738ffdc0141d6a771180..ecdc18b6d0c2896ed4dfc68928b1b93fe65ac1ef 100644
--- a/scripts/ci/hived-node/config.ini
+++ b/scripts/ci/hived-node/config.ini
@@ -8,7 +8,7 @@ plugin = webserver p2p json_rpc
 plugin = database_api
 # condenser_api enabled per abw request
 plugin = condenser_api
-plugin = block_api 
+plugin = block_api
 # gandalf enabled witness + rc
 plugin = witness
 plugin = rc
@@ -34,7 +34,7 @@ plugin = block_api network_broadcast_api rc_api
 history-disable-pruning = 1
 account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
 
-#shared-file-dir = "/run/hive"
+# shared-file-dir = "/run/hive"
 shared-file-size = 20G
 shared-file-full-threshold = 9500
 shared-file-scale-rate = 1000
@@ -45,8 +45,8 @@ market-history-bucket-size = [15,60,300,3600,86400]
 market-history-buckets-per-size = 5760
 
 p2p-endpoint = 0.0.0.0:2001
-p2p-seed-node = 
-#gtg.openhive.network:2001
+p2p-seed-node =
+# gtg.openhive.network:2001
 
 transaction-status-block-depth = 64000
 transaction-status-track-after-block = 42000000
diff --git a/scripts/ci/hived-node/run.sh b/scripts/ci/hived-node/run.sh
index 0c3fa4a80f5edc8306293014662580f94c4ae3b6..32841a9ae109d86fe254b38734710d09b95a9c88 100755
--- a/scripts/ci/hived-node/run.sh
+++ b/scripts/ci/hived-node/run.sh
@@ -4,6 +4,7 @@
 
 MYDIR="$PWD"
 WORKDIR="/usr/local/hive/consensus"
+IMAGE="registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55"
 
 docker run -d \
     --name hived-replay-5000000 \
@@ -14,5 +15,5 @@ docker run -d \
     -v $MYDIR/blockchain/block_log:$WORKDIR/datadir/blockchain/block_log \
     -v $MYDIR/entrypoint.sh:$WORKDIR/entrypoint.sh \
     --entrypoint $WORKDIR/entrypoint.sh \
-    registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55 \
+    $IMAGE \
     --replay-blockchain --stop-replay-at-block 5000000
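
The container runs detached, so replay progress has to be followed through
its logs, for example:

    docker logs --follow hived-replay-5000000
    docker inspect -f '{{.State.Status}}' hived-replay-5000000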
diff --git a/scripts/ci/postgres/10/postgresql.conf b/scripts/ci/postgres/10/postgresql.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3c9abc6a6f68fae32f429070a42b65a4e7311993
--- /dev/null
+++ b/scripts/ci/postgres/10/postgresql.conf
@@ -0,0 +1,686 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
+# "#" anywhere on a line.  The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal.  If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on".  Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir'		# use data in another directory
+					# (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf'	# host-based authentication file
+					# (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf'	# ident configuration file
+					# (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = ''			# write an extra PID file
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+					# comma-separated list of addresses;
+					# defaults to 'localhost'; use '*' for all
+					# (change requires restart)
+#port = 5432				# (change requires restart)
+max_connections = 100			# (change requires restart)
+#superuser_reserved_connections = 3	# (change requires restart)
+#unix_socket_directories = '/var/run/postgresql'	# comma-separated list of directories
+					# (change requires restart)
+#unix_socket_group = ''			# (change requires restart)
+#unix_socket_permissions = 0777		# begin with 0 to use octal notation
+					# (change requires restart)
+#bonjour = off				# advertise server via Bonjour
+					# (change requires restart)
+#bonjour_name = ''			# defaults to the computer name
+					# (change requires restart)
+
+# - Security and Authentication -
+
+#authentication_timeout = 1min		# 1s-600s
+#ssl = off
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
+#ssl_prefer_server_ciphers = on
+#ssl_ecdh_curve = 'prime256v1'
+#ssl_dh_params_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_key_file = 'server.key'
+#ssl_ca_file = ''
+#ssl_crl_file = ''
+#password_encryption = md5		# md5 or scram-sha-256
+#db_user_namespace = off
+#row_security = on
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = ''
+#krb_caseins_users = off
+
+# - TCP Keepalives -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_count = 0		# TCP_KEEPCNT;
+					# 0 selects the system default
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 128MB			# min 128kB
+					# (change requires restart)
+#huge_pages = try			# on, off, or try
+					# (change requires restart)
+#temp_buffers = 8MB			# min 800kB
+#max_prepared_transactions = 0		# zero disables the feature
+					# (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+#work_mem = 4MB				# min 64kB
+#maintenance_work_mem = 64MB		# min 1MB
+#replacement_sort_tuples = 150000	# limits use of replacement selection sort
+#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
+#max_stack_depth = 2MB			# min 100kB
+dynamic_shared_memory_type = posix	# the default is the first option
+					# supported by the operating system:
+					#   posix
+					#   sysv
+					#   windows
+					#   mmap
+					# use none to disable dynamic shared memory
+					# (change requires restart)
+
+# - Disk -
+
+#temp_file_limit = -1			# limits per-process temp file space
+					# in kB, or -1 for no limit
+
+# - Kernel Resource Usage -
+
+#max_files_per_process = 1000		# min 25
+					# (change requires restart)
+#shared_preload_libraries = ''		# (change requires restart)
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0			# 0-100 milliseconds
+#vacuum_cost_page_hit = 1		# 0-10000 credits
+#vacuum_cost_page_miss = 10		# 0-10000 credits
+#vacuum_cost_page_dirty = 20		# 0-10000 credits
+#vacuum_cost_limit = 200		# 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms			# 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100		# 0-1000 max buffers written/round
+#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 512kB		# measured in pages, 0 disables
+
+# - Asynchronous Behavior -
+
+#effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
+#max_worker_processes = 8		# (change requires restart)
+#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#max_parallel_workers = 8		# maximum number of max_worker_processes that
+					# can be used in parallel queries
+#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
+					# (change requires restart)
+#backend_flush_after = 0		# measured in pages, 0 disables
+
+
+#------------------------------------------------------------------------------
+# WRITE AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica			# minimal, replica, or logical
+					# (change requires restart)
+#fsync = on				# flush data to disk for crash safety
+					# (turning this off can cause
+					# unrecoverable data corruption)
+#synchronous_commit = on		# synchronization level;
+					# off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync		# the default is the first option
+					# supported by the operating system:
+					#   open_datasync
+					#   fdatasync (default on Linux)
+					#   fsync
+					#   fsync_writethrough
+					#   open_sync
+#full_page_writes = on			# recover from partial page writes
+#wal_compression = off			# enable compression of full-page writes
+#wal_log_hints = off			# also do full page writes of non-critical updates
+					# (change requires restart)
+#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
+					# (change requires restart)
+#wal_writer_delay = 200ms		# 1-10000 milliseconds
+#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
+
+#commit_delay = 0			# range 0-100000, in microseconds
+#commit_siblings = 5			# range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min		# range 30s-1d
+#max_wal_size = 1GB
+#min_wal_size = 80MB
+#checkpoint_completion_target = 0.5	# checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 256kB		# measured in pages, 0 disables
+#checkpoint_warning = 30s		# 0 disables
+
+# - Archiving -
+
+#archive_mode = off		# enables archiving; off, on, or always
+				# (change requires restart)
+#archive_command = ''		# command to use to archive a logfile segment
+				# placeholders: %p = path of file to archive
+				#               %f = file name only
+				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0		# force a logfile segment switch after this
+				# number of seconds; 0 disables
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Server(s) -
+
+# Set these on the master and on any standby that will send replication data.
+
+#max_wal_senders = 10		# max number of walsender processes
+				# (change requires restart)
+#wal_keep_segments = 0		# in logfile segments, 16MB each; 0 disables
+#wal_sender_timeout = 60s	# in milliseconds; 0 disables
+
+#max_replication_slots = 10	# max number of replication slots
+				# (change requires restart)
+#track_commit_timestamp = off	# collect timestamp of transaction commit
+				# (change requires restart)
+
+# - Master Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = ''	# standby servers that provide sync rep
+				# method to choose sync standbys, number of sync standbys,
+				# and comma-separated list of application_name
+				# from standby(s); '*' = all
+#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
+
+# - Standby Servers -
+
+# These settings are ignored on a master server.
+
+#hot_standby = on			# "off" disallows queries during recovery
+					# (change requires restart)
+#max_standby_archive_delay = 30s	# max delay before canceling queries
+					# when reading WAL from archive;
+					# -1 allows indefinite delay
+#max_standby_streaming_delay = 30s	# max delay before canceling queries
+					# when reading streaming WAL;
+					# -1 allows indefinite delay
+#wal_receiver_status_interval = 10s	# send replies at least this often
+					# 0 disables
+#hot_standby_feedback = off		# send info from standby to prevent
+					# query conflicts
+#wal_receiver_timeout = 60s		# time that receiver waits for
+					# communication from master
+					# in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
+					# retrieve WAL after a failed attempt
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_logical_replication_workers = 4	# taken from max_worker_processes
+					# (change requires restart)
+#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0			# measured on an arbitrary scale
+#random_page_cost = 4.0			# same scale as above
+#cpu_tuple_cost = 0.01			# same scale as above
+#cpu_index_tuple_cost = 0.005		# same scale as above
+#cpu_operator_cost = 0.0025		# same scale as above
+#parallel_tuple_cost = 0.1		# same scale as above
+#parallel_setup_cost = 1000.0	# same scale as above
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5			# range 1-10
+#geqo_pool_size = 0			# selects default based on effort
+#geqo_generations = 0			# selects default based on effort
+#geqo_selection_bias = 2.0		# range 1.5-2.0
+#geqo_seed = 0.0			# range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100	# range 1-10000
+#constraint_exclusion = partition	# on, off, or partition
+#cursor_tuple_fraction = 0.1		# range 0.0-1.0
+#from_collapse_limit = 8
+#join_collapse_limit = 8		# 1 disables collapsing of explicit
+					# JOIN clauses
+#force_parallel_mode = off
+
+
+#------------------------------------------------------------------------------
+# ERROR REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr'		# Valid values are combinations of
+					# stderr, csvlog, syslog, and eventlog,
+					# depending on platform.  csvlog
+					# requires logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off		# Enable capturing of stderr and csvlog
+					# into log files. Required to be on for
+					# csvlogs.
+					# (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'log'			# directory where log files are written,
+					# can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
+					# can include strftime() escapes
+#log_file_mode = 0600			# creation mode for log files,
+					# begin with 0 to use octal notation
+#log_truncate_on_rotation = off		# If on, an existing log file with the
+					# same name as the new log file will be
+					# truncated rather than appended to.
+					# But such truncation only occurs on
+					# time-driven rotation, not on restarts
+					# or size-driven rotation.  Default is
+					# off, meaning append to existing files
+					# in all cases.
+#log_rotation_age = 1d			# Automatic rotation of logfiles will
+					# happen after that time.  0 disables.
+#log_rotation_size = 10MB		# Automatic rotation of logfiles will
+					# happen after that much log output.
+					# 0 disables.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (win32):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#log_min_messages = warning		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic
+
+#log_min_error_statement = error	# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic (effectively off)
+
+#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
+					# and their durations, > 0 logs only
+					# statements running at least this number
+					# of milliseconds
+
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_error_verbosity = default		# terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] '		# special values:
+					#   %a = application name
+					#   %u = user name
+					#   %d = database name
+					#   %r = remote host and port
+					#   %h = remote host
+					#   %p = process ID
+					#   %t = timestamp without milliseconds
+					#   %m = timestamp with milliseconds
+					#   %n = timestamp with milliseconds (as a Unix epoch)
+					#   %i = command tag
+					#   %e = SQL state
+					#   %c = session ID
+					#   %l = session line number
+					#   %s = session start timestamp
+					#   %v = virtual transaction ID
+					#   %x = transaction ID (0 if none)
+					#   %q = stop here in non-session
+					#        processes
+					#   %% = '%'
+					# e.g. '<%u%%%d> '
+#log_lock_waits = off			# log lock waits >= deadlock_timeout
+#log_statement = 'none'			# none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1			# log temporary files equal or larger
+					# than the specified size in kilobytes;
+					# -1 disables, 0 logs all temp files
+log_timezone = 'Etc/UTC'
+
+
+# - Process Title -
+
+#cluster_name = ''			# added to process titles if nonempty
+					# (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# RUNTIME STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query/Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#track_io_timing = off
+#track_functions = none			# none, pl, all
+#track_activity_query_size = 1024	# (change requires restart)
+#stats_temp_directory = 'pg_stat_tmp'
+
+
+# - Statistics Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM PARAMETERS
+#------------------------------------------------------------------------------
+
+#autovacuum = on			# Enable autovacuum subprocess?  'on'
+					# requires track_counts to also be on.
+#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
+					# their durations, > 0 logs only
+					# actions running at least this number
+					# of milliseconds.
+#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
+					# (change requires restart)
+#autovacuum_naptime = 1min		# time between autovacuum runs
+#autovacuum_vacuum_threshold = 50	# min number of row updates before
+					# vacuum
+#autovacuum_analyze_threshold = 50	# min number of row updates before
+					# analyze
+#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
+					# (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
+					# before forced vacuum
+					# (change requires restart)
+#autovacuum_vacuum_cost_delay = 20ms	# default vacuum cost delay for
+					# autovacuum, in milliseconds;
+					# -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
+					# autovacuum, -1 means use
+					# vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#client_min_messages = notice		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   log
+					#   notice
+					#   warning
+					#   error
+#search_path = '"$user", public'	# schema names
+#default_tablespace = ''		# a tablespace name, '' uses the default
+#temp_tablespaces = ''			# a list of tablespace names, '' uses
+					# only default tablespace
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0			# in milliseconds, 0 is disabled
+#lock_timeout = 0			# in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
+#vacuum_freeze_min_age = 50000000
+#vacuum_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_freeze_table_age = 150000000
+#bytea_output = 'hex'			# hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_fuzzy_search_limit = 0
+#gin_pending_list_limit = 4MB
+
+# - Locale and Formatting -
+
+datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+timezone = 'Etc/UTC'
+#timezone_abbreviations = 'Default'     # Select the set of available time zone
+					# abbreviations.  Currently, there are
+					#   Default
+					#   Australia (historical usage)
+					#   India
+					# You can create your own file in
+					# share/timezonesets/.
+#extra_float_digits = 0			# min -15, max 3
+#client_encoding = sql_ascii		# actually, defaults to database
+					# encoding
+
+# These settings are initialized by initdb, but they can be changed.
+lc_messages = 'en_US.utf8'			# locale for system error message
+					# strings
+lc_monetary = 'en_US.utf8'			# locale for monetary formatting
+lc_numeric = 'en_US.utf8'			# locale for number formatting
+lc_time = 'en_US.utf8'				# locale for time formatting
+
+# default configuration for text search
+default_text_search_config = 'pg_catalog.english'
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64		# min 10
+					# (change requires restart)
+#max_pred_locks_per_transaction = 64	# min 10
+					# (change requires restart)
+#max_pred_locks_per_relation = -2	# negative values mean
+					# (max_pred_locks_per_transaction
+					#  / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2            # min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION/PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding	# on, off, or safe_encoding
+#default_with_oids = off
+#escape_string_warning = on
+#lo_compat_privileges = off
+#operator_precedence_warning = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off			# terminate session on any error?
+#restart_after_crash = on		# reinitialize after backend crash?
+#data_sync_retry = off			# retry or panic on failure to fsync
+					# data?
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf.  Note that these are directives, not variable
+# assignments, so they can usefully be given more than once.
+
+#include_dir = '...'			# include files ending in '.conf' from
+					# a directory, e.g., 'conf.d'
+#include_if_exists = '...'		# include file only if it exists
+#include = '...'			# include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
+
+
+# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
+
+shared_preload_libraries=pg_stat_statements,pg_qualstats
+track_functions=pl
+track_io_timing=on
+track_activity_query_size=2048
+pg_stat_statements.max=10000
+pg_stat_statements.track=all
+max_connections=100
+shared_buffers=12GB
+effective_cache_size=36GB
+maintenance_work_mem=2GB
+checkpoint_completion_target=0.9
+wal_buffers=16MB
+default_statistics_target=100
+random_page_cost=1.1
+effective_io_concurrency=200
+work_mem=31457kB
+min_wal_size=2GB
+max_wal_size=8GB
+max_worker_processes=12
+max_parallel_workers_per_gather=4
+max_parallel_workers=12
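
Whether this custom block took effect can be checked from psql once the
server is up (the superuser name is an assumption):

    psql -U postgres -c "show shared_preload_libraries;"
    psql -U postgres -c "select name, setting from pg_settings where name in ('work_mem', 'max_parallel_workers');"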
diff --git a/scripts/ci/postgres/12/postgresql.conf b/scripts/ci/postgres/12/postgresql.conf
new file mode 100644
index 0000000000000000000000000000000000000000..1a2dbb245cc9a243e4512af72ce2266e064c47e7
--- /dev/null
+++ b/scripts/ci/postgres/12/postgresql.conf
@@ -0,0 +1,776 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
+# "#" anywhere on a line.  The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal.  If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on".  Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir'		# use data in another directory
+					# (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf'	# host-based authentication file
+					# (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf'	# ident configuration file
+					# (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = ''			# write an extra PID file
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+					# comma-separated list of addresses;
+					# defaults to 'localhost'; use '*' for all
+					# (change requires restart)
+#port = 5432				# (change requires restart)
+max_connections = 100			# (change requires restart)
+#superuser_reserved_connections = 3	# (change requires restart)
+#unix_socket_directories = '/var/run/postgresql'	# comma-separated list of directories
+					# (change requires restart)
+#unix_socket_group = ''			# (change requires restart)
+#unix_socket_permissions = 0777		# begin with 0 to use octal notation
+					# (change requires restart)
+#bonjour = off				# advertise server via Bonjour
+					# (change requires restart)
+#bonjour_name = ''			# defaults to the computer name
+					# (change requires restart)
+
+# - TCP settings -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_count = 0		# TCP_KEEPCNT;
+					# 0 selects the system default
+#tcp_user_timeout = 0			# TCP_USER_TIMEOUT, in milliseconds;
+					# 0 selects the system default
+
+# - Authentication -
+
+#authentication_timeout = 1min		# 1s-600s
+#password_encryption = md5		# md5 or scram-sha-256
+#db_user_namespace = off
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = ''
+#krb_caseins_users = off
+
+# - SSL -
+
+#ssl = off
+#ssl_ca_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_crl_file = ''
+#ssl_key_file = 'server.key'
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
+#ssl_prefer_server_ciphers = on
+#ssl_ecdh_curve = 'prime256v1'
+#ssl_min_protocol_version = 'TLSv1'
+#ssl_max_protocol_version = ''
+#ssl_dh_params_file = ''
+#ssl_passphrase_command = ''
+#ssl_passphrase_command_supports_reload = off
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 128MB			# min 128kB
+					# (change requires restart)
+#huge_pages = try			# on, off, or try
+					# (change requires restart)
+#temp_buffers = 8MB			# min 800kB
+#max_prepared_transactions = 0		# zero disables the feature
+					# (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+#work_mem = 4MB				# min 64kB
+#maintenance_work_mem = 64MB		# min 1MB
+#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
+#max_stack_depth = 2MB			# min 100kB
+#shared_memory_type = mmap		# the default is the first option
+					# supported by the operating system:
+					#   mmap
+					#   sysv
+					#   windows
+					# (change requires restart)
+dynamic_shared_memory_type = posix	# the default is the first option
+					# supported by the operating system:
+					#   posix
+					#   sysv
+					#   windows
+					#   mmap
+					# (change requires restart)
+
+# - Disk -
+
+#temp_file_limit = -1			# limits per-process temp file space
+					# in kB, or -1 for no limit
+
+# - Kernel Resources -
+
+#max_files_per_process = 1000		# min 25
+					# (change requires restart)
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0			# 0-100 milliseconds (0 disables)
+#vacuum_cost_page_hit = 1		# 0-10000 credits
+#vacuum_cost_page_miss = 10		# 0-10000 credits
+#vacuum_cost_page_dirty = 20		# 0-10000 credits
+#vacuum_cost_limit = 200		# 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms			# 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100		# max buffers written/round, 0 disables
+#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 512kB		# measured in pages, 0 disables
+
+# - Asynchronous Behavior -
+
+#effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
+#max_worker_processes = 8		# (change requires restart)
+#max_parallel_maintenance_workers = 2	# taken from max_parallel_workers
+#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#parallel_leader_participation = on
+#max_parallel_workers = 8		# maximum number of max_worker_processes that
+					# can be used in parallel operations
+#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
+					# (change requires restart)
+#backend_flush_after = 0		# measured in pages, 0 disables
+
+
+#------------------------------------------------------------------------------
+# WRITE-AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica			# minimal, replica, or logical
+					# (change requires restart)
+#fsync = on				# flush data to disk for crash safety
+					# (turning this off can cause
+					# unrecoverable data corruption)
+#synchronous_commit = on		# synchronization level;
+					# off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync		# the default is the first option
+					# supported by the operating system:
+					#   open_datasync
+					#   fdatasync (default on Linux)
+					#   fsync
+					#   fsync_writethrough
+					#   open_sync
+#full_page_writes = on			# recover from partial page writes
+#wal_compression = off			# enable compression of full-page writes
+#wal_log_hints = off			# also do full page writes of non-critical updates
+					# (change requires restart)
+#wal_init_zero = on			# zero-fill new WAL files
+#wal_recycle = on			# recycle WAL files
+#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
+					# (change requires restart)
+#wal_writer_delay = 200ms		# 1-10000 milliseconds
+#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
+
+#commit_delay = 0			# range 0-100000, in microseconds
+#commit_siblings = 5			# range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min		# range 30s-1d
+max_wal_size = 1GB
+min_wal_size = 80MB
+#checkpoint_completion_target = 0.5	# checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 256kB		# measured in pages, 0 disables
+#checkpoint_warning = 30s		# 0 disables
+
+# - Archiving -
+
+#archive_mode = off		# enables archiving; off, on, or always
+				# (change requires restart)
+#archive_command = ''		# command to use to archive a logfile segment
+				# placeholders: %p = path of file to archive
+				#               %f = file name only
+				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0		# force a logfile segment switch after this
+				# number of seconds; 0 disables
+
+# - Archive Recovery -
+
+# These are only used in recovery mode.
+
+#restore_command = ''		# command to use to restore an archived logfile segment
+				# placeholders: %p = path of file to restore
+				#               %f = file name only
+				# e.g. 'cp /mnt/server/archivedir/%f %p'
+				# (change requires restart)
+#archive_cleanup_command = ''	# command to execute at every restartpoint
+#recovery_end_command = ''	# command to execute at completion of recovery
+
+# - Recovery Target -
+
+# Set these only when performing a targeted recovery.
+
+#recovery_target = ''		# 'immediate' to end recovery as soon as a
+                                # consistent state is reached
+				# (change requires restart)
+#recovery_target_name = ''	# the named restore point to which recovery will proceed
+				# (change requires restart)
+#recovery_target_time = ''	# the time stamp up to which recovery will proceed
+				# (change requires restart)
+#recovery_target_xid = ''	# the transaction ID up to which recovery will proceed
+				# (change requires restart)
+#recovery_target_lsn = ''	# the WAL LSN up to which recovery will proceed
+				# (change requires restart)
+#recovery_target_inclusive = on # Specifies whether to stop:
+				# just after the specified recovery target (on)
+				# just before the recovery target (off)
+				# (change requires restart)
+#recovery_target_timeline = 'latest'	# 'current', 'latest', or timeline ID
+				# (change requires restart)
+#recovery_target_action = 'pause'	# 'pause', 'promote', 'shutdown'
+				# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Servers -
+
+# Set these on the master and on any standby that will send replication data.
+
+#max_wal_senders = 10		# max number of walsender processes
+				# (change requires restart)
+#wal_keep_segments = 0		# in logfile segments; 0 disables
+#wal_sender_timeout = 60s	# in milliseconds; 0 disables
+
+#max_replication_slots = 10	# max number of replication slots
+				# (change requires restart)
+#track_commit_timestamp = off	# collect timestamp of transaction commit
+				# (change requires restart)
+
+# - Master Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = ''	# standby servers that provide sync rep
+				# method to choose sync standbys, number of sync standbys,
+				# and comma-separated list of application_name
+				# from standby(s); '*' = all
+#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
+
+# - Standby Servers -
+
+# These settings are ignored on a master server.
+
+#primary_conninfo = ''			# connection string to sending server
+					# (change requires restart)
+#primary_slot_name = ''			# replication slot on sending server
+					# (change requires restart)
+#promote_trigger_file = ''		# file name whose presence ends recovery
+#hot_standby = on			# "off" disallows queries during recovery
+					# (change requires restart)
+#max_standby_archive_delay = 30s	# max delay before canceling queries
+					# when reading WAL from archive;
+					# -1 allows indefinite delay
+#max_standby_streaming_delay = 30s	# max delay before canceling queries
+					# when reading streaming WAL;
+					# -1 allows indefinite delay
+#wal_receiver_status_interval = 10s	# send replies at least this often
+					# 0 disables
+#hot_standby_feedback = off		# send info from standby to prevent
+					# query conflicts
+#wal_receiver_timeout = 60s		# time that receiver waits for
+					# communication from master
+					# in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
+					# retrieve WAL after a failed attempt
+#recovery_min_apply_delay = 0		# minimum delay for applying changes during recovery
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_logical_replication_workers = 4	# taken from max_worker_processes
+					# (change requires restart)
+#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_parallel_append = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+#enable_partitionwise_join = off
+#enable_partitionwise_aggregate = off
+#enable_parallel_hash = on
+#enable_partition_pruning = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0			# measured on an arbitrary scale
+#random_page_cost = 4.0			# same scale as above
+#cpu_tuple_cost = 0.01			# same scale as above
+#cpu_index_tuple_cost = 0.005		# same scale as above
+#cpu_operator_cost = 0.0025		# same scale as above
+#parallel_tuple_cost = 0.1		# same scale as above
+#parallel_setup_cost = 1000.0	# same scale as above
+
+#jit_above_cost = 100000		# perform JIT compilation if available
+					# and query more expensive than this;
+					# -1 disables
+#jit_inline_above_cost = 500000		# inline small functions if query is
+					# more expensive than this; -1 disables
+#jit_optimize_above_cost = 500000	# use expensive JIT optimizations if
+					# query is more expensive than this;
+					# -1 disables
+
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5			# range 1-10
+#geqo_pool_size = 0			# selects default based on effort
+#geqo_generations = 0			# selects default based on effort
+#geqo_selection_bias = 2.0		# range 1.5-2.0
+#geqo_seed = 0.0			# range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100	# range 1-10000
+#constraint_exclusion = partition	# on, off, or partition
+#cursor_tuple_fraction = 0.1		# range 0.0-1.0
+#from_collapse_limit = 8
+#join_collapse_limit = 8		# 1 disables collapsing of explicit
+					# JOIN clauses
+#force_parallel_mode = off
+#jit = on				# allow JIT compilation
+#plan_cache_mode = auto			# auto, force_generic_plan or
+					# force_custom_plan
+
+
+#------------------------------------------------------------------------------
+# REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr'		# Valid values are combinations of
+					# stderr, csvlog, syslog, and eventlog,
+					# depending on platform.  csvlog
+					# requires logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off		# Enable capturing of stderr and csvlog
+					# into log files. Required to be on for
+					# csvlogs.
+					# (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'log'			# directory where log files are written,
+					# can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
+					# can include strftime() escapes
+#log_file_mode = 0600			# creation mode for log files,
+					# begin with 0 to use octal notation
+#log_truncate_on_rotation = off		# If on, an existing log file with the
+					# same name as the new log file will be
+					# truncated rather than appended to.
+					# But such truncation only occurs on
+					# time-driven rotation, not on restarts
+					# or size-driven rotation.  Default is
+					# off, meaning append to existing files
+					# in all cases.
+#log_rotation_age = 1d			# Automatic rotation of logfiles will
+					# happen after that time.  0 disables.
+#log_rotation_size = 10MB		# Automatic rotation of logfiles will
+					# happen after that much log output.
+					# 0 disables.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (win32):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#log_min_messages = warning		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic
+
+#log_min_error_statement = error	# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic (effectively off)
+
+#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
+					# and their durations, > 0 logs only
+					# statements running at least this number
+					# of milliseconds
+
+#log_transaction_sample_rate = 0.0	# Fraction of transactions whose statements
+					# are logged regardless of their duration. 1.0 logs all
+					# statements from all transactions, 0.0 never logs.
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_error_verbosity = default		# terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] '		# special values:
+					#   %a = application name
+					#   %u = user name
+					#   %d = database name
+					#   %r = remote host and port
+					#   %h = remote host
+					#   %p = process ID
+					#   %t = timestamp without milliseconds
+					#   %m = timestamp with milliseconds
+					#   %n = timestamp with milliseconds (as a Unix epoch)
+					#   %i = command tag
+					#   %e = SQL state
+					#   %c = session ID
+					#   %l = session line number
+					#   %s = session start timestamp
+					#   %v = virtual transaction ID
+					#   %x = transaction ID (0 if none)
+					#   %q = stop here in non-session
+					#        processes
+					#   %% = '%'
+					# e.g. '<%u%%%d> '
+#log_lock_waits = off			# log lock waits >= deadlock_timeout
+#log_statement = 'none'			# none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1			# log temporary files equal or larger
+					# than the specified size in kilobytes;
+					# -1 disables, 0 logs all temp files
+log_timezone = 'Etc/UTC'
+
+#------------------------------------------------------------------------------
+# PROCESS TITLE
+#------------------------------------------------------------------------------
+
+#cluster_name = ''			# added to process titles if nonempty
+					# (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query and Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#track_io_timing = off
+#track_functions = none			# none, pl, all
+#track_activity_query_size = 1024	# (change requires restart)
+#stats_temp_directory = 'pg_stat_tmp'
+
+
+# - Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM
+#------------------------------------------------------------------------------
+
+#autovacuum = on			# Enable autovacuum subprocess?  'on'
+					# requires track_counts to also be on.
+#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
+					# their durations, > 0 logs only
+					# actions running at least this number
+					# of milliseconds.
+#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
+					# (change requires restart)
+#autovacuum_naptime = 1min		# time between autovacuum runs
+#autovacuum_vacuum_threshold = 50	# min number of row updates before
+					# vacuum
+#autovacuum_analyze_threshold = 50	# min number of row updates before
+					# analyze
+#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
+					# (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
+					# before forced vacuum
+					# (change requires restart)
+#autovacuum_vacuum_cost_delay = 2ms	# default vacuum cost delay for
+					# autovacuum, in milliseconds;
+					# -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
+					# autovacuum, -1 means use
+					# vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#client_min_messages = notice		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   log
+					#   notice
+					#   warning
+					#   error
+#search_path = '"$user", public'	# schema names
+#row_security = on
+#default_tablespace = ''		# a tablespace name, '' uses the default
+#temp_tablespaces = ''			# a list of tablespace names, '' uses
+					# only default tablespace
+#default_table_access_method = 'heap'
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0			# in milliseconds, 0 is disabled
+#lock_timeout = 0			# in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
+#vacuum_freeze_min_age = 50000000
+#vacuum_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_freeze_table_age = 150000000
+#vacuum_cleanup_index_scale_factor = 0.1	# fraction of total number of tuples
+						# before index cleanup, 0 always performs
+						# index cleanup
+#bytea_output = 'hex'			# hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_fuzzy_search_limit = 0
+#gin_pending_list_limit = 4MB
+
+# - Locale and Formatting -
+
+datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+timezone = 'Etc/UTC'
+#timezone_abbreviations = 'Default'     # Select the set of available time zone
+					# abbreviations.  Currently, there are
+					#   Default
+					#   Australia (historical usage)
+					#   India
+					# You can create your own file in
+					# share/timezonesets/.
+#extra_float_digits = 1			# min -15, max 3; any value >0 actually
+					# selects precise output mode
+#client_encoding = sql_ascii		# actually, defaults to database
+					# encoding
+
+# These settings are initialized by initdb, but they can be changed.
+lc_messages = 'en_US.utf8'			# locale for system error message
+					# strings
+lc_monetary = 'en_US.utf8'			# locale for monetary formatting
+lc_numeric = 'en_US.utf8'			# locale for number formatting
+lc_time = 'en_US.utf8'				# locale for time formatting
+
+# default configuration for text search
+default_text_search_config = 'pg_catalog.english'
+
+# - Shared Library Preloading -
+
+#shared_preload_libraries = ''	# (change requires restart)
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+#jit_provider = 'llvmjit'		# JIT library to use
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64		# min 10
+					# (change requires restart)
+#max_pred_locks_per_transaction = 64	# min 10
+					# (change requires restart)
+#max_pred_locks_per_relation = -2	# negative values mean
+					# (max_pred_locks_per_transaction
+					#  / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2            # min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION AND PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding	# on, off, or safe_encoding
+#escape_string_warning = on
+#lo_compat_privileges = off
+#operator_precedence_warning = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off			# terminate session on any error?
+#restart_after_crash = on		# reinitialize after backend crash?
+#data_sync_retry = off			# retry or panic on failure to fsync
+					# data?
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf.  Note that these are directives, not variable
+# assignments, so they can usefully be given more than once.
+
+#include_dir = '...'			# include files ending in '.conf' from
+					# a directory, e.g., 'conf.d'
+#include_if_exists = '...'		# include file only if it exists
+#include = '...'			# include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
+
+
+# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
+
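+# Roughly how those inputs appear to map to the values below (assumed
+# pgtune-style heuristics, for orientation only):
+#   shared_buffers       ~ RAM/4   = 12GB
+#   effective_cache_size ~ RAM*3/4 = 36GB
+#   maintenance_work_mem ~ RAM/16, capped at 2GB
+#   work_mem             ~ (RAM - shared_buffers) / (max_connections * 3)
+#                          / max_parallel_workers_per_gather ~ 31457kB
+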
+shared_preload_libraries='pg_stat_statements,pg_qualstats'
+track_functions=pl
+track_io_timing=on
+track_activity_query_size=2048
+pg_stat_statements.max=10000
+pg_stat_statements.track=all
+max_connections=100
+shared_buffers=12GB
+effective_cache_size=36GB
+maintenance_work_mem=2GB
+checkpoint_completion_target=0.9
+wal_buffers=16MB
+default_statistics_target=100
+random_page_cost=1.1
+effective_io_concurrency=200
+work_mem=31457kB
+min_wal_size=2GB
+max_wal_size=8GB
+max_worker_processes=12
+max_parallel_workers_per_gather=4
+max_parallel_workers=12
+max_parallel_maintenance_workers=4
diff --git a/scripts/ci/python/3.6/Dockerfile b/scripts/ci/python/3.6/Dockerfile
index b82dbc46c6766971cf9ccc7ead0f00a821792f99..69b046c159281790efb9413e1ace50b18d5c52a0 100644
--- a/scripts/ci/python/3.6/Dockerfile
+++ b/scripts/ci/python/3.6/Dockerfile
@@ -39,12 +39,7 @@ ENV user ${user}
 ## Add user ##
 RUN groupadd --gid 1000 ${user} \
     && useradd --create-home --uid 1000 --gid ${user} ${user}
-
-# Gitlab CI accepts only root user, so we don't set user here.
-# You can (and should) run command in container as user `alice` this way:
-# docker-compose run --rm --user=alice --name=myrunner runner /bin/bash
-# USER ${user}
-
+USER ${user}
 WORKDIR /home/${user}
 RUN chown -R ${user}:${user} /home/${user}
 
diff --git a/scripts/ci/python/3.6/dev.dockerfile b/scripts/ci/python/3.6/dev.dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..19fc00387378571659818eec7ab8eeac65d8eb19
--- /dev/null
+++ b/scripts/ci/python/3.6/dev.dockerfile
@@ -0,0 +1,51 @@
+FROM python:3.6.12-buster
+
+# For running Python as a non-root user, e.g. on a development machine.
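+#
+# Illustrative usage (the tag and mount path are examples):
+#   docker build --build-arg user=$USER \
+#       -t hivemind-dev -f scripts/ci/python/3.6/dev.dockerfile .
+#   docker run -it --rm -v "$(pwd)":/home/$USER/src/hivemind hivemind-dev /bin/bash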
+
+# Setup python environment.
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONFAULTHANDLER 1
+
+# Install debian packages.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        curl \
+        ca-certificates \
+        gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install debian pgdg repository.
+RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main" \
+        > /etc/apt/sources.list.d/pgdg.list
+RUN apt-get update
+# Install PostgreSQL client programs for various PostgreSQL versions.
+RUN apt-get install -y --no-install-recommends \
+        postgresql-client-10 \
+        postgresql-client-11 \
+        postgresql-client-12 \
+        postgresql-client-13 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Upgrade some crucial python packages.
+RUN pip install --upgrade pip setuptools wheel
+
+# Install python dependencies via pip.
+RUN pip install pipenv poetry
+
+ARG user
+ENV user ${user}
+
+## Add user ##
+RUN groupadd --gid 1000 ${user} \
+    && useradd --create-home --uid 1000 --gid ${user} ${user}
+
+RUN mkdir -p /home/${user}/src/hivemind
+RUN chown -R ${user}:${user} /home/${user}
+
+WORKDIR /home/${user}/src/hivemind
+USER ${user}
+
+CMD [ "python3" ]
diff --git a/scripts/ci/python/3.8/Dockerfile b/scripts/ci/python/3.8/Dockerfile
index 41c3d4deeac1147b454b0083da6e04af8226f5a8..9a7bf7893c6de9c23141e678ec6df0e0373fce2e 100644
--- a/scripts/ci/python/3.8/Dockerfile
+++ b/scripts/ci/python/3.8/Dockerfile
@@ -39,12 +39,7 @@ ENV user ${user}
 ## Add user ##
 RUN groupadd --gid 1000 ${user} \
     && useradd --create-home --uid 1000 --gid ${user} ${user}
-
-# Gitlab CI accepts only root user, so we don't set user here.
-# You can (and should) run command in container as user `alice` this way:
-# docker-compose run --rm --user=alice --name=myrunner runner /bin/bash
-# USER ${user}
-
+USER ${user}
 WORKDIR /home/${user}
 RUN chown -R ${user}:${user} /home/${user}
 
diff --git a/scripts/ci/setup_env.py b/scripts/ci/setup_env.py
new file mode 100755
index 0000000000000000000000000000000000000000..8f24d82d5c36c0a9bc725d5b9fa7d9c426a922c9
--- /dev/null
+++ b/scripts/ci/setup_env.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Tool for Gitlab runner to read environment from project variable
+and setup bash environment.
+When running on Gitlab CI you can  do this:
+```
+eval "$(cat $MY_ENV_VARIABLE | ./scripts/ci/setup_env.py)"
+echo "RUNNER_ID is $RUNNER_ID"
+```
+In bash you can do this:
+```
+eval "$(cat ./.tmp/env.yaml | ./scripts/ci/setup_env.py)"
+echo "RUNNER_ID is $RUNNER_ID"
+```
+"""
+
+import logging
+import sys
+import argparse
+import yaml
+
+FORMAT = '# %(asctime)s - %(name)s - %(levelname)s - %(message)s '
+logging.basicConfig(format=FORMAT)
+logger = logging.getLogger(__name__)
+
+
+def output(message, outfile, end='\n'):
+    """Print data to outfile"""
+    print(message, file=outfile, end=end)
+
+
+def read(infile):
+    """Read YAML data from a file object or a string"""
+    if hasattr(infile, 'read'):
+        data = yaml.safe_load(infile.read())
+    else:
+        data = yaml.safe_load(infile)
+    return data
+
+
+def setup_env(current_runner_id, hive_sync_runner_id, infile, outfile, end, **kwargs):
+    """
+    Resolve and output the bash environment for the pending CI job.
+    Assumption: all jobs in a pipeline must use the same database.
+    We need to point the current runner to the database used by the
+    runner that did hive sync (the first stage in the pipeline).
+    """
+
+    logger.debug('current_runner_id: %s', current_runner_id)
+    logger.debug('hive_sync_runner_id: %s', hive_sync_runner_id)
+
+    data = read(infile)
+    logger.debug('data: %s', data)
+
+    current_runner = data['runners'][str(current_runner_id)]
+    if hive_sync_runner_id == 0:
+        hive_sync_runner = current_runner
+    else:
+        hive_sync_runner = data['runners'][str(hive_sync_runner_id)]
+
+    if hive_sync_runner_id == 0:
+        # Current runner does hive sync itself, so its own settings apply.
+        logger.debug('case 1')
+        runner = current_runner
+    elif current_runner_id == hive_sync_runner_id:
+        # Current runner is the very runner that did hive sync.
+        logger.debug('case 2')
+        runner = current_runner
+    else:
+        if current_runner['host'] == hive_sync_runner['host']:
+            # We assume that all executors on the same machine
+            # use the same postgres server with the same credentials
+            # and unix socket connection configuration. So do nothing.
+            logger.debug('case 3')
+            runner = current_runner
+        else:
+            # Take postgres settings from the runner that did hive sync,
+            # but point the current runner at postgres on that runner's
+            # host (assumed to be exposed on the network).
+            logger.debug('case 4')
+            runner = {}
+            for key, value in current_runner.items():
+                if key.startswith('postgres'):
+                    if key == 'postgres_host':
+                        runner[key] = hive_sync_runner['host']
+                    else:
+                        runner[key] = hive_sync_runner[key]
+                else:
+                    runner[key] = value
+
+    for key in runner:
+        output(
+            f'export RUNNER_{key.upper()}="{str(runner[key])}"',
+            outfile,
+            end,
+            )
+
+    for key in data['common']:
+        output(
+            f"export RUNNER_{key.upper()}=\"{str(data['common'][key])}\"",
+            outfile,
+            end,
+            )
+
+
+def parse_args():
+    """Parse command line arguments"""
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter
+        )
+    parser.add_argument(
+        'infile',
+        type=argparse.FileType('r'),
+        nargs='?',
+        default=sys.stdin,
+        help='Input file or pipe via STDIN'
+        )
+    parser.add_argument(
+        '-o', '--outfile',
+        type=argparse.FileType('w'),
+        default=sys.stdout,
+        help='Output file, STDOUT if not set'
+        )
+    parser.add_argument(
+        "-e", "--end",
+        dest='end',
+        default='\n',
+        help='String at the end of line in output'
+        )
+    parser.add_argument(
+        "-s", "--hive-sync-runner-id",
+        required=True,
+        type=int,
+        help='ID of the runner that did hive sync; 0 if the current runner does hive sync itself'
+        )
+    parser.add_argument(
+        "-c", "--current-runner-id",
+        required=True,
+        type=int,
+        help='ID of the current runner'
+        )
+    parser.add_argument(
+        '--log-level',
+        default='INFO',
+        dest='log_level',
+        choices=['debug', 'info', 'warning', 'error'],
+        help='Log level (string)',
+        )
+
+    result = parser.parse_args()
+
+    # configure logger and print config
+    root = logging.getLogger()
+    root.setLevel(result.log_level.upper())
+
+    return result
+
+
+def main():
+    """Main dispatcher function"""
+    flags = parse_args()
+    setup_env(**vars(flags))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/ci/start-api-benchmarks.sh b/scripts/ci/start-api-benchmarks.sh
new file mode 100755
index 0000000000000000000000000000000000000000..aedef030b343c6623315711817e7481edc0e9206
--- /dev/null
+++ b/scripts/ci/start-api-benchmarks.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
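+# Usage (illustrative values): address, port, iterations, optional jobs:
+#   ./scripts/ci/start-api-benchmarks.sh localhost 8080 5 auto
+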
+set -e
+
+pip install tox
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+ITERATIONS=$3
+JOBS=${4:-auto}
+export TAVERN_DISABLE_COMPARATOR=true
+
+echo "Attempting to start benchmarks on hivemind instance listening on ${HIVEMIND_ADDRESS}:${HIVEMIND_PORT}"
+
+for (( i=0; i<$ITERATIONS; i++ ))
+do
+  echo "About to run iteration $i"
+  tox -e tavern-benchmark -- \
+      -W ignore::pytest.PytestDeprecationWarning \
+      -n $JOBS \
+      --junitxml=../../../../benchmarks-$i.xml
+  echo "Done!"
+done
diff --git a/scripts/ci/start-api-smoketest.sh b/scripts/ci/start-api-smoketest.sh
index 9e0eb1d26d045a1ee04f3fd925bddcdaee85520d..f723ae3ab70d89e0a7d090d3b283d62263f66a15 100755
--- a/scripts/ci/start-api-smoketest.sh
+++ b/scripts/ci/start-api-smoketest.sh
@@ -1,14 +1,21 @@
 #!/bin/bash
 
 set -e
+
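+# Usage (illustrative values): address, port, test group, junitxml name, jobs;
+# an empty test group runs everything:
+#   ./scripts/ci/start-api-smoketest.sh localhost 8080 "" report.xml auto
+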
 pip install tox
 
 export HIVEMIND_ADDRESS=$1
 export HIVEMIND_PORT=$2
+TEST_GROUP=$3
+JUNITXML=$4
+JOBS=${5:-auto}
 export TAVERN_DIR="tests/tests_api/hivemind/tavern"
 
 echo "Starting tests on hivemind server running on ${HIVEMIND_ADDRESS}:${HIVEMIND_PORT}"
 
-echo "Selected test group (if empty all will be executed): $3"
+echo "Selected test group (if empty all will be executed): $TEST_GROUP"
 
-tox -e tavern -- -W ignore::pytest.PytestDeprecationWarning -n auto --junitxml=../../../../$4 $3
+tox -e tavern -- \
+    -W ignore::pytest.PytestDeprecationWarning \
+    -n $JOBS \
+    --junitxml=../../../../$JUNITXML $TEST_GROUP
diff --git a/scripts/ci/timer.sh b/scripts/ci/timer.sh
index 490a8a440b042aaea6444475ba7f163c34c2dbee..f8ebfb3504bdc68c3107e61a5779ad8e083e8683 100755
--- a/scripts/ci/timer.sh
+++ b/scripts/ci/timer.sh
@@ -2,6 +2,8 @@
 
 set -euo pipefail
 
+JOB=$1
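+# Usage (illustrative): ./scripts/ci/timer.sh start ... ./scripts/ci/timer.sh check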
+
 start() {
   mkdir -p ".tmp"
   echo `date +%s` > ".tmp/timer-start"
@@ -18,4 +20,15 @@ check() {
     fi
 }
 
-"$1"
+main() {
+  if [ "$JOB" = "start" ]; then
+    start
+  elif [ "$JOB" = "check" ]; then
+    check
+  else
+    echo "Invalid argument"
+    exit 1
+  fi
+}
+
+main
diff --git a/scripts/ci/wait-for-postgres.sh b/scripts/ci/wait-for-postgres.sh
index bc705faf94277667d2c7bae6142f0f37e8b78c94..38a098b6d0e2d8cb0c6c52b7cdfb1b5c7b785d32 100755
--- a/scripts/ci/wait-for-postgres.sh
+++ b/scripts/ci/wait-for-postgres.sh
@@ -6,19 +6,26 @@
 
 set -e
 
-LIMIT=30 #seconds
-shift
-cmd="$@"
+LIMIT=10 #seconds
+
+# Fall back to runner-provided defaults when arguments are not given.
+HOST=${1:-$RUNNER_POSTGRES_HOST}
+PORT=${2:-$RUNNER_POSTGRES_PORT}
 
 wait_for_postgres() {
-    # wkedzierski@syncad.com work, but customized by wbarcik@syncad.com
     counter=0
-    echo "Waiting for postgres on ${POSTGRES_HOST}:${POSTGRES_PORT}. Timeout is ${LIMIT}s."
+    echo "Waiting for postgres on ${HOST}:${PORT}."
     while ! pg_isready \
-            --username $ADMIN_POSTGRES_USER \
-            --host $POSTGRES_HOST \
-            --port $POSTGRES_PORT \
-            --dbname postgres \
+            --host $HOST \
+            --port $PORT \
             --timeout=1 --quiet; do
         counter=$((counter+1))
         sleep 1
@@ -38,14 +45,15 @@ output_configuration() {
     echo "-------------------------------------------------"
     echo "Postgres version and configuration"
     echo "-------------------------------------------------"
-    PGPASSWORD=$ADMIN_POSTGRES_USER_PASSWORD psql \
-            --username "$ADMIN_POSTGRES_USER" \
-            --host "$POSTGRES_HOST" \
-            --port $POSTGRES_PORT \
+    PGPASSWORD=$RUNNER_POSTGRES_ADMIN_USER_PASSWORD psql \
+            --username "$RUNNER_POSTGRES_ADMIN_USER" \
+            --host "$HOST" \
+            --port $PORT \
             --dbname postgres <<EOF
 SELECT version();
-select name, setting, unit from pg_settings;
-\copy (select * from pg_settings) to '$DIR/pg_settings_on_start.csv' WITH CSV HEADER
+-- select name, setting, unit from pg_settings;
+-- show all;
+\copy (select name, setting, unit from pg_settings) to '$DIR/pg_settings_on_start.csv' WITH CSV HEADER
 \q
 EOF
     echo "-------------------------------------------------"
diff --git a/setup.py b/setup.py
index a04804479386c33cfdeee452090b06db9c1b39ac..8085695a6c714a1a712b6da2b4dd247d114448c1 100644
--- a/setup.py
+++ b/setup.py
@@ -91,9 +91,14 @@ if __name__ == "__main__":
             'pdoc',
             'diff-match-patch',
             'prometheus-client',
-            'psutil'
+            'psutil',
         ],
-
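+        # Extras for development; install with e.g. pip install -e '.[dev]'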
+        extras_require={
+            'dev': [
+                'pyYAML',
+                'prettytable',
+            ]
+        },
         entry_points={
             'console_scripts': [
                 'hive=hive.cli:run',
diff --git a/tox.ini b/tox.ini
index 2ee22545322dbc69a9b390a817743a3c70c38fbc..7eec99af91c546384d7c6f99dea6006d7d0caec6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist = py36, tavern, benchmark, tavern-benchmark
 skipsdist = true
 
 [testenv]
-deps = 
+deps =
   pytest
 
 [testenv:benchmark]
@@ -11,13 +11,13 @@ deps =
   {[testenv]deps}
   pytest-benchmark
   requests
-  pyyaml
+  pyYAML
   prettytable
 commands =
   python {toxinidir}/scripts/ci/start_api_benchmark.py {posargs}
 
 [testenv:tavern]
-setenv = 
+setenv =
   PYTHONPATH = {toxinidir}/tests/tests_api/hivemind/tavern:{env:PYTHONPATH:}
 
 passenv =
@@ -26,7 +26,7 @@ passenv =
 
 changedir = {env:TAVERN_DIR}
 
-deps = 
+deps =
   {[testenv]deps}
   pytest-xdist
   tavern
@@ -36,7 +36,7 @@ deps =
 commands = pytest {posargs}
 
 [testenv:tavern-benchmark]
-setenv = 
+setenv =
   {[testenv:tavern]setenv}
 
 passenv =
@@ -45,7 +45,7 @@ passenv =
 
 changedir = tests/tests_api/hivemind/tavern
 
-deps = 
+deps =
   {[testenv:tavern]deps}
 
 commands = pytest --durations=0 {posargs}