From 23b2cbae76ead98e49409565789ab764cebdb4a1 Mon Sep 17 00:00:00 2001
From: Wojciech Barcik <wbarcik@syncad.com>
Date: Tue, 27 Oct 2020 08:38:37 -0400
Subject: [PATCH] Implemented concurrent pipelines on Gitlab CI

---
 .gitignore                                    |   5 +-
 .gitlab-ci-old.yaml                           | 276 ++++++++++
 .gitlab-ci.yaml                               | 495 ++++++++++++------
 docker-compose-ci.yml                         | 130 +++++
 hive/cli.py                                   |  37 +-
 hive/conf.py                                  |  22 +-
 scripts/ci/collect-db-stats.sh                |  40 ++
 scripts/ci/create-db.sh                       |  83 +++
 scripts/ci/dump-db.sh                         |  27 +
 scripts/ci/get-postgres-version.sh            |  21 +
 scripts/ci/hive-server.sh                     |  80 +++
 scripts/ci/hive-sync.sh                       |  26 +
 scripts/ci/hived-node/config.ini              |  57 ++
 scripts/ci/hived-node/entrypoint.sh           |  33 ++
 scripts/ci/hived-node/run.sh                  |  18 +
 scripts/ci/postgres/10/Dockerfile             |  18 +
 scripts/ci/postgres/12/Dockerfile             |  18 +
 scripts/ci/python/3.6/Dockerfile              |  51 ++
 scripts/ci/python/3.8/Dockerfile              |  51 ++
 scripts/ci/start-api-smoketest.sh             |  13 +
 scripts/ci/timer.sh                           |  21 +
 scripts/ci/wait-for-postgres.sh               |  56 ++
 scripts/ci_sync.sh                            |   2 +-
 scripts/db-monitoring/.env_example            |   1 -
 scripts/db-monitoring/docker-compose.yml      |  43 +-
 scripts/db-monitoring/pghero_example.yml      |   6 +-
 scripts/db-monitoring/readme-monitoring.md    |  45 +-
 .../db-monitoring/setup/create_template.sql   |   9 -
 .../db-monitoring/setup/pghero_cron_jobs.txt  |  15 +-
 .../db-monitoring/setup/setup_monitoring.sh   |  38 +-
 .../db-monitoring/setup/setup_template.sql    |  13 -
 .../sql-monitoring/10_create_template.sql     |  23 +
 .../20_create_role_pgwatch2.sql}              |   6 +-
 .../21_create_role_pghero.sql}                |   6 +-
 .../30_setup_monitoring_pgwatch2.sql}         |   9 +-
 .../31_setup_monitoring_pghero.sql}           |  12 +-
 .../40_create_database_pghero.sql}            |   4 +
 .../41_create_tables_pghero.sql}              |   7 +-
 .../sql-monitoring/50_setup_template.sql      |  24 +
 .../setup/sql-monitoring/ddl_deps.sql         | 208 ++++++++
 tests/tests_api                               |   2 +-
 41 files changed, 1789 insertions(+), 262 deletions(-)
 create mode 100644 .gitlab-ci-old.yaml
 create mode 100644 docker-compose-ci.yml
 mode change 100644 => 100755 hive/cli.py
 create mode 100755 scripts/ci/collect-db-stats.sh
 create mode 100755 scripts/ci/create-db.sh
 create mode 100755 scripts/ci/dump-db.sh
 create mode 100755 scripts/ci/get-postgres-version.sh
 create mode 100755 scripts/ci/hive-server.sh
 create mode 100755 scripts/ci/hive-sync.sh
 create mode 100644 scripts/ci/hived-node/config.ini
 create mode 100755 scripts/ci/hived-node/entrypoint.sh
 create mode 100755 scripts/ci/hived-node/run.sh
 create mode 100644 scripts/ci/postgres/10/Dockerfile
 create mode 100644 scripts/ci/postgres/12/Dockerfile
 create mode 100644 scripts/ci/python/3.6/Dockerfile
 create mode 100644 scripts/ci/python/3.8/Dockerfile
 create mode 100755 scripts/ci/start-api-smoketest.sh
 create mode 100755 scripts/ci/timer.sh
 create mode 100755 scripts/ci/wait-for-postgres.sh
 delete mode 100644 scripts/db-monitoring/.env_example
 delete mode 100644 scripts/db-monitoring/setup/create_template.sql
 delete mode 100644 scripts/db-monitoring/setup/setup_template.sql
 create mode 100644 scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql
 rename scripts/db-monitoring/setup/{create_role_pgwatch2.sql => sql-monitoring/20_create_role_pgwatch2.sql} (82%)
 rename scripts/db-monitoring/setup/{create_role_pghero.sql => sql-monitoring/21_create_role_pghero.sql} (80%)
 rename scripts/db-monitoring/setup/{setup_monitoring_pgwatch2.sql => sql-monitoring/30_setup_monitoring_pgwatch2.sql} (98%)
 rename scripts/db-monitoring/setup/{setup_monitoring_pghero.sql => sql-monitoring/31_setup_monitoring_pghero.sql} (80%)
 rename scripts/db-monitoring/setup/{create_database_pghero.sql => sql-monitoring/40_create_database_pghero.sql} (61%)
 rename scripts/db-monitoring/setup/{create_tables_pghero.sql => sql-monitoring/41_create_tables_pghero.sql} (80%)
 create mode 100644 scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql
 create mode 100644 scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql

diff --git a/.gitignore b/.gitignore
index a9f31ca02..166a11ef6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@ var/
 *.egg-info/
 .installed.cfg
 *.egg
+pip-wheel-metadata
 
 # PyInstaller
 #  Usually these files are written by a python script from a template
@@ -134,10 +135,12 @@ tests/failed_blocks/
 # version.py
 hive/version.py
 
-# hivemind.port
 hivemind.port
+hive_server.pid
+hivemind-server.pid
 
 Pipfile.lock
 
 pghero.yml
 *~
+.tmp
diff --git a/.gitlab-ci-old.yaml b/.gitlab-ci-old.yaml
new file mode 100644
index 000000000..3184bf824
--- /dev/null
+++ b/.gitlab-ci-old.yaml
@@ -0,0 +1,276 @@
+# https://hub.docker.com/r/library/python/tags/
+image: "python:3.7"
+
+stages:
+- build
+- test
+- data-supply
+- deploy
+- e2e-test
+- post-deploy
+
+variables:
+  GIT_DEPTH: 1
+  LC_ALL: "C"
+  GIT_STRATEGY: clone
+  GIT_SUBMODULE_STRATEGY: recursive
+  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
+
+  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
+  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
+  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
+  # Configured at gitlab repository settings side
+  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
+  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  POSTGRES_HOST_AUTH_METHOD: trust
+  # official way to provide password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
+  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+before_script:
+  - pwd
+  - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
+  - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
+  - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
+  - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+
+hivemind_build:
+  stage: build
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    - git fetch --tags
+    - git tag -f ci_implicit_tag
+    - echo $PYTHONUSERBASE
+    - "python3 setup.py bdist_egg"
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 1 week
+
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - when: always
+
+  tags:
+     - hivemind
+
+hivemind_sync:
+  stage: data-supply
+
+  environment:
+      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
+
+  needs:
+    - job: hivemind_build
+      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+
+  script:
+    - pip3 install --user --upgrade pip setuptools
+    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_MAX_BLOCK $HIVEMIND_HTTP_PORT
+
+  artifacts:
+    paths:
+      - hivemind-sync.log
+
+    expire_in: 1 week
+
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+hivemind_start_server:
+  stage: deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
+    on_stop: hivemind_stop_server
+
+  needs:
+    - job: hivemind_build
+      artifacts: true
+#    - job: hivemind_sync
+#      artifacts: true
+  variables:
+    GIT_STRATEGY: none
+    PYTHONUSERBASE: ./local-site
+
+  script:
+    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
+
+  artifacts:
+    paths:
+      - hive_server.pid
+    expire_in: 1 week
+
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+hivemind_stop_server:
+  stage: post-deploy
+  environment:
+    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
+    action: stop
+
+  variables:
+    GIT_STRATEGY: none
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - when: manual
+  script:
+    - scripts/ci_stop_server.sh hive_server.pid
+
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+
+  tags:
+     - hivemind
+
+  artifacts:
+    paths:
+      - hive_server.log
+
+.hivemind_start_api_smoketest: &common_api_smoketest_job
+  stage: e2e-test
+  environment: hive-4.pl.syncad.com
+  needs:
+    - job: hivemind_start_server
+      artifacts: true
+
+  variables:
+    GIT_STRATEGY: none
+
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: always
+    - if: '$CI_PIPELINE_SOURCE == "push"'
+      when: manual
+    - when: on_success
+
+  tags:
+     - hivemind
+
+bridge_api_smoketest:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge.xml
+
+bridge_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_bridge_negative.xml
+
+condenser_api_smoketest:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api.xml
+
+condenser_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_condenser_api_negative.xml
+
+database_api_smoketest:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api.xml
+
+database_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_database_api_negative.xml
+
+follow_api_smoketest:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api.xml
+
+follow_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_follow_api_negative.xml
+
+tags_api_smoketest:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_tags_api.xml
+
+tags_api_smoketest_negative:
+  <<: *common_api_smoketest_job
+
+  script:
+    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
+
+  artifacts:
+    reports:
+      junit: api_smoketest_tags_api_negative.xml
+
diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
index 3184bf824..1a3b7f6ca 100644
--- a/.gitlab-ci.yaml
+++ b/.gitlab-ci.yaml
@@ -1,85 +1,203 @@
-# https://hub.docker.com/r/library/python/tags/
-image: "python:3.7"
-
 stages:
-- build
-- test
-- data-supply
-- deploy
-- e2e-test
-- post-deploy
+  - build
+  - data-supply
+  - e2e-test
+
 
 variables:
+
+  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+  # GIT_DEPTH: 10
   GIT_DEPTH: 1
-  LC_ALL: "C"
+
+  # GIT_STRATEGY: fetch # Noticed errors with that.
   GIT_STRATEGY: clone
+  # GIT_STRATEGY: none
+
   GIT_SUBMODULE_STRATEGY: recursive
-  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
 
-  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
-  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
-  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
-  # Configured at gitlab repository settings side
-  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
-  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
-  POSTGRES_HOST_AUTH_METHOD: trust
-  # official way to provide password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
-  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  PIPENV_VENV_IN_PROJECT: 1
+  PIPENV_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pipenv"
+  PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
 
-before_script:
-  - pwd
-  - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
-  - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
-  - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
-  - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+  POSTGRES_CLIENT_TOOLS_PATH: /usr/lib/postgresql
 
-hivemind_build:
-  stage: build
-  script:
-    - pip3 install --user --upgrade pip setuptools
-    - git fetch --tags
-    - git tag -f ci_implicit_tag
-    - echo $PYTHONUSERBASE
-    - "python3 setup.py bdist_egg"
-    - ls -l dist/*
-  artifacts:
-    paths:
-      - dist/
-    expire_in: 1 week
-
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
-      when: always
-    - when: always
+  # POSTGRES_HOST: 172.17.0.1 # Host
+  # POSTGRES_HOST: postgres-10 # Docker service
+  POSTGRES_PORT: 5432
 
-  tags:
-     - hivemind
+  # Set on project level in Gitlab CI.
+  # We need CREATEROLE and CREATEDB privileges.
+  # ADMIN_POSTGRES_USER: postgres
+  # ADMIN_POSTGRES_USER_PASSWORD: postgres
 
-hivemind_sync:
-  stage: data-supply
+  # Needed by old runner ssh-executor, probably.
+  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
+  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+  POSTGRES_HOST_AUTH_METHOD: trust
 
-  environment:
-      name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
+  HIVEMIND_DB_NAME: "hive_${CI_COMMIT_REF_SLUG}_pipeline_id_${CI_PIPELINE_ID}"
+  HIVEMIND_EXEC_NAME: $DB_NAME
+
+  # Set on project level in Gitlab CI.
+  # HIVEMIND_POSTGRES_USER: hivemind_ci
+
+  # Set on project level in Gitlab CI.
+  HIVEMIND_POSTGRES_USER_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
+
+  # Set on project level in Gitlab CI.
+  # HIVEMIND_HTTP_PORT: 18080
+
+  # Set on project level in Gitlab CI.
+  # HIVEMIND_MAX_BLOCK: 10001
+  # HIVEMIND_MAX_BLOCK: 5000001
+
+  # Set on project level in Gitlab CI.
+  # HIVEMIND_SOURCE_HIVED_URL: {"default":"http://hive-4.pl.syncad.com:8091"}
+  # HIVEMIND_SOURCE_HIVED_URL: {"default":"192.168.6.136:8091"}
+  # HIVEMIND_SOURCE_HIVED_URL: {"default":"http://172.17.0.1:8091"}
+
+
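+# Postgres service definitions, attached to jobs below via YAML anchors.
+# The "command" list passes server settings to postgres as "-c" options.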
+.postgres-10: &postgres-10
+  name: hivemind/postgres:10
+  alias: db
+  command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=2GB",
+      "-c", "effective_cache_size=6GB",
+      "-c", "maintenance_work_mem=512MB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=5242kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=4",
+      "-c", "max_parallel_workers_per_gather=2",
+      "-c", "max_parallel_workers=4",
+      ]
+
+.postgres-12: &postgres-12
+  name: hivemind/postgres:12
+  alias: db
+  command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=2GB",
+      "-c", "effective_cache_size=6GB",
+      "-c", "maintenance_work_mem=512MB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=5242kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=4",
+      "-c", "max_parallel_workers_per_gather=2",
+      "-c", "max_parallel_workers=4",
+      ]
+
+.setup-pip: &setup-pip
+  - python -m venv .venv
+  - source .venv/bin/activate
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - time pip install --editable .
+
+.setup-setuptools: &setup-setuptools
+  - python -m venv .venv
+  - source .venv/bin/activate
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - time python setup.py develop
+
+# no virtual environment
+.setup-setuptools-no-venv: &setup-setuptools-no-venv
+  # setuptools will install all dependencies to this directory.
+  - export PYTHONUSERBASE=./local-site
+  - time pip install --upgrade pip setuptools wheel
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+  - mkdir -p `python -m site --user-site`
+  - python setup.py install --user --force
+  # we can probably also run via: ./hive/cli.py
+  - ln -sf ./local-site/bin/hive "$HIVEMIND_EXEC_NAME"
+
+.setup-pipenv: &setup-pipenv
+  ## Note that Pipfile must exist.
+  ## `--sequential` is slower, but doesn't emit messages about errors
+  ## and the need to repeat the install:
+  ## - pipenv sync --dev --bare --sequential
+  ## The command below is faster than `--sequential`, but sometimes emits
+  ## messages about errors and the need to repeat the install. However,
+  ## these errors seem negligible.
+  - time pipenv sync --dev --bare
+  - source .venv/bin/activate
+  - pip --version
+  - easy_install --version
+  - wheel version
+  - pipenv --version
+  - poetry --version
+
+.set-variables: &set-variables
+  - whoami
+  # list all variables predefined by Gitlab CI
+  # - export
+  - echo "CI_PIPELINE_URL is $CI_PIPELINE_URL"
+  - echo "CI_PIPELINE_ID is $CI_PIPELINE_ID"
+  - echo "CI_COMMIT_SHORT_SHA is $CI_COMMIT_SHORT_SHA"
+  - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
+  - export HIVEMIND_DB_NAME=${HIVEMIND_DB_NAME//[^a-zA-Z0-9_]/_}
+  - echo "HIVEMIND_DB_NAME is $HIVEMIND_DB_NAME"
+  - export HIVEMIND_POSTGRESQL_CONNECTION_STRING=postgresql://${HIVEMIND_POSTGRES_USER}:${HIVEMIND_POSTGRES_USER_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${HIVEMIND_DB_NAME}
 
-  needs:
-    - job: hivemind_build
-      artifacts: true
-  variables:
-    GIT_STRATEGY: none
-    PYTHONUSERBASE: ./local-site
+.fetch-git-tags: &fetch-git-tags
+  # - git fetch --tags
+  - git tag -f ci_implicit_tag # Needed to build python package
 
-  script:
-    - pip3 install --user --upgrade pip setuptools
-    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_MAX_BLOCK $HIVEMIND_HTTP_PORT
+.start-timer: &start-timer
+  - ./scripts/ci/timer.sh start
 
-  artifacts:
-    paths:
-      - hivemind-sync.log
+.stop-timer: &stop-timer
+  - ./scripts/ci/timer.sh check
 
-    expire_in: 1 week
+.hive-sync-script-common: &hive-sync-script-common
+  - ./scripts/ci/wait-for-postgres.sh ${POSTGRES_HOST} ${POSTGRES_PORT}
+  - export POSTGRES_MAJOR_VERSION=$(./scripts/ci/get-postgres-version.sh)
+  - ./scripts/ci/create-db.sh
+  - ./scripts/ci/hive-sync.sh
+  - ./scripts/ci/collect-db-stats.sh
 
+.default-rules: &default-rules
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
       when: always
@@ -89,188 +207,217 @@ hivemind_sync:
       when: manual
     - when: on_success
 
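+# Defaults inherited by every job unless the job overrides them.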
+default:
+  image: hivemind/python:3.6
+  # image: hivemind/python:3.8
+  interruptible: false
+  timeout: 2h
+  cache: &global-cache
+    # Per-branch caching ($CI_COMMIT_REF_SLUG is equivalent):
+    # key: "$CI_COMMIT_REF_NAME"
+    # Per-project caching: use any key. Change this key if you need
+    # to clear the cache.
+    key: common-1
+    paths:
+      - .cache/
+      - .venv/
+      - .tox/
+  before_script:
+    - *start-timer
+    - *fetch-git-tags
+    - *set-variables
+    - *setup-pip
+  after_script:
+    - *stop-timer
+
+##### Jobs #####
+
+.build-egg:
+  stage: build
+  needs: []
+  script:
+    - python setup.py bdist_egg
+    - ls -l dist/*
+  artifacts:
+    paths:
+      - dist/
+    expire_in: 7 days
   tags:
-     - hivemind
-
-hivemind_start_server:
-  stage: deploy
-  environment:
-    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
-    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
-    on_stop: hivemind_stop_server
-
-  needs:
-    - job: hivemind_build
-      artifacts: true
-#    - job: hivemind_sync
-#      artifacts: true
-  variables:
-    GIT_STRATEGY: none
-    PYTHONUSERBASE: ./local-site
+    - hivemind-light-job
 
+.build-wheel:
+  stage: build
+  needs: []
   script:
-    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
-
+    - python setup.py bdist_wheel
+    - ls -l dist/*
   artifacts:
     paths:
-      - hive_server.pid
-    expire_in: 1 week
-
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "develop"'
-      when: always
-    - if: '$CI_PIPELINE_SOURCE == "push"'
-      when: manual
-    - when: on_success
-
+      - dist/
+    expire_in: 7 days
   tags:
-     - hivemind
-
-hivemind_stop_server:
-  stage: post-deploy
-  environment:
-    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
-    action: stop
+    - hivemind-light-job
 
-  variables:
-    GIT_STRATEGY: none
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - when: manual
+# Postgres shared
+hivemind-sync:
+  <<: *default-rules
+  stage: data-supply
+  needs: []
   script:
-    - scripts/ci_stop_server.sh hive_server.pid
-
-  needs:
-    - job: hivemind_start_server
-      artifacts: true
-
+    - *hive-sync-script-common
+  artifacts:
+    paths:
+      - hivemind-sync.log
+      - pg-stats
+    expire_in: 7 days
   tags:
-     - hivemind
+    - hivemind-heavy-job
 
+# Postgres as service
+.hivemind-sync:
+  <<: *default-rules
+  stage: data-supply
+  services:
+    - *postgres-10
+    # - *postgres-12
+  needs: []
+  script:
+    - *hive-sync-script-common
+    # - ./scripts/ci/dump-db.sh
   artifacts:
     paths:
-      - hive_server.log
-
-.hivemind_start_api_smoketest: &common_api_smoketest_job
-  stage: e2e-test
-  environment: hive-4.pl.syncad.com
-  needs:
-    - job: hivemind_start_server
-      artifacts: true
-
-  variables:
-    GIT_STRATEGY: none
+      - hivemind-sync.log
+      - pg-stats
+      - pg-dump-${HIVEMIND_DB_NAME}
+    expire_in: 7 hours
+  tags:
+    - hivemind-heavy-job
 
+.e2e-test-common:
   rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-      when: always
-    - if: '$CI_PIPELINE_SOURCE == "push"'
-      when: manual
     - when: on_success
-
+  needs:
+    - job: hivemind-sync
+      artifacts: false
+  before_script:
+    - *start-timer
+    - *fetch-git-tags
+    - *set-variables
+    - *setup-pip
+    - ./scripts/ci/wait-for-postgres.sh ${POSTGRES_HOST} ${POSTGRES_PORT}
+    - ./scripts/ci/hive-server.sh start
+  after_script:
+    - ./scripts/ci/hive-server.sh stop
+    - *stop-timer
   tags:
-     - hivemind
+    - hivemind-light-job
 
 bridge_api_smoketest:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      bridge_api_patterns/ api_smoketest_bridge.xml
   artifacts:
     reports:
       junit: api_smoketest_bridge.xml
 
 bridge_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      bridge_api_negative/ api_smoketest_bridge_negative.xml
   artifacts:
     reports:
       junit: api_smoketest_bridge_negative.xml
 
 condenser_api_smoketest:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      condenser_api_patterns/ api_smoketest_condenser_api.xml
   artifacts:
     reports:
       junit: api_smoketest_condenser_api.xml
 
 condenser_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      condenser_api_negative/ api_smoketest_condenser_api_negative.xml
   artifacts:
     reports:
       junit: api_smoketest_condenser_api_negative.xml
 
 database_api_smoketest:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      database_api_patterns/ api_smoketest_database_api.xml
   artifacts:
     reports:
       junit: api_smoketest_database_api.xml
 
 database_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      database_api_negative/ api_smoketest_database_api_negative.xml
   artifacts:
     reports:
       junit: api_smoketest_database_api_negative.xml
 
 follow_api_smoketest:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      follow_api_patterns/ api_smoketest_follow_api.xml
   artifacts:
     reports:
       junit: api_smoketest_follow_api.xml
 
 follow_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      follow_api_negative/ api_smoketest_follow_api_negative.xml
   artifacts:
     reports:
       junit: api_smoketest_follow_api_negative.xml
 
 tags_api_smoketest:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      tags_api_patterns/ api_smoketest_tags_api.xml
   artifacts:
     reports:
       junit: api_smoketest_tags_api.xml
 
 tags_api_smoketest_negative:
-  <<: *common_api_smoketest_job
-
+  stage: e2e-test
+  extends: .e2e-test-common
   script:
-    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml
-
+    - >
+      scripts/ci/start-api-smoketest.sh localhost "$HIVEMIND_HTTP_PORT"
+      tags_api_negative/ api_smoketest_tags_api_negative.xml
   artifacts:
     reports:
       junit: api_smoketest_tags_api_negative.xml
-
diff --git a/docker-compose-ci.yml b/docker-compose-ci.yml
new file mode 100644
index 000000000..a944cc3d0
--- /dev/null
+++ b/docker-compose-ci.yml
@@ -0,0 +1,130 @@
+version: "3"
+
+services:
+
+  python-3.6:
+    image: hivemind/python:3.6
+    build:
+      context: .
+      dockerfile: ./scripts/ci/python/3.6/Dockerfile
+      args:
+        - user=${USER}
+        - workdir=/home/${USER}
+    user: ${USER}
+    shm_size: 0
+    # The command below keeps the container running forever.
+    # command: ["tail", "-f", "/dev/null"]
+
+  python-3.8:
+    image: hivemind/python:3.8
+    shm_size: 0
+    build:
+      context: .
+      dockerfile: ./scripts/ci/python/3.8/Dockerfile
+      args:
+        - user=${USER}
+        - workdir=/home/${USER}
+    user: ${USER}
+    # The command below keeps the container running forever.
+    # command: ["tail", "-f", "/dev/null"]
+
+  postgres-10:
+    image: hivemind/postgres:10
+    restart: unless-stopped
+    build:
+      context: .
+      dockerfile: ./scripts/ci/postgres/10/Dockerfile
+    environment:
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+    volumes:
+      - postgres-10-pgdata:/var/lib/postgresql/data
+    ports:
+      - "${POSTGRES_10_PUBLISHED_PORT}:5432"
+    shm_size: 0
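+    # Same tuned settings as the postgres-12 service below (see the pgtune link there).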
+    command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=12GB",
+      "-c", "effective_cache_size=36GB",
+      "-c", "maintenance_work_mem=2GB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=31457kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=12",
+      "-c", "max_parallel_workers_per_gather=4",
+      "-c", "max_parallel_workers=12",
+    ]
+
+  postgres-12:
+    image: hivemind/postgres:12
+    restart: unless-stopped
+    build:
+      context: .
+      dockerfile: ./scripts/ci/postgres/12/Dockerfile
+    environment:
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+    volumes:
+      - postgres-12-pgdata:/var/lib/postgresql/data
+    ports:
+      - "${POSTGRES_12_PUBLISHED_PORT}:5432"
+    shm_size: 0
+    # https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
+    command: [
+      "postgres",
+      "-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats",
+      "-c", "track_functions=pl",
+      "-c", "track_io_timing=on",
+      "-c", "track_activity_query_size=2048",
+      "-c", "pg_stat_statements.max=10000",
+      "-c", "pg_stat_statements.track=all",
+      "-c", "max_connections=100",
+      "-c", "shared_buffers=12GB",
+      "-c", "effective_cache_size=36GB",
+      "-c", "maintenance_work_mem=2GB",
+      "-c", "checkpoint_completion_target=0.9",
+      "-c", "wal_buffers=16MB",
+      "-c", "default_statistics_target=100",
+      "-c", "random_page_cost=1.1",
+      "-c", "effective_io_concurrency=200",
+      "-c", "work_mem=31457kB",
+      "-c", "min_wal_size=2GB",
+      "-c", "max_wal_size=8GB",
+      "-c", "max_worker_processes=12",
+      "-c", "max_parallel_workers_per_gather=4",
+      "-c", "max_parallel_workers=12",
+      "-c", "max_parallel_maintenance_workers=4",
+    ]
+
+  hived-node:
+    image: registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55
+    restart: unless-stopped
+    # ports:
+    #   - "2001:2001"
+    #   - "8090:8090"
+    #   - "8091:8091"
+    shm_size: 0
+    entrypoint: /usr/local/hive/consensus/entrypoint.sh
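+    # Replay from the mounted block_log and stop at block 5,000,000.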
+    command: >-
+      --replay-blockchain
+      --stop-replay-at-block 5000000
+    volumes:
+      - $PWD/scripts/ci/hived-node/entrypoint.sh:/usr/local/hive/consensus/entrypoint.sh
+      - $PWD/scripts/ci/hived-node/config.ini:/usr/local/hive/consensus/datadir/config.ini
+      - ${HIVED_BLOCK_LOG_FILE}:/usr/local/hive/consensus/datadir/blockchain/block_log
+      - hived-node-datadir:/usr/local/hive/consensus/datadir
+
+volumes:
+  postgres-10-pgdata:
+  postgres-12-pgdata:
+  hived-node-datadir:
diff --git a/hive/cli.py b/hive/cli.py
old mode 100644
new mode 100755
index d64a7a7a0..b2c4a88a2
--- a/hive/cli.py
+++ b/hive/cli.py
@@ -1,14 +1,39 @@
-#!/usr/local/bin/python3
+#!/usr/bin/env python
 
 """CLI service router"""
 
 import os
 import logging
+import time
 from hive.conf import Conf
 from hive.db.adapter import Db
 from hive.utils.stats import PrometheusClient
 
-logging.basicConfig()
+
+def setup_logging(conf):
+    """Setup logging with timestamps"""
+
+    timestamp = conf.get('log_timestamp')
+    epoch = conf.get('log_epoch')
+    if timestamp and epoch:
+        datefmt = '%Y-%m-%d %H:%M:%S'
+        timezone = time.strftime('%z')
+        fmt = ('%(asctime)s.%(msecs)03d{} %(created).6f '
+               '%(levelname)s - %(name)s - %(message)s').format(timezone)
+        logging.basicConfig(format=fmt, datefmt=datefmt)
+    elif timestamp:
+        datefmt = '%Y-%m-%d %H:%M:%S'
+        timezone = time.strftime('%z')
+        fmt = ('%(asctime)s.%(msecs)03d{} '
+               '%(levelname)s - %(name)s - %(message)s').format(timezone)
+        logging.basicConfig(format=fmt, datefmt=datefmt)
+    elif epoch:
+        fmt = '%(created).6f %(levelname)s - %(name)s - %(message)s'
+        logging.basicConfig(format=fmt)
+    else:
+        fmt = '%(levelname)s - %(name)s - %(message)s'
+        logging.basicConfig(format=fmt)
+
 
 def run():
     """Run the service specified in the `--mode` argument."""
@@ -17,6 +42,8 @@ def run():
     mode = conf.mode()
     PrometheusClient( conf.get('prometheus_port') )
 
+    setup_logging(conf)
+
     if mode == 'completion':
         conf.generate_completion()
         return
@@ -29,9 +56,9 @@ def run():
         if fh is None:
           print("Cannot write into specified pid_file: %s", pid_file_name)
         else:
-          pid = os.getpid()
-          fh.write(str(pid))
-          fh.close()
+            pid = os.getpid()
+            fh.write(str(pid))
+            fh.close()
 
 
     if conf.get('test_profile'):
diff --git a/hive/conf.py b/hive/conf.py
index 1707562c9..e17951a22 100644
--- a/hive/conf.py
+++ b/hive/conf.py
@@ -54,6 +54,11 @@ class Conf():
         add('--test-profile', type=strtobool, env_var='TEST_PROFILE', help='(debug) profile execution', default=False)
         add('--log-virtual-op-calls', type=strtobool, env_var='LOG_VIRTUAL_OP_CALLS', help='(debug) log virtual op calls and responses', default=False)
 
+        # logging
+        add('--log-timestamp', help='Output timestamp in log', action='store_true')
+        add('--log-epoch', help='Output unix epoch in log', action='store_true')
+        add('--log-mask-sensitive-data', help='Mask sensitive data, e.g. passwords', action='store_true')
+
         add('--pid-file', type=str, env_var='PID_FILE', help='Allows to dump current process pid into specified file', default=None)
 
         add('--auto-http-server-port', nargs='+', type=int, help='Hivemind will listen on first available port from this range')
@@ -80,8 +85,23 @@ class Conf():
             root.error("Value error: {}".format(ex))
             exit(1)
 
+        # Print command line args, but hide the db connection string
+        # (e.g. on a continuous integration server).
         from sys import argv
-        root.info("Used command line args: %s", " ".join(argv[1:]))
+        if conf.get('log_mask_sensitive_data'):
+            my_args = []
+            upcoming_connection_string = False
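+            # Replace the value that follows --database-url with a placeholder.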
+            for elem in argv[1:]:
+                if upcoming_connection_string:
+                    upcoming_connection_string = False
+                    my_args.append('MASKED')
+                    continue
+                if elem == '--database-url':
+                    upcoming_connection_string = True
+                my_args.append(elem)
+            root.info("Used command line args: %s", " ".join(my_args))
+        else:
+            root.info("Used command line args: %s", " ".join(argv[1:]))
 
         # uncomment for full list of program args
         #args_list = ["--" + k + " " + str(v) for k,v in vars(args).items()]
diff --git a/scripts/ci/collect-db-stats.sh b/scripts/ci/collect-db-stats.sh
new file mode 100755
index 000000000..93bb1195f
--- /dev/null
+++ b/scripts/ci/collect-db-stats.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+set -euo pipefail
+
+collect_stats() {
+
+    echo "Collecting statistics from database ${HIVEMIND_DB_NAME}"
+
+    mkdir -p pg-stats
+    DIR=$PWD/pg-stats
+
+    PGPASSWORD=${POSTGRES_PASSWORD} psql \
+        --username "${POSTGRES_USER}" \
+        --host ${POSTGRES_HOST} \
+        --port ${POSTGRES_PORT} \
+        --dbname ${HIVEMIND_DB_NAME} << EOF
+\timing
+\copy (select * from pg_settings) to '$DIR/pg_settings.csv' WITH CSV HEADER
+\copy (select * from pg_stat_user_tables) to '$DIR/pg_stat_user_tables.csv' WITH CSV HEADER
+
+-- Disabled, because this table is too big.
+--\copy (select * from pg_stat_statements) to '$DIR/pg_stat_statements.csv' WITH CSV HEADER
+
+-- See https://github.com/powa-team/pg_qualstats
+\echo pg_qualstats index advisor
+SELECT v
+  FROM json_array_elements(
+    pg_qualstats_index_advisor(min_filter => 50)->'indexes') v
+  ORDER BY v::text COLLATE "C";
+
+\echo pg_qualstats unoptimised
+SELECT v
+  FROM json_array_elements(
+    pg_qualstats_index_advisor(min_filter => 50)->'unoptimised') v
+  ORDER BY v::text COLLATE "C";
+EOF
+
+}
+
+collect_stats
diff --git a/scripts/ci/create-db.sh b/scripts/ci/create-db.sh
new file mode 100755
index 000000000..8cc734f74
--- /dev/null
+++ b/scripts/ci/create-db.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# TODO We have trouble with the user when postgresql is run from docker.
+# I'm afraid we need the user name `postgres` and no other.
+# ADMIN_POSTGRES_USER=postgres
+# ADMIN_POSTGRES_USER_PASSWORD=postgres
+
+create_db() {
+
+    echo "Creating user ${HIVEMIND_POSTGRES_USER} and database ${HIVEMIND_DB_NAME}, owned by this user"
+
+    PGPASSWORD=${ADMIN_POSTGRES_USER_PASSWORD} psql \
+        --username "${ADMIN_POSTGRES_USER}" \
+        --host ${POSTGRES_HOST} \
+        --port ${POSTGRES_PORT} \
+        --dbname postgres << EOF
+
+\echo Creating role ${HIVEMIND_POSTGRES_USER}
+
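+-- Create the role only if it is missing; Postgres has no CREATE ROLE IF NOT EXISTS.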
+DO \$$
+BEGIN
+    IF EXISTS (SELECT * FROM pg_user
+            WHERE pg_user.usename = '${HIVEMIND_POSTGRES_USER}') THEN
+        raise warning 'Role % already exists', '${HIVEMIND_POSTGRES_USER}';
+    ELSE
+        CREATE ROLE ${HIVEMIND_POSTGRES_USER}
+                WITH LOGIN PASSWORD '${HIVEMIND_POSTGRES_USER_PASSWORD}';
+    END IF;
+END
+\$$;
+
+\echo Creating database ${HIVEMIND_DB_NAME}
+
+CREATE DATABASE ${HIVEMIND_DB_NAME} TEMPLATE template_monitoring
+    OWNER ${HIVEMIND_POSTGRES_USER};
+COMMENT ON DATABASE ${HIVEMIND_DB_NAME} IS
+    'Database for Gitlab CI pipeline ${CI_PIPELINE_URL}, commit ${CI_COMMIT_SHORT_SHA}';
+
+\c ${HIVEMIND_DB_NAME}
+
+create schema hivemind_admin
+        authorization ${HIVEMIND_POSTGRES_USER};
+
+CREATE SEQUENCE hivemind_admin.database_metadata_id_seq
+    INCREMENT 1
+    START 1
+    MINVALUE 1
+    MAXVALUE 2147483647
+    CACHE 1;
+
+CREATE TABLE hivemind_admin.database_metadata
+(
+    id integer NOT NULL DEFAULT
+        nextval('hivemind_admin.database_metadata_id_seq'::regclass),
+    database_name text,
+    ci_pipeline_url text,
+    ci_pipeline_id integer,
+    commit_sha text,
+    created_at timestamp with time zone DEFAULT now(),
+    CONSTRAINT database_metadata_pkey PRIMARY KEY (id)
+);
+
+alter sequence hivemind_admin.database_metadata_id_seq
+        OWNER TO ${HIVEMIND_POSTGRES_USER};
+
+alter table hivemind_admin.database_metadata
+        OWNER TO ${HIVEMIND_POSTGRES_USER};
+
+insert into hivemind_admin.database_metadata
+    (database_name, ci_pipeline_url, ci_pipeline_id, commit_sha)
+values (
+    '${HIVEMIND_DB_NAME}', '${CI_PIPELINE_URL}',
+    ${CI_PIPELINE_ID}, '${CI_COMMIT_SHORT_SHA}'
+    );
+
+\q
+EOF
+
+}
+
+create_db
diff --git a/scripts/ci/dump-db.sh b/scripts/ci/dump-db.sh
new file mode 100755
index 000000000..e2e4764d0
--- /dev/null
+++ b/scripts/ci/dump-db.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+set -euo pipefail
+
+echo "Dumping database ${HIVEMIND_DB_NAME}"
+
+export PGPASSWORD=${POSTGRES_PASSWORD}
+exec_path=$POSTGRES_CLIENT_TOOLS_PATH/$POSTGRES_MAJOR_VERSION/bin
+
+echo "Using pg_dump version $($exec_path/pg_dump --version)"
+
+time $exec_path/pg_dump \
+    --username="${POSTGRES_USER}" \
+    --host="${POSTGRES_HOST}" \
+    --port="${POSTGRES_PORT}" \
+    --dbname="${HIVEMIND_DB_NAME}" \
+    --schema=public \
+    --format=directory \
+    --jobs=4 \
+    --compress=6 \
+    --quote-all-identifiers \
+    --lock-wait-timeout=30000 \
+    --no-privileges --no-acl \
+    --verbose \
+    --file="pg-dump-${HIVEMIND_DB_NAME}"
+
+unset PGPASSWORD
diff --git a/scripts/ci/get-postgres-version.sh b/scripts/ci/get-postgres-version.sh
new file mode 100755
index 000000000..47e42fda1
--- /dev/null
+++ b/scripts/ci/get-postgres-version.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Get the PostgreSQL server major version
+
+set -euo pipefail
+
+get_postgres_version() {
+
+    version=$(
+        PGPASSWORD=$POSTGRES_PASSWORD psql -X -A -t \
+            --username $POSTGRES_USER \
+            --host $POSTGRES_HOST \
+            --port ${POSTGRES_PORT} \
+            --dbname postgres \
+            -c "show server_version_num;"
+        )
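+    # server_version_num is e.g. 100015 or 120004; its first two digits give the major version on PG 10+.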
+    echo $(echo $version | cut -c1-2)
+
+}
+
+get_postgres_version
diff --git a/scripts/ci/hive-server.sh b/scripts/ci/hive-server.sh
new file mode 100755
index 000000000..d38f963f5
--- /dev/null
+++ b/scripts/ci/hive-server.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+set -euo pipefail
+
+HIVEMIND_PID=0
+MERCY_KILL_TIMEOUT=5
+START_DELAY=5
+
+# For debug only!
+# HIVED_URL='{"default":"http://hived-node:8091"}'
+# HIVED_URL='{"default":"http://172.17.0.1:8091"}'
+
+check_pid() {
+  if [ -f hive_server.pid ]; then
+    HIVEMIND_PID=`cat hive_server.pid`
+  else
+    HIVEMIND_PID=0
+  fi
+}
+
+
+stop() {
+  if [ "$HIVEMIND_PID" -gt "0" ]; then
+    HIVEMIND_PID=`cat hive_server.pid`
+
+    # Send INT signal and give it some time to stop.
+    echo "Stopping hive server (pid $HIVEMIND_PID) gently (SIGINT)"
+    kill -SIGINT $HIVEMIND_PID || true;
+    sleep $MERCY_KILL_TIMEOUT
+
+    # Send KILL signal to be sure.
+    echo "Killing hive server (pid $HIVEMIND_PID) to be sure (SIGKILL)"
+    kill -9 $HIVEMIND_PID > /dev/null 2>&1 || true;
+
+    rm hive_server.pid;
+    echo "Hive server has been stopped"
+  else
+    echo "Hive server is not running"
+  fi
+}
+
+
+start() {
+
+  if [ "$HIVEMIND_PID" -gt "0" ]; then
+    echo "Hive server is already running (pid $HIVEMIND_PID)"
+    exit 0
+  fi
+
+  echo "Starting hive server on port ${HIVEMIND_HTTP_PORT}"
+
+  hive server \
+      --log-mask-sensitive-data \
+      --pid-file hive_server.pid \
+      --http-server-port $HIVEMIND_HTTP_PORT \
+      --steemd-url "$HIVED_URL" \
+      --database-url "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" 2>&1 \
+      | tee -ia hivemind-server.log &
+
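+  # Note: $! is the PID of the background pipeline (tee), not of hive itself;
+  # the authoritative PID is the one hive writes to hive_server.pid.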
+  HIVEMIND_PID=$!
+
+  for i in `seq 1 10`; do
+    if [ -f hive_server.pid ]; then
+      echo "Hive server has been started (pid $HIVEMIND_PID)"
+      sleep $START_DELAY
+      exit 0
+    else
+      sleep 1
+    fi
+  done
+
+  # If we are here something went wrong
+  echo "Timeout reached. Hive server has not been started, exiting."
+  exit 1
+
+}
+
+
+check_pid
+"$1"
diff --git a/scripts/ci/hive-sync.sh b/scripts/ci/hive-sync.sh
new file mode 100755
index 000000000..6962980d0
--- /dev/null
+++ b/scripts/ci/hive-sync.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+set -euo pipefail
+
+cat << EOF
+Starting hive sync using hived url: ${HIVED_URL}.
+Max sync block is: ${HIVEMIND_MAX_BLOCK}.
+EOF
+
+# For debug only!
+# HIVEMIND_MAX_BLOCK=10001
+# HIVED_URL='{"default":"http://hived-node:8091"}'
+# HIVED_URL='{"default":"http://172.17.0.1:8091"}'
+
+DATABASE_URL="postgresql://${HIVEMIND_POSTGRES_USER}:${HIVEMIND_POSTGRES_USER_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${HIVEMIND_DB_NAME}"
+
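+# --log-mask-sensitive-data below keeps this connection string out of the logged args.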
+hive sync \
+    --log-mask-sensitive-data \
+    --pid-file hive_sync.pid \
+    --test-max-block=${HIVEMIND_MAX_BLOCK} \
+    --exit-after-sync \
+    --test-profile=False \
+    --steemd-url "$HIVED_URL" \
+    --prometheus-port 11011 \
+    --database-url "$DATABASE_URL" \
+    2>&1 | tee -i hivemind-sync.log
diff --git a/scripts/ci/hived-node/config.ini b/scripts/ci/hived-node/config.ini
new file mode 100644
index 000000000..1c1980279
--- /dev/null
+++ b/scripts/ci/hived-node/config.ini
@@ -0,0 +1,57 @@
+
+log-appender = {"appender":"stderr","stream":"std_error"}
+log-logger = {"name":"default","level":"info","appender":"stderr"}
+
+backtrace = yes
+
+plugin = webserver p2p json_rpc
+plugin = database_api
+# condenser_api enabled per abw request
+plugin = condenser_api
+plugin = block_api 
+# gandalf enabled witness + rc
+plugin = witness
+plugin = rc
+
+# market_history enabled per abw request
+plugin = market_history
+plugin = market_history_api
+
+plugin = account_history_rocksdb
+plugin = account_history_api
+
+# gandalf enabled transaction status
+plugin = transaction_status
+plugin = transaction_status_api
+
+# gandalf enabled account by key
+plugin = account_by_key
+plugin = account_by_key_api
+
+# and a few APIs
+plugin = block_api network_broadcast_api rc_api
+
+history-disable-pruning = 1
+account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
+
+#shared-file-dir = "/run/hive"
+shared-file-size = 20G
+shared-file-full-threshold = 9500
+shared-file-scale-rate = 1000
+
+flush-state-interval = 0
+
+market-history-bucket-size = [15,60,300,3600,86400]
+market-history-buckets-per-size = 5760
+
+p2p-endpoint = 0.0.0.0:2001
+p2p-seed-node = 
+#gtg.openhive.network:2001
+
+transaction-status-block-depth = 64000
+transaction-status-track-after-block = 42000000
+
+webserver-http-endpoint = 0.0.0.0:8091
+webserver-ws-endpoint = 0.0.0.0:8090
+
+webserver-thread-pool-size = 8
diff --git a/scripts/ci/hived-node/entrypoint.sh b/scripts/ci/hived-node/entrypoint.sh
new file mode 100755
index 000000000..cd0c81a5c
--- /dev/null
+++ b/scripts/ci/hived-node/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+SCRIPT=`realpath $0`
+SCRIPTPATH=`dirname $SCRIPT`
+
+DATADIR="${SCRIPTPATH}/datadir"
+
+HIVED="${SCRIPTPATH}/bin/hived"
+
+ARGS="$@"
+ARGS+=" "
+
+if [[ ! -z "$TRACK_ACCOUNT" ]]; then
+    ARGS+=" --plugin=account_history --plugin=account_history_api"
+    ARGS+=" --account-history-track-account-range=[\"$TRACK_ACCOUNT\",\"$TRACK_ACCOUNT\"]"
+fi
+
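+# Any non-empty value of USE_PUBLIC_BLOCKLOG enables the block_log download below.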
+if [[ "$USE_PUBLIC_BLOCKLOG" ]]; then
+  if [[ ! -e ${DATADIR}/blockchain/block_log ]]; then
+    if [[ ! -d ${DATADIR}/blockchain ]]; then
+      mkdir -p ${DATADIR}/blockchain
+    fi
+    echo "Hived: Downloading a block_log and replaying the blockchain"
+    echo "This may take a little while..."
+    wget -O ${DATADIR}/blockchain/block_log https://gtg.steem.house/get/blockchain/block_log
+    ARGS+=" --replay-blockchain"
+  fi
+fi
+
+"$HIVED" \
+  --data-dir="${DATADIR}" \
+  $ARGS \
+  2>&1
diff --git a/scripts/ci/hived-node/run.sh b/scripts/ci/hived-node/run.sh
new file mode 100755
index 000000000..0c3fa4a80
--- /dev/null
+++ b/scripts/ci/hived-node/run.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+# Start hived in docker container, replay up to 5000000 blocks
+
+MYDIR="$PWD"
+WORKDIR="/usr/local/hive/consensus"
+
+docker run -d \
+    --name hived-replay-5000000 \
+    -p 127.0.0.1:2001:2001 \
+    -p 127.0.0.1:8090:8090 \
+    -p 127.0.0.1:8091:8091 \
+    -v $MYDIR/config.ini:$WORKDIR/datadir/config.ini \
+    -v $MYDIR/blockchain/block_log:$WORKDIR/datadir/blockchain/block_log \
+    -v $MYDIR/entrypoint.sh:$WORKDIR/entrypoint.sh \
+    --entrypoint $WORKDIR/entrypoint.sh \
+    registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55 \
+    --replay-blockchain --stop-replay-at-block 5000000
diff --git a/scripts/ci/postgres/10/Dockerfile b/scripts/ci/postgres/10/Dockerfile
new file mode 100644
index 000000000..baf6ce221
--- /dev/null
+++ b/scripts/ci/postgres/10/Dockerfile
@@ -0,0 +1,18 @@
+FROM postgres:10.14
+
+LABEL description="Available non-standard extensions: plpython2, pg_qualstats."
+
+RUN apt-get update \
+        && apt-get install -y --no-install-recommends \
+            nano \
+            postgresql-plpython3-10 \
+            python3-psutil \
+            postgresql-10-pg-qualstats \
+        && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /docker-entrypoint-initdb.d
+
+# Create stuff for monitoring with pgwatch2 and pghero.
+COPY ./scripts/db-monitoring/setup/setup_monitoring.sh \
+        /docker-entrypoint-initdb.d/
+COPY ./scripts/db-monitoring/setup/sql-monitoring /sql-monitoring/
diff --git a/scripts/ci/postgres/12/Dockerfile b/scripts/ci/postgres/12/Dockerfile
new file mode 100644
index 000000000..2004df978
--- /dev/null
+++ b/scripts/ci/postgres/12/Dockerfile
@@ -0,0 +1,18 @@
+FROM postgres:12.4
+
+LABEL description="Available non-standard extensions: plpython2, pg_qualstats."
+
+RUN apt-get update \
+        && apt-get install -y --no-install-recommends \
+            nano \
+            postgresql-plpython3-12 \
+            python3-psutil \
+            postgresql-12-pg-qualstats \
+        && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /docker-entrypoint-initdb.d
+
+# Create stuff for monitoring with pgwatch2 and pghero.
+COPY ./scripts/db-monitoring/setup/setup_monitoring.sh \
+        /docker-entrypoint-initdb.d/
+COPY ./scripts/db-monitoring/setup/sql-monitoring /sql-monitoring/
diff --git a/scripts/ci/python/3.6/Dockerfile b/scripts/ci/python/3.6/Dockerfile
new file mode 100644
index 000000000..b82dbc46c
--- /dev/null
+++ b/scripts/ci/python/3.6/Dockerfile
@@ -0,0 +1,51 @@
+FROM python:3.6.12-buster
+
+# Setup python environment.
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONFAULTHANDLER 1
+
+# Install debian packages.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        curl \
+        ca-certificates \
+        gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install debian pgdg repository.
+RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main" \
+        > /etc/apt/sources.list.d/pgdg.list
+RUN apt-get update
+# Install PostgreSQL client programs for various PostgreSQL versions.
+RUN apt-get install -y --no-install-recommends \
+        postgresql-client-10 \
+        postgresql-client-11 \
+        postgresql-client-12 \
+        postgresql-client-13 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Upgrade some crucial python packages.
+RUN pip install --upgrade pip setuptools wheel
+
+# Install python dependencies via pip.
+RUN pip install pipenv poetry
+
+ARG user
+ENV user ${user}
+
+## Add user ##
+RUN groupadd --gid 1000 ${user} \
+    && useradd --create-home --uid 1000 --gid ${user} ${user}
+
+# Gitlab CI accepts only root user, so we don't set user here.
+# You can (and should) run commands in the container as user `alice` this way:
+# docker-compose run --rm --user=alice --name=myrunner runner /bin/bash
+# USER ${user}
+
+WORKDIR /home/${user}
+RUN chown -R ${user}:${user} /home/${user}
+
+CMD [ "python3" ]
diff --git a/scripts/ci/python/3.8/Dockerfile b/scripts/ci/python/3.8/Dockerfile
new file mode 100644
index 000000000..41c3d4dee
--- /dev/null
+++ b/scripts/ci/python/3.8/Dockerfile
@@ -0,0 +1,51 @@
+FROM python:3.8.3-buster
+
+# Setup python environment.
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONFAULTHANDLER 1
+
+# Install debian packages.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        curl \
+        ca-certificates \
+        gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install debian pgdg repository.
+RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt buster-pgdg main" \
+        > /etc/apt/sources.list.d/pgdg.list
+RUN apt-get update
+# Install PostgreSQL client programs for various PostgreSQL versions.
+RUN apt-get install -y --no-install-recommends \
+        postgresql-client-10 \
+        postgresql-client-11 \
+        postgresql-client-12 \
+        postgresql-client-13 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Upgrade some crucial python packages.
+RUN pip install --upgrade pip setuptools wheel
+
+# Install python dependencies via pip.
+RUN pip install pipenv poetry
+
+ARG user
+ENV user ${user}
+
+## Add user ##
+RUN groupadd --gid 1000 ${user} \
+    && useradd --create-home --uid 1000 --gid ${user} ${user}
+
+# Gitlab CI accepts only root user, so we don't set user here.
+# You can (and should) run commands in the container as user `alice` this way:
+# docker-compose run --rm --user=alice --name=myrunner runner /bin/bash
+# USER ${user}
+
+WORKDIR /home/${user}
+RUN chown -R ${user}:${user} /home/${user}
+
+CMD [ "python3" ]
diff --git a/scripts/ci/start-api-smoketest.sh b/scripts/ci/start-api-smoketest.sh
new file mode 100755
index 000000000..dd1363855
--- /dev/null
+++ b/scripts/ci/start-api-smoketest.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+pip install tox
+
+export HIVEMIND_ADDRESS=$1
+export HIVEMIND_PORT=$2
+echo "Starting tests on hivemind server running on ${HIVEMIND_ADDRESS}:${HIVEMIND_PORT}"
+
+echo "Selected test group (if empty all will be executed): $3"
+
+tox -- -W ignore::pytest.PytestDeprecationWarning -n auto --durations=0 \
+        --junitxml=../../../../$4 $3
diff --git a/scripts/ci/timer.sh b/scripts/ci/timer.sh
new file mode 100755
index 000000000..490a8a440
--- /dev/null
+++ b/scripts/ci/timer.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -euo pipefail
+
+start() {
+  mkdir -p ".tmp"
+  echo `date +%s` > ".tmp/timer-start"
+  echo "Timer: started at:" $(date -u +"%Y-%m-%dT%H:%M:%SZ")
+}
+
+check() {
+    echo "Timer: current time:" $(date -u +"%Y-%m-%dT%H:%M:%SZ")
+    start=$(cat ".tmp/timer-start" 2>/dev/null || echo 0)
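+    # If the start file is missing, fall back to 0 and skip the elapsed report.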
+    end=`date +%s`
+    if [ "$start" -gt "0" ]; then
+        runtime=$((end-start))
+        echo "Timer: time elapsed: ${runtime} s"
+    fi
+}
+
+"$1"
diff --git a/scripts/ci/wait-for-postgres.sh b/scripts/ci/wait-for-postgres.sh
new file mode 100755
index 000000000..bc705faf9
--- /dev/null
+++ b/scripts/ci/wait-for-postgres.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+# wait-for-postgres.sh
+# Waits until postgres accepts connections, then prints its version
+# and configuration. Connection parameters are taken from environment
+# variables POSTGRES_HOST, POSTGRES_PORT, ADMIN_POSTGRES_USER and
+# ADMIN_POSTGRES_USER_PASSWORD.
+# Use in docker-compose:
+# command: ["./wait-for-postgres.sh"]
+
+set -e
+
+LIMIT=30 # seconds
+
+wait_for_postgres() {
+    # Original work by wkedzierski@syncad.com, customized by wbarcik@syncad.com.
+    counter=0
+    echo "Waiting for postgres on ${POSTGRES_HOST}:${POSTGRES_PORT}. Timeout is ${LIMIT}s."
+    while ! pg_isready \
+            --username "$ADMIN_POSTGRES_USER" \
+            --host "$POSTGRES_HOST" \
+            --port "$POSTGRES_PORT" \
+            --dbname postgres \
+            --timeout=1 --quiet; do
+        counter=$((counter+1))
+        sleep 1
+        if [ "$counter" -ge "$LIMIT" ]; then
+            echo "Timeout reached, postgres is unavailable, exiting."
+            exit 1
+        fi
+    done
+}
+
+output_configuration() {
+
+    mkdir -p pg-stats
+    DIR=$PWD/pg-stats
+
+    echo "Postgres is up (discovered after ${counter}s)."
+    echo "-------------------------------------------------"
+    echo "Postgres version and configuration"
+    echo "-------------------------------------------------"
+    PGPASSWORD=$ADMIN_POSTGRES_USER_PASSWORD psql \
+            --username "$ADMIN_POSTGRES_USER" \
+            --host "$POSTGRES_HOST" \
+            --port "$POSTGRES_PORT" \
+            --dbname postgres <<EOF
+SELECT version();
+select name, setting, unit from pg_settings;
+\copy (select * from pg_settings) to '$DIR/pg_settings_on_start.csv' WITH CSV HEADER
+\q
+EOF
+    echo "-------------------------------------------------"
+
+}
+
+wait_for_postgres
+output_configuration
diff --git a/scripts/ci_sync.sh b/scripts/ci_sync.sh
index fa66f157e..e342c5e53 100755
--- a/scripts/ci_sync.sh
+++ b/scripts/ci_sync.sh
@@ -51,7 +51,7 @@ ln -sf ./local-site/bin/hive $HIVE_NAME
 echo Attempting to recreate database $DB_NAME
 psql -U $POSTGRES_USER -h localhost -d postgres -c "DROP DATABASE IF EXISTS $DB_NAME;"
 if [ "$HIVEMIND_ENABLE_DB_MONITORING" = "yes" ]; then
-  psql -U $POSTGRES_USER -h localhost -d postgres -c "CREATE DATABASE $DB_NAME TEMPLATE template_hive_ci;"
+  psql -U $POSTGRES_USER -h localhost -d postgres -c "CREATE DATABASE $DB_NAME TEMPLATE template_monitoring;"
 else
   psql -U $POSTGRES_USER -h localhost -d postgres -c "CREATE DATABASE $DB_NAME"
 fi
diff --git a/scripts/db-monitoring/.env_example b/scripts/db-monitoring/.env_example
deleted file mode 100644
index e4872e06c..000000000
--- a/scripts/db-monitoring/.env_example
+++ /dev/null
@@ -1 +0,0 @@
-PGHERO_DATABASE_URL=postgres://pghero:pghero@dev-66:5432/pghero
diff --git a/scripts/db-monitoring/docker-compose.yml b/scripts/db-monitoring/docker-compose.yml
index 5ba3fe762..cc446b339 100644
--- a/scripts/db-monitoring/docker-compose.yml
+++ b/scripts/db-monitoring/docker-compose.yml
@@ -5,14 +5,20 @@ services:
   pgwatch2:
     # Docs: https://pgwatch2.readthedocs.io/en/latest/index.html
     image: cybertec/pgwatch2-nonroot:1.8.0
-    ports:
-      - "30000:3000"  # Grafana dashboarding
-      - "8080:8080"   # Management Web UI (monitored hosts, metrics, metrics configurations)
-      # - "8081:8081"   # Gatherer healthcheck / statistics on number of gathered metrics (JSON)
-      # - "54320:5432"  # Postgres configuration (or metrics storage DB, when using the cybertec/pgwatch2-postgres image)
-      # - "8086:8086" # InfluxDB API (when using the InfluxDB version)
-      # - "8088:8088" # InfluxDB Backup port (when using the InfluxDB version)
     restart: unless-stopped
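+    # Published host ports below come from variables defined in the
+    # .env file (see readme-monitoring.md, step 4).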
+    ports:
+      # Grafana dashboarding
+      - "${PGWATCH2_GRAFANA_PUBLISHED_PORT}:3000"
+      # Management Web UI (monitored hosts, metrics, metrics configurations)
+      - "${PGWATCH2_WEBUI_PUBLISHED_PORT}:8080"
+      # Gatherer healthcheck / statistics on number of gathered metrics (JSON)
+      - "${PGWATCH2_DAEMON_PUBLISHED_PORT}:8081"
+      # Postgres configuration (or metrics storage DB, when using the cybertec/pgwatch2-postgres image)
+      - "${PGWATCH2_POSTGRES_PUBLISHED_PORT}:5432"
+      # InfluxDB API (when using the InfluxDB version)
+      - "${PGWATCH2_INFLUXDB_API_PUBLISHED_PORT}:8086"
+      # InfluxDB Backup port (when using the InfluxDB version)
+      - "${PGWATCH2_INFLUXDB_BACKUP_PUBLISHED_PORT}:8088"
     volumes:
       - pgwatch2-postgresql:/var/lib/postgresql
       - pgwatch2-grafana:/var/lib/grafana
@@ -26,12 +32,31 @@ services:
       DATABASE_URL: ${PGHERO_DATABASE_URL}
     restart: unless-stopped
     ports:
-      - "8085:8080"
+      - "${PGHERO_PUBLISHED_PORT}:8080"
+    volumes:
+      - $PWD/scripts/db-monitoring/pghero.yml:/app/config/pghero.yml
+
+  pgadmin4:
+    # Docs: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
+    image: dpage/pgadmin4:4.26
+    environment:
+      - PGADMIN_DEFAULT_EMAIL=${PGADMIN_DEFAULT_EMAIL}
+      - PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD}
+      - PGADMIN_LISTEN_ADDRESS=${PGADMIN_LISTEN_ADDRESS}
+      - PGADMIN_LISTEN_PORT=${PGADMIN_LISTEN_PORT}
+    restart: unless-stopped
+    ports:
+      - "${PGADMIN_PUBLISHED_PORT}:${PGADMIN_LISTEN_PORT}"
     volumes:
-      - ./pghero.yml:/app/config/pghero.yml
+      - pgadmin4-pgadmin4:/pgadmin4
+      - pgadmin4-certs:/certs
+      - pgadmin4-lib:/var/lib/pgadmin
 
 volumes:
   pgwatch2-postgresql:
   pgwatch2-grafana:
   pgwatch2-influxdb:
   pgwatch2-pgwatch2:
+  pgadmin4-pgadmin4:
+  pgadmin4-certs:
+  pgadmin4-lib:
diff --git a/scripts/db-monitoring/pghero_example.yml b/scripts/db-monitoring/pghero_example.yml
index cd973a48c..66ebbe40c 100644
--- a/scripts/db-monitoring/pghero_example.yml
+++ b/scripts/db-monitoring/pghero_example.yml
@@ -1,7 +1,7 @@
 databases:
   pghero:
-    url: postgres://pghero:pghero@dev-66:5432/pghero
+    url: postgres://pghero:pghero@example-1.com:5432/pghero
   hive_test:
-    url: postgres://pghero:pghero@dev-66:5432/hive_test
+    url: postgres://pghero:pghero@example-2.com:5432/hive_test
   bamboo:
-    url: postgres://pghero:pghero@dev-66:5432/bamboo
+    url: postgres://pghero:pghero@example-3.com:5432/bamboo
diff --git a/scripts/db-monitoring/readme-monitoring.md b/scripts/db-monitoring/readme-monitoring.md
index e578dc3c2..93c068218 100644
--- a/scripts/db-monitoring/readme-monitoring.md
+++ b/scripts/db-monitoring/readme-monitoring.md
@@ -8,20 +8,34 @@ both tools, modify this tutorial accordingly.
 
 1. Install required apt packages:
 ```
-sudo apt-get install postgresql-contrib postgresql-plpython3 python3-psutil postgresql-10-pg-qualstats
+# Should be installed on Ubuntu by default when you have Postgresql
+# installed. Required by both pgwatch2 and pghero.
+sudo apt-get install postgresql-contrib
+
+# Only for pgwatch2, if you need to monitor the host's cpu load, IO
+# and memory usage inside the pgwatch2 instance.
+sudo apt-get install postgresql-plpython3 python3-psutil
+
+# Only for pgwatch2, if you need recommendations about monitored
+# queries. Note: you should install the official Postgresql ubuntu
+# [pgdg](https://www.postgresql.org/about/news/pgdg-apt-repository-for-debianubuntu-1432/)
+# repository to get the apt package postgresql-10-pg-qualstats.
+sudo apt-get install postgresql-10-pg-qualstats
 ```
-Note: you should install official Postgresql ubuntu
-[pgdg](https://www.postgresql.org/about/news/pgdg-apt-repository-for-debianubuntu-1432/) repository to get apt package postgresql-10-pg-qualstats.
 
-2. Install postgresql custom configuration file:
+2. Install the postgresql custom configuration file. Be careful with the
+line concerning `shared_preload_libraries` (it can override your existing
+settings). Alternatively, you can append the contents of the file
+`scripts/db-monitoring/setup/postgresql_monitoring.conf` to the bottom
+of your file `/etc/postgresql/10/main/postgresql.conf`.
 ```
 sudo cp scripts/db-monitoring/setup/postgresql_monitoring.conf /etc/postgresql/10/main/conf.d/90-monitoring.conf
 ```
-Restart postgresql.
+**Restart postgresql.**
 
 3. Create roles `pgwatch2` and `pghero` (these are unprivileged roles
 for monitoring) in postgresql and create template database
-`template_hive_ci`, in all postgresql instances, that you want to monitor
+`template_monitoring` in all postgresql instances that you want to monitor
 (we need postgres superuser here):
 
 ```
@@ -29,28 +43,30 @@ cd scripts/db-monitoring/setup
 PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1" ./setup_monitoring.sh
 ```
 
-Note that above script creates also database `pghero` for gathering historical
-stats data.
+Note that the above script also creates the database `pghero` for
+gathering historical stats data.
 
 Remember, that all databases under monitoring should replicate the structure
-and objects from template `template_hive_ci`, so you should create them with
+and objects from template `template_monitoring`, so you should create them with
 command:
 ```
-create database some_db template template_hive_ci
+create database some_db template template_monitoring
 ```
 
 In case of already existing database, which you can't recreate, you should
 install needed stuff into it by running command:
 ```
 cd scripts/db-monitoring/setup
-PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1" ./setup_monitoring.sh some_existing_db_name yes yes no no
+PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1" \
+    ./setup_monitoring.sh some_existing_db_name yes yes no no
 ```
 
 4. Create `.env` file and create configuration file for `pghero`
 (edit to your needs):
 ```
 cp scripts/db-monitoring/docker/.env_example scripts/db-monitoring/.env
-cp scripts/db-monitoring/docker/pghero_example.yml scripts/db-monitoring/docker/pghero.yml
+cp scripts/db-monitoring/pghero_example.yml \
+    scripts/db-monitoring/pghero.yml
 ```
 
 5. Run services `pgwatch2` and `pghero` in docker containers:
@@ -69,3 +85,8 @@ Use unprivileged user `pgwatch2` created earlier.
 
 9. Go to http://ci-server.domain:8085/ to see dashboard produced by
 `pghero`.
+
+10. Optionally, install the cron tasks from the file
+`scripts/db-monitoring/setup/pghero_cron_jobs.txt`
+to have your pghero instance collect historical data (on the host
+which runs the pghero docker container).
\ No newline at end of file
diff --git a/scripts/db-monitoring/setup/create_template.sql b/scripts/db-monitoring/setup/create_template.sql
deleted file mode 100644
index 7e4e0409c..000000000
--- a/scripts/db-monitoring/setup/create_template.sql
+++ /dev/null
@@ -1,9 +0,0 @@
--- Create database
-
--- Example run:
--- psql -p 5432 -U postgres -h 127.0.0.1 -f ./create_template.sql --set=db_name=template_hive_ci
-
-SET client_encoding = 'UTF8';
-SET client_min_messages = 'warning';
-
-CREATE DATABASE :db_name;
diff --git a/scripts/db-monitoring/setup/pghero_cron_jobs.txt b/scripts/db-monitoring/setup/pghero_cron_jobs.txt
index e72f76793..f18ad4d7d 100644
--- a/scripts/db-monitoring/setup/pghero_cron_jobs.txt
+++ b/scripts/db-monitoring/setup/pghero_cron_jobs.txt
@@ -1,10 +1,19 @@
+# Cron tasks for pghero historical data collector.
 # Install with `crontab -e`
 
-# Pghero gather query stats
+# Explanation:
+# postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero
+# is the connection string to the database in which pghero collects
+# historical data.
+
+# dbmonitoring_pghero_1 is the name of the docker container running
+# the pghero instance.
+
+# Pghero collect query stats.
 */5 * * * * docker exec -e DATABASE_URL=postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero dbmonitoring_pghero_1 bin/rake pghero:capture_query_stats > /dev/null 2>&1
 
-# Pghero gather space stats
+# Pghero collect disk space stats.
 */5 * * * * docker exec -e DATABASE_URL=postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero dbmonitoring_pghero_1 bin/rake pghero:capture_space_stats > /dev/null 2>&1
 
-# Pghero remove query stats
+# Pghero clean up old query stats.
 @monthly docker exec -e DATABASE_URL=postgres://pghero:pghero@hive-4.pl.syncad.com:5432/pghero dbmonitoring_pghero_1 bin/rake pghero:clean_query_stats > /dev/null 2>&1
diff --git a/scripts/db-monitoring/setup/setup_monitoring.sh b/scripts/db-monitoring/setup/setup_monitoring.sh
index 4fb7336a1..a1cf6af6e 100755
--- a/scripts/db-monitoring/setup/setup_monitoring.sh
+++ b/scripts/db-monitoring/setup/setup_monitoring.sh
@@ -2,44 +2,46 @@
 
 # Create stuff for monitoring.
 
-DB_NAME=${1:-template_hive_ci}
+set -e
+
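+# Usage:
+#   PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1" ./setup_monitoring.sh \
+#       [db_name] [setup_pgwatch2] [setup_pghero] [create_template] [create_db_pghero]
+# Defaults: template_monitoring yes yes yes no
+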
+DB_NAME=${1:-template_monitoring}
 SETUP_MONITORING_PGWATCH2=${2:-yes}
 SETUP_MONITORING_PGHERO=${3:-yes}
 CREATE_TEMPLATE=${4:-yes}
-CREATE_DB_PGHERO=${5:-yes}
+CREATE_DB_PGHERO=${5:-no}
+SQL_SCRIPTS_PATH=$PWD/sql-monitoring
 
 if [ -z "$PSQL_OPTIONS" ]; then
-    PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1"
+    # PSQL_OPTIONS="-p 5432 -U postgres -h 127.0.0.1"
+    PSQL_OPTIONS=""
 fi
 
 setup_monitoring_pgwatch2() {
-    # Install stuff for pgwatch2 into database under monitoring.
-    psql $PSQL_OPTIONS -f ./create_role_pgwatch2.sql
-    psql $PSQL_OPTIONS -d $DB_NAME -f ./setup_monitoring_pgwatch2.sql
+    echo "Creating role and stuff for pgwatch2"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/20_create_role_pgwatch2.sql
+    psql $PSQL_OPTIONS -d $DB_NAME -f $SQL_SCRIPTS_PATH/30_setup_monitoring_pgwatch2.sql
 }
 
 setup_monitoring_pghero() {
-    # Install stuff for pghero into database under monitoring
-    psql $PSQL_OPTIONS -f ./create_role_pghero.sql
-    psql $PSQL_OPTIONS -d $DB_NAME -f ./setup_monitoring_pghero.sql
+    echo "Creating role and stuff for pghero"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/21_create_role_pghero.sql
+    psql $PSQL_OPTIONS -d $DB_NAME -f $SQL_SCRIPTS_PATH/31_setup_monitoring_pghero.sql
 }
 
 create_db_pghero() {
-    # Create database for pghero for collecting historical stats data.
-    psql $PSQL_OPTIONS -f ./create_database_pghero.sql
-    psql postgresql://pghero:pghero@127.0.0.1:5432/pghero -f ./create_tables_pghero.sql
+    echo "Creating database pghero for collecting historical stats data"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/40_create_database_pghero.sql
+    psql postgresql://pghero:pghero@127.0.0.1:5432/pghero -f $SQL_SCRIPTS_PATH/41_create_tables_pghero.sql
 }
 
 create_template() {
-    # Create template database.
-    echo "Creating template database $DB_NAME"
-    psql $PSQL_OPTIONS -f ./create_template.sql --set=db_name=$DB_NAME
+    echo "Creating database $DB_NAME"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/10_create_template.sql --set=db_name=$DB_NAME
 }
 
 lock_template() {
-    # Lock connections to template database.
-    echo "Locking connections to template database $DB_NAME"
-    psql $PSQL_OPTIONS -f ./setup_template.sql --set=db_name=$DB_NAME
+    echo "Locking connections to database $DB_NAME"
+    psql $PSQL_OPTIONS -f $SQL_SCRIPTS_PATH/50_setup_template.sql --set=db_name=$DB_NAME
 }
 
 main() {
diff --git a/scripts/db-monitoring/setup/setup_template.sql b/scripts/db-monitoring/setup/setup_template.sql
deleted file mode 100644
index 78a3e1229..000000000
--- a/scripts/db-monitoring/setup/setup_template.sql
+++ /dev/null
@@ -1,13 +0,0 @@
--- Create database
-
--- Example run:
--- psql -p 5432 -U postgres -h 127.0.0.1 -f ./setup_template.sql --set=db_name=template_hive_ci
-
-SET client_encoding = 'UTF8';
-SET client_min_messages = 'warning';
-
-update pg_database
-    set
-        datistemplate = true,
-        datallowconn = false
-    where datname = :'db_name';
diff --git a/scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql b/scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql
new file mode 100644
index 000000000..2e66e6f42
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/10_create_template.sql
@@ -0,0 +1,23 @@
+-- Create database
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./10_create_template.sql --set=db_name=template_monitoring
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+-- Handle default values for variables.
+\set db_name ':db_name'
+-- Now db_name is set to the literal string ':db_name' if it was not
+-- already set. Check it using a CASE expression and fall back to the default:
+SELECT CASE
+  WHEN :'db_name'= ':db_name'
+  THEN 'template_monitoring'
+  ELSE :'db_name'
+END AS "db_name"
+\gset
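+-- For example, running this file without --set=db_name=... creates the
+-- default database template_monitoring.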
+
+\echo Creating database :db_name
+
+CREATE DATABASE :db_name;
+COMMENT ON DATABASE :db_name IS 'Template for monitoring';
diff --git a/scripts/db-monitoring/setup/create_role_pgwatch2.sql b/scripts/db-monitoring/setup/sql-monitoring/20_create_role_pgwatch2.sql
similarity index 82%
rename from scripts/db-monitoring/setup/create_role_pgwatch2.sql
rename to scripts/db-monitoring/setup/sql-monitoring/20_create_role_pgwatch2.sql
index 232cbd407..af1a12c0a 100644
--- a/scripts/db-monitoring/setup/create_role_pgwatch2.sql
+++ b/scripts/db-monitoring/setup/sql-monitoring/20_create_role_pgwatch2.sql
@@ -6,6 +6,8 @@
 SET client_encoding = 'UTF8';
 SET client_min_messages = 'warning';
 
+\echo Creating role pgwatch2
+
 DO
 $do$
 BEGIN
@@ -16,8 +18,10 @@ BEGIN
         -- used for monitoring can only open a limited number of connections
         -- (there are according checks in code, but multiple instances might be launched)
         CREATE ROLE pgwatch2 WITH LOGIN PASSWORD 'pgwatch2';
+        COMMENT ON ROLE pgwatch2 IS
+            'Role for monitoring https://github.com/cybertec-postgresql/pgwatch2';
         ALTER ROLE pgwatch2 CONNECTION LIMIT 10;
         GRANT pg_monitor TO pgwatch2;
    END IF;
 END
-$do$
+$do$;
diff --git a/scripts/db-monitoring/setup/create_role_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/21_create_role_pghero.sql
similarity index 80%
rename from scripts/db-monitoring/setup/create_role_pghero.sql
rename to scripts/db-monitoring/setup/sql-monitoring/21_create_role_pghero.sql
index 56cc17bbc..ebb1adc62 100644
--- a/scripts/db-monitoring/setup/create_role_pghero.sql
+++ b/scripts/db-monitoring/setup/sql-monitoring/21_create_role_pghero.sql
@@ -6,6 +6,8 @@
 SET client_encoding = 'UTF8';
 SET client_min_messages = 'warning';
 
+\echo Creating role pghero
+
 DO
 $do$
 BEGIN
@@ -13,9 +15,11 @@ BEGIN
         raise warning 'Role % already exists', 'pghero';
     ELSE
         CREATE ROLE pghero WITH LOGIN PASSWORD 'pghero';
+        COMMENT ON ROLE pghero IS
+            'Role for monitoring https://github.com/ankane/pghero/';
         ALTER ROLE pghero CONNECTION LIMIT 10;
         ALTER ROLE pghero SET search_path = pghero, pg_catalog, public;
         GRANT pg_monitor TO pghero;
    END IF;
 END
-$do$
+$do$;
diff --git a/scripts/db-monitoring/setup/setup_monitoring_pgwatch2.sql b/scripts/db-monitoring/setup/sql-monitoring/30_setup_monitoring_pgwatch2.sql
similarity index 98%
rename from scripts/db-monitoring/setup/setup_monitoring_pgwatch2.sql
rename to scripts/db-monitoring/setup/sql-monitoring/30_setup_monitoring_pgwatch2.sql
index 5754e72b7..c76ad6f7c 100644
--- a/scripts/db-monitoring/setup/setup_monitoring_pgwatch2.sql
+++ b/scripts/db-monitoring/setup/sql-monitoring/30_setup_monitoring_pgwatch2.sql
@@ -2,17 +2,20 @@
 -- using program https://github.com/cybertec-postgresql/pgwatch2/
 
 -- Example run:
--- psql -p 5432 -U postgres -h 127.0.0.1 -d template_hive_ci -f ./setup_monitoring_pgwatch2.sql
+-- psql -p 5432 -U postgres -h 127.0.0.1 -d template_monitoring -f ./setup_monitoring_pgwatch2.sql
 
 SET client_encoding = 'UTF8';
 SET client_min_messages = 'warning';
 
--- TODO We need extension pg_qualstats, see https://www.cybertec-postgresql.com/en/pgwatch2-v1-7-0-released/
 
-BEGIN;
+\echo Installing monitoring stuff for pgwatch2
 
+BEGIN;
 
 CREATE SCHEMA IF NOT EXISTS pgwatch2;
+COMMENT ON SCHEMA pgwatch2 IS
+    'Schema contains objects for monitoring https://github.com/cybertec-postgresql/pgwatch2';
+
 
 CREATE EXTENSION IF NOT EXISTS plpython3u WITH SCHEMA pg_catalog;
 COMMENT ON EXTENSION plpython3u IS 'PL/Python3U untrusted procedural language';
diff --git a/scripts/db-monitoring/setup/setup_monitoring_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/31_setup_monitoring_pghero.sql
similarity index 80%
rename from scripts/db-monitoring/setup/setup_monitoring_pghero.sql
rename to scripts/db-monitoring/setup/sql-monitoring/31_setup_monitoring_pghero.sql
index 21a18d34f..deba4b364 100644
--- a/scripts/db-monitoring/setup/setup_monitoring_pghero.sql
+++ b/scripts/db-monitoring/setup/sql-monitoring/31_setup_monitoring_pghero.sql
@@ -2,15 +2,19 @@
 -- using program https://github.com/cybertec-postgresql/pgwatch2/
 
 -- Example run:
--- psql -p 5432 -U postgres -h 127.0.0.1 -d template_hive_ci -f ./setup_monitoring.sql
+-- psql -p 5432 -U postgres -h 127.0.0.1 -d template_monitoring -f ./setup_monitoring.sql
 
 SET client_encoding = 'UTF8';
 SET client_min_messages = 'warning';
 
 
+\echo Installing monitoring stuff for pghero
+
 BEGIN;
 
 CREATE SCHEMA IF NOT EXISTS pghero;
+COMMENT ON SCHEMA pghero IS
+    'Schema contains objects for monitoring https://github.com/ankane/pghero/';
 
 CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA public;
 COMMENT ON EXTENSION pg_stat_statements
@@ -22,7 +26,7 @@ $$
   SELECT * FROM pg_catalog.pg_stat_activity;
 $$ LANGUAGE sql VOLATILE SECURITY DEFINER;
 
-CREATE VIEW pghero.pg_stat_activity AS SELECT * FROM pghero.pg_stat_activity();
+CREATE OR REPLACE VIEW pghero.pg_stat_activity AS SELECT * FROM pghero.pg_stat_activity();
 
 -- kill queries
 CREATE OR REPLACE FUNCTION pghero.pg_terminate_backend(pid int) RETURNS boolean AS
@@ -36,7 +40,7 @@ $$
   SELECT * FROM public.pg_stat_statements;
 $$ LANGUAGE sql VOLATILE SECURITY DEFINER;
 
-CREATE VIEW pghero.pg_stat_statements AS SELECT * FROM pghero.pg_stat_statements();
+CREATE OR REPLACE VIEW pghero.pg_stat_statements AS SELECT * FROM pghero.pg_stat_statements();
 
 -- query stats reset
 CREATE OR REPLACE FUNCTION pghero.pg_stat_statements_reset() RETURNS void AS
@@ -57,7 +61,7 @@ $$
   SELECT schemaname, tablename, attname, null_frac, avg_width, n_distinct FROM pg_catalog.pg_stats;
 $$ LANGUAGE sql VOLATILE SECURITY DEFINER;
 
-CREATE VIEW pghero.pg_stats AS SELECT * FROM pghero.pg_stats();
+CREATE OR REPLACE VIEW pghero.pg_stats AS SELECT * FROM pghero.pg_stats();
 
 GRANT USAGE ON SCHEMA pghero TO pg_monitor;
 
diff --git a/scripts/db-monitoring/setup/create_database_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/40_create_database_pghero.sql
similarity index 61%
rename from scripts/db-monitoring/setup/create_database_pghero.sql
rename to scripts/db-monitoring/setup/sql-monitoring/40_create_database_pghero.sql
index 69cd26f47..f5d8c428c 100644
--- a/scripts/db-monitoring/setup/create_database_pghero.sql
+++ b/scripts/db-monitoring/setup/sql-monitoring/40_create_database_pghero.sql
@@ -6,4 +6,8 @@
 SET client_encoding = 'UTF8';
 SET client_min_messages = 'warning';
 
+\echo Creating database pghero
+
 CREATE DATABASE pghero OWNER pghero;
+COMMENT ON DATABASE pghero
+    IS 'Historical data for monitoring https://github.com/ankane/pghero/';
\ No newline at end of file
diff --git a/scripts/db-monitoring/setup/create_tables_pghero.sql b/scripts/db-monitoring/setup/sql-monitoring/41_create_tables_pghero.sql
similarity index 80%
rename from scripts/db-monitoring/setup/create_tables_pghero.sql
rename to scripts/db-monitoring/setup/sql-monitoring/41_create_tables_pghero.sql
index 7d85d32e8..7bb2193a9 100644
--- a/scripts/db-monitoring/setup/create_tables_pghero.sql
+++ b/scripts/db-monitoring/setup/sql-monitoring/41_create_tables_pghero.sql
@@ -8,12 +8,15 @@
 SET client_encoding = 'UTF8';
 SET client_min_messages = 'warning';
 
+\echo Creating tables in database pghero
+
+\c pghero pghero
 
 BEGIN;
 
 CREATE SCHEMA pghero;
 
-CREATE TABLE "pghero_query_stats" (
+CREATE TABLE "pghero"."pghero_query_stats" (
   "id" bigserial primary key,
   "database" text,
   "user" text,
@@ -23,7 +26,7 @@ CREATE TABLE "pghero_query_stats" (
   "calls" bigint,
   "captured_at" timestamp
 );
-CREATE INDEX ON "pghero_query_stats" ("database", "captured_at");
+CREATE INDEX ON "pghero"."pghero_query_stats" ("database", "captured_at");
 
 CREATE TABLE "pghero_space_stats" (
   "id" bigserial primary key,
diff --git a/scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql b/scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql
new file mode 100644
index 000000000..56a7d8790
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/50_setup_template.sql
@@ -0,0 +1,24 @@
+-- Mark a database as a template and lock connections to it.
+
+-- Example run:
+-- psql -p 5432 -U postgres -h 127.0.0.1 -f ./50_setup_template.sql --set=db_name=template_monitoring
+
+SET client_encoding = 'UTF8';
+SET client_min_messages = 'warning';
+
+-- Handle default values for variables.
+\set db_name ':db_name'
+-- Now db_name is set to the literal string ':db_name' if it was not
+-- already set. Check it using a CASE expression and fall back to the default:
+SELECT CASE
+  WHEN :'db_name'= ':db_name'
+  THEN 'template_monitoring'
+  ELSE :'db_name'
+END AS "db_name"
+\gset
+
+update pg_database
+    set
+        datistemplate = true,
+        datallowconn = false
+    where datname = :'db_name';
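+
+-- To allow connections to the template database again:
+--   update pg_database set datallowconn = true where datname = 'template_monitoring';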
diff --git a/scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql b/scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql
new file mode 100644
index 000000000..991046901
--- /dev/null
+++ b/scripts/db-monitoring/setup/sql-monitoring/ddl_deps.sql
@@ -0,0 +1,208 @@
+/**
+An easy way to drop and recreate table or view dependencies when you need
+to alter something in them.
+See http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/.
+Enhanced by Wojciech Barcik wbarcik@syncad.com (handling of rules).
+*/
+
+
+-- SEQUENCE: deps_saved_ddl_deps_id_seq
+
+-- DROP SEQUENCE deps_saved_ddl_deps_id_seq;
+
+CREATE SEQUENCE deps_saved_ddl_deps_id_seq
+    INCREMENT 1
+    START 1
+    MINVALUE 1
+    MAXVALUE 9223372036854775807
+    CACHE 1;
+
+
+-- Table: deps_saved_ddl
+
+-- DROP TABLE deps_saved_ddl;
+
+CREATE TABLE deps_saved_ddl
+(
+    deps_id integer NOT NULL DEFAULT nextval('deps_saved_ddl_deps_id_seq'::regclass),
+    deps_view_schema character varying(255) COLLATE pg_catalog."default",
+    deps_view_name character varying(255) COLLATE pg_catalog."default",
+    deps_ddl_to_run text COLLATE pg_catalog."default",
+    CONSTRAINT deps_saved_ddl_pkey PRIMARY KEY (deps_id)
+)
+WITH (
+    OIDS = FALSE
+)
+TABLESPACE pg_default;
+
+
+-- create table deps_saved_ddl
+-- (
+--     deps_id serial primary key,
+--     deps_view_schema varchar(255),
+--     deps_view_name varchar(255),
+--     deps_ddl_to_run text
+-- );
+
+
+-- FUNCTION: deps_save_and_drop_dependencies(character varying, character varying, boolean)
+
+-- DROP FUNCTION deps_save_and_drop_dependencies(character varying, character varying, boolean);
+
+CREATE OR REPLACE FUNCTION deps_save_and_drop_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying,
+    drop_relation boolean DEFAULT true
+  )
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  COST 100
+  VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+@wojtek added DDL for rules.
+
+Drops dependencies of view, but saves them into table `deps_saved_ddl`, for
+future restoration. Use function `deps_restore_dependencies` to restore
+dependencies dropped by this function.
+*/
+declare
+  v_curr record;
+begin
+for v_curr in
+(
+  select obj_schema, obj_name, obj_type from
+  (
+  with recursive recursive_deps(obj_schema, obj_name, obj_type, depth) as
+  (
+    select p_view_schema, p_view_name, null::varchar, 0
+    union
+    select dep_schema::varchar, dep_name::varchar, dep_type::varchar,
+        recursive_deps.depth + 1 from
+    (
+      select ref_nsp.nspname ref_schema, ref_cl.relname ref_name,
+          rwr_cl.relkind dep_type, rwr_nsp.nspname dep_schema,
+          rwr_cl.relname dep_name
+      from pg_depend dep
+      join pg_class ref_cl on dep.refobjid = ref_cl.oid
+      join pg_namespace ref_nsp on ref_cl.relnamespace = ref_nsp.oid
+      join pg_rewrite rwr on dep.objid = rwr.oid
+      join pg_class rwr_cl on rwr.ev_class = rwr_cl.oid
+      join pg_namespace rwr_nsp on rwr_cl.relnamespace = rwr_nsp.oid
+      where dep.deptype = 'n'
+      and dep.classid = 'pg_rewrite'::regclass
+    ) deps
+    join recursive_deps on deps.ref_schema = recursive_deps.obj_schema
+        and deps.ref_name = recursive_deps.obj_name
+    where (deps.ref_schema != deps.dep_schema or deps.ref_name != deps.dep_name)
+  )
+  select obj_schema, obj_name, obj_type, depth
+  from recursive_deps
+  where depth > 0
+  ) t
+  group by obj_schema, obj_name, obj_type
+  order by max(depth) desc
+) loop
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'COMMENT ON ' ||
+  case
+    when c.relkind = 'v' then 'VIEW'
+    when c.relkind = 'm' then 'MATERIALIZED VIEW'
+  else ''
+  end
+  || ' ' || n.nspname || '.' || c.relname || ' IS '''
+      || replace(d.description, '''', '''''') || ''';'
+  from pg_class c
+  join pg_namespace n on n.oid = c.relnamespace
+  join pg_description d on d.objoid = c.oid and d.objsubid = 0
+  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+      and d.description is not null;
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'COMMENT ON COLUMN ' || n.nspname || '.'
+      || c.relname || '.' || a.attname || ' IS '''
+      || replace(d.description, '''', '''''') || ''';'
+  from pg_class c
+  join pg_attribute a on c.oid = a.attrelid
+  join pg_namespace n on n.oid = c.relnamespace
+  join pg_description d on d.objoid = c.oid and d.objsubid = a.attnum
+  where n.nspname = v_curr.obj_schema and c.relname = v_curr.obj_name
+      and d.description is not null;
+
+  insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+  select p_view_schema, p_view_name, 'GRANT ' || privilege_type || ' ON '
+      || table_schema || '.' || table_name || ' TO ' || grantee
+  from information_schema.role_table_grants
+  where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+  if v_curr.obj_type = 'v' then
+
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, definition
+    from pg_catalog.pg_rules
+    where schemaname = v_curr.obj_schema and tablename = v_curr.obj_name;
+
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, 'CREATE VIEW '
+        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || view_definition
+    from information_schema.views
+    where table_schema = v_curr.obj_schema and table_name = v_curr.obj_name;
+
+  elsif v_curr.obj_type = 'm' then
+    insert into deps_saved_ddl(deps_view_schema, deps_view_name, deps_ddl_to_run)
+    select p_view_schema, p_view_name, 'CREATE MATERIALIZED VIEW '
+        || v_curr.obj_schema || '.' || v_curr.obj_name || ' AS ' || definition
+    from pg_matviews
+    where schemaname = v_curr.obj_schema and matviewname = v_curr.obj_name;
+  end if;
+
+  if drop_relation = true then
+    execute 'DROP ' ||
+    case
+      when v_curr.obj_type = 'v' then 'VIEW'
+      when v_curr.obj_type = 'm' then 'MATERIALIZED VIEW'
+    end
+    || ' ' || v_curr.obj_schema || '.' || v_curr.obj_name;
+  end if;
+
+end loop;
+end;
+$BODY$;
+
+
+-- FUNCTION: deps_restore_dependencies(character varying, character varying)
+
+-- DROP FUNCTION deps_restore_dependencies(character varying, character varying);
+
+CREATE OR REPLACE FUNCTION deps_restore_dependencies(
+    p_view_schema character varying,
+    p_view_name character varying
+  )
+  RETURNS void
+  LANGUAGE 'plpgsql'
+  COST 100
+  VOLATILE
+AS $BODY$
+/**
+From http://pretius.com/postgresql-stop-worrying-about-table-and-view-dependencies/
+
+Restores dependencies dropped by function `deps_save_and_drop_dependencies`.
+*/
+declare
+  v_curr record;
+begin
+for v_curr in
+(
+  select deps_ddl_to_run
+  from deps_saved_ddl
+  where deps_view_schema = p_view_schema and deps_view_name = p_view_name
+  order by deps_id desc
+) loop
+  execute v_curr.deps_ddl_to_run;
+end loop;
+delete from deps_saved_ddl
+where deps_view_schema = p_view_schema and deps_view_name = p_view_name;
+end;
+$BODY$;
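+
+
+-- Example usage (a sketch; the table and column names are hypothetical):
+--
+--   SELECT deps_save_and_drop_dependencies('public', 'my_table');
+--   ALTER TABLE public.my_table ALTER COLUMN some_column TYPE bigint;
+--   SELECT deps_restore_dependencies('public', 'my_table');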
diff --git a/tests/tests_api b/tests/tests_api
index a8554ac12..d8c41b9bb 160000
--- a/tests/tests_api
+++ b/tests/tests_api
@@ -1 +1 @@
-Subproject commit a8554ac127a6f9d0ba42f2c4338ebe9c0a913541
+Subproject commit d8c41b9bbbe8b38744cfe1079e3ff72ce125a554
-- 
GitLab