Commit 317ba694 authored by Bartek Wrona

Merge branch 'wb-new-docker-executor' into 'develop'

New implementation of docker executors on CI server

See merge request !386
parents 04d57395 0eac560b
Related merge requests: !456 Release candidate v1 24, !386 New implementation of docker executors on CI server
Showing with 2693 additions and 391 deletions
@@ -144,3 +144,5 @@ Pipfile.lock
pghero.yml
*~
.tmp
.private
stages:
  - build
  - test
  - data-supply
  - deploy
  - e2e-test
  - benchmark-tests
  - post-deploy

variables:
  GIT_DEPTH: 1
  LC_ALL: "C"
  GIT_STRATEGY: clone
  GIT_SUBMODULE_STRATEGY: recursive
  GIT_CLONE_PATH: $CI_BUILDS_DIR/$CI_COMMIT_REF_SLUG/$CI_CONCURRENT_ID/project-name
  HIVEMIND_SOURCE_HIVED_URL: $HIVEMIND_SOURCE_HIVED_URL
  HIVEMIND_DB_NAME: "hive_$CI_COMMIT_REF_SLUG"
  HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
  # Configured at gitlab repository settings side
  POSTGRES_USER: $HIVEMIND_POSTGRES_USER
  POSTGRES_PASSWORD: $HIVEMIND_POSTGRES_PASSWORD
  POSTGRES_HOST_AUTH_METHOD: trust
  # official way to provide password to psql: http://www.postgresql.org/docs/9.3/static/libpq-envars.html
  PGPASSWORD: $HIVEMIND_POSTGRES_PASSWORD

default:
  before_script:
    - pwd
    - echo "CI_NODE_TOTAL is $CI_NODE_TOTAL"
    - echo "CI_NODE_INDEX is $CI_NODE_INDEX"
    - echo "CI_CONCURRENT_ID is $CI_CONCURRENT_ID"
    - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"

hivemind_build:
  stage: build
  script:
    - pip3 install --user --upgrade pip setuptools
    - git fetch --tags
    - git tag -f ci_implicit_tag
    - echo $PYTHONUSERBASE
    - "python3 setup.py bdist_egg"
    - ls -l dist/*
  artifacts:
    paths:
      - dist/
    expire_in: 1 week
  tags:
    - hivemind
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
      when: always
    - if: '$CI_COMMIT_BRANCH == "develop"'
      when: always

hivemind_sync:
  stage: data-supply
  environment:
    name: "hive sync built from branch $CI_COMMIT_REF_NAME targeting database $HIVEMIND_DB_NAME"
  needs:
    - job: hivemind_build
      artifacts: true
  variables:
    GIT_STRATEGY: none
    PYTHONUSERBASE: ./local-site
  script:
    - pip3 install --user --upgrade pip setuptools
    # WARNING!!! temporarily hardcoded 5000017 instead $HIVEMIND_MAX_BLOCK
    # revert that change when $HIVEMIND_MAX_BLOCK will be set to 5000017
    - scripts/ci_sync.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" 5000017 $HIVEMIND_HTTP_PORT
  artifacts:
    paths:
      - hivemind-sync.log
    expire_in: 1 week
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
      when: always
    - if: '$CI_COMMIT_BRANCH == "develop"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: on_success
  tags:
    - hivemind

hivemind_start_server:
  stage: deploy
  environment:
    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
    url: "http://hive-4.pl.syncad.com:$HIVEMIND_HTTP_PORT"
    on_stop: hivemind_stop_server
  needs:
    - job: hivemind_build
      artifacts: true
    # - job: hivemind_sync
    #   artifacts: true
  variables:
    GIT_STRATEGY: none
    PYTHONUSERBASE: ./local-site
  script:
    - scripts/ci_start_server.sh "$HIVEMIND_DB_NAME" "$HIVEMIND_POSTGRESQL_CONNECTION_STRING" "$HIVEMIND_SOURCE_HIVED_URL" $HIVEMIND_HTTP_PORT
  artifacts:
    paths:
      - hive_server.pid
    expire_in: 1 week
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
      when: always
    - if: '$CI_COMMIT_BRANCH == "develop"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: on_success
  tags:
    - hivemind

hivemind_stop_server:
  stage: post-deploy
  environment:
    name: "hive serve built from branch $CI_COMMIT_REF_NAME exposed on port $HIVEMIND_HTTP_PORT"
    action: stop
  variables:
    GIT_STRATEGY: none
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
  script:
    - scripts/ci_stop_server.sh hive_server.pid
  needs:
    - job: hivemind_start_server
      artifacts: true
  tags:
    - hivemind
  artifacts:
    paths:
      - hive_server.log

.hivemind_start_api_smoketest: &common_api_smoketest_job
  stage: e2e-test
  environment: hive-4.pl.syncad.com
  needs:
    - job: hivemind_start_server
      artifacts: true
  variables:
    GIT_STRATEGY: none
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: on_success
  tags:
    - hivemind

bridge_api_smoketest:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_patterns/ api_smoketest_bridge.xml
  artifacts:
    reports:
      junit: api_smoketest_bridge.xml

bridge_api_smoketest_negative:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" bridge_api_negative/ api_smoketest_bridge_negative.xml
  artifacts:
    reports:
      junit: api_smoketest_bridge_negative.xml

condenser_api_smoketest:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_patterns/ api_smoketest_condenser_api.xml
  artifacts:
    reports:
      junit: api_smoketest_condenser_api.xml

condenser_api_smoketest_negative:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" condenser_api_negative/ api_smoketest_condenser_api_negative.xml
  artifacts:
    reports:
      junit: api_smoketest_condenser_api_negative.xml

database_api_smoketest:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_patterns/ api_smoketest_database_api.xml
  artifacts:
    reports:
      junit: api_smoketest_database_api.xml

database_api_smoketest_negative:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" database_api_negative/ api_smoketest_database_api_negative.xml
  artifacts:
    reports:
      junit: api_smoketest_database_api_negative.xml

follow_api_smoketest:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_patterns/ api_smoketest_follow_api.xml
  artifacts:
    reports:
      junit: api_smoketest.xml

follow_api_smoketest_negative:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" follow_api_negative/ api_smoketest_follow_api_negative.xml
  artifacts:
    reports:
      junit: api_smoketest_follow_api_negative.xml

tags_api_smoketest:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_patterns/ api_smoketest_tags_api.xml
  artifacts:
    reports:
      junit: api_smoketest_tags_api.xml

tags_api_smoketest_negative:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" tags_api_negative/ api_smoketest_tags_api_negative.xml

mock_tests:
  <<: *common_api_smoketest_job
  script:
    - scripts/ci_start_api_smoketest.sh localhost "$HIVEMIND_HTTP_PORT" mock_tests/ api_smoketest_mock_tests.xml

api_smoketest_benchmark:
  stage: benchmark-tests
  environment: hive-4.pl.syncad.com
  needs:
    - job: hivemind_start_server
      artifacts: true
  allow_failure: true
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: on_success
  tags:
    - hivemind
  script:
    - ./scripts/ci_start_api_benchmarks.sh localhost $HIVEMIND_HTTP_PORT 5
  artifacts:
    when: always
    paths:
      - tavern_benchmarks_report.html
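Note that `HIVEMIND_HTTP_PORT: $((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))` in the `variables:` block above relies on shell arithmetic; GitLab's own variable substitution does not evaluate `$(( ))`, so the expression only takes effect where a shell expands it. A minimal sketch of the intended per-slot port offset, computed explicitly in bash (the default values here are illustrative, not from the repository):

#!/bin/bash
# Sketch: derive a per-job HTTP port so concurrent jobs on one runner
# do not collide. GitLab passes the variables through as text; the
# arithmetic itself happens in the job's shell.
set -euo pipefail

HIVEMIND_HTTP_PORT=${HIVEMIND_HTTP_PORT:-8080}   # base port, e.g. from CI settings
CI_CONCURRENT_ID=${CI_CONCURRENT_ID:-0}          # 0, 1, 2, ... per concurrent slot

PORT=$((HIVEMIND_HTTP_PORT + CI_CONCURRENT_ID))
echo "Serving hivemind on port $PORT"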
stages:
  - build
  - test
  - data-supply
  - deploy
  - e2e-test
  - benchmark-tests
  - post-deploy

.dk-setup-pip: &dk-setup-pip
  - python -m venv .venv
  - source .venv/bin/activate
  - time pip install --upgrade pip setuptools wheel
  - pip --version
  - easy_install --version
  - wheel version
  - pipenv --version
  - poetry --version
  - time pip install --editable .[dev]

.dk-setup-runner-env: &dk-setup-runner-env
  # Setup runner environment (to connect to correct postgres server, mainly).
  - TMP_VAR=$(cat hive-sync-runner-id.txt 2>/dev/null || true); export HIVE_SYNC_RUNNER_ID=${TMP_VAR:-0}
  - eval $(cat "$RUNNER_CONF" | ./scripts/ci/setup_env.py --current-runner-id=${CI_RUNNER_ID} --hive-sync-runner-id=${HIVE_SYNC_RUNNER_ID})

.dk-set-variables: &dk-set-variables
  # - export # List all variables and its values set by Gitlab CI.
  - whoami
  - echo "CI_RUNNER_ID is $CI_RUNNER_ID"
  - echo "CI_PIPELINE_URL is $CI_PIPELINE_URL"
  - echo "CI_PIPELINE_ID is $CI_PIPELINE_ID"
  - echo "CI_COMMIT_SHORT_SHA is $CI_COMMIT_SHORT_SHA"
  - echo "CI_COMMIT_REF_SLUG is $CI_COMMIT_REF_SLUG"
  - export HIVEMIND_DB_NAME=${HIVEMIND_DB_NAME//[^a-zA-Z0-9_]/_}
  - echo "HIVEMIND_DB_NAME is $HIVEMIND_DB_NAME"

.dk-fetch-git-tags: &dk-fetch-git-tags
  # - git fetch --tags # Looks to be unnecessary.
  - git tag -f ci_implicit_tag # Needed to build python package

.dk-start-timer: &dk-start-timer
  - ./scripts/ci/timer.sh start

.dk-stop-timer: &dk-stop-timer
  - ./scripts/ci/timer.sh check

.dk-hive-sync-script-common: &dk-hive-sync-script-common
  - echo "${CI_RUNNER_ID}" > hive-sync-runner-id.txt
  - ./scripts/ci/wait-for-postgres.sh "$RUNNER_POSTGRES_HOST" "$RUNNER_POSTGRES_PORT"
  - export POSTGRES_MAJOR_VERSION=$(./scripts/ci/get-postgres-version.sh)
  - ./scripts/ci/create-db.sh
  - ./scripts/ci/hive-sync.sh
  - ./scripts/ci/collect-db-stats.sh

.dk-rules-for-sync: &dk-rules-for-sync
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
      when: always
    - if: '$CI_COMMIT_BRANCH == "develop"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: manual
    - when: manual

.dk-rules-for-test: &dk-rules-for-test
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: on_success
    - if: '$CI_PIPELINE_SOURCE == "push"'
      when: on_success
    - when: on_success

.dk-default:
  image: hivemind/python:3.6
  interruptible: true
  inherit:
    default: false
    variables: false
  variables:
    GIT_DEPTH: 10
    GIT_STRATEGY: fetch
    GIT_SUBMODULE_STRATEGY: recursive
    PIPENV_VENV_IN_PROJECT: 1
    PIPENV_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pipenv"
    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
    POSTGRES_CLIENT_TOOLS_PATH: /usr/lib/postgresql
    HIVEMIND_DB_NAME: "hive_${CI_COMMIT_REF_SLUG}"
  cache: &dk-global-cache
    # Per-branch caching. CI_COMMIT_REF_SLUG is the same thing.
    # key: "$CI_COMMIT_REF_NAME"
    # Per project caching – use any key.
    # Change this key, if you need to clear cache.
    key: common-1
    paths:
      - .cache/
      - .venv/
      - .tox/
  before_script:
    - *dk-start-timer
    - *dk-fetch-git-tags
    - *dk-set-variables
    - *dk-setup-pip
    - *dk-setup-runner-env
  after_script:
    - *dk-stop-timer

##### Jobs #####

dk-hivemind-sync:
  # Postgres shared on host.
  extends: .dk-default
  <<: *dk-rules-for-sync
  stage: data-supply
  needs: []
  script:
    - *dk-hive-sync-script-common
  artifacts:
    paths:
      - hivemind-sync.log
      - pg-stats
      - hive-sync-runner-id.txt
    expire_in: 7 days
  tags:
    - hivemind-heavy-job

.dk-test-common:
  extends: .dk-default
  <<: *dk-rules-for-test
  needs:
    - job: dk-hivemind-sync
      artifacts: true
  allow_failure: false
  before_script:
    - *dk-start-timer
    - *dk-fetch-git-tags
    - *dk-set-variables
    - *dk-setup-pip
    - *dk-setup-runner-env
    - ./scripts/ci/wait-for-postgres.sh "$RUNNER_POSTGRES_HOST" "$RUNNER_POSTGRES_PORT"
    - ./scripts/ci/hive-server.sh start
  after_script:
    - *dk-stop-timer
  tags:
    - hivemind-light-job

dk-bridge_api_smoketest:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          bridge_api_patterns/ api_smoketest_bridge.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_bridge.xml

dk-bridge_api_smoketest_negative:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          bridge_api_negative/ api_smoketest_bridge_negative.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_bridge_negative.xml

dk-condenser_api_smoketest:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          condenser_api_patterns/ api_smoketest_condenser_api.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_condenser_api.xml

dk-condenser_api_smoketest_negative:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          condenser_api_negative/ api_smoketest_condenser_api_negative.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_condenser_api_negative.xml

dk-database_api_smoketest:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          database_api_patterns/ api_smoketest_database_api.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_database_api.xml

dk-database_api_smoketest_negative:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          database_api_negative/ api_smoketest_database_api_negative.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_database_api_negative.xml

dk-follow_api_smoketest:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          follow_api_patterns/ api_smoketest_follow_api.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest.xml

dk-follow_api_smoketest_negative:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          follow_api_negative/ api_smoketest_follow_api_negative.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_follow_api_negative.xml

dk-tags_api_smoketest:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          tags_api_negative/ api_smoketest_tags_api_negative.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_tags_api_negative.xml

dk-tags_api_smoketest_negative:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      ./scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          tags_api_patterns/ api_smoketest_tags_api.xml \
          $RUNNER_TEST_JOBS
  artifacts:
    when: always
    reports:
      junit: api_smoketest_tags_api.xml

dk-mock_tests:
  stage: e2e-test
  extends: .dk-test-common
  script:
    - |
      scripts/ci/start-api-smoketest.sh \
          localhost "$RUNNER_HIVEMIND_SERVER_HTTP_PORT" \
          mock_tests/ api_smoketest_mock_tests.xml \
          $RUNNER_TEST_JOBS

dk-api-smoketest-benchmark:
  stage: benchmark-tests
  extends: .dk-test-common
  # Temporary failure (when any call is longer than 1s is allowed)
  allow_failure: true
  script:
    - |
      ./scripts/ci/start-api-benchmarks.sh \
          localhost $RUNNER_HIVEMIND_SERVER_HTTP_PORT 5 \
          $RUNNER_TEST_JOBS
    - ./scripts/xml_report_parser.py . ./tests/tests_api/hivemind/tavern
  artifacts:
    when: always
    paths:
      ...
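Both timer anchors above call scripts/ci/timer.sh, which is not shown in this diff. A minimal sketch of a start/check timer matching that contract (hypothetical; the real script may differ):

#!/bin/bash
# Hypothetical sketch of scripts/ci/timer.sh: "start" records a
# timestamp, "check" prints the elapsed wall-clock time.
set -euo pipefail

TIMER_FILE=".job-timer"

case "${1:-}" in
  start)
    date +%s > "$TIMER_FILE"
    ;;
  check)
    if [ -f "$TIMER_FILE" ]; then
      START=$(cat "$TIMER_FILE")
      NOW=$(date +%s)
      echo "Elapsed time: $((NOW - START)) seconds"
    else
      echo "Timer was not started" >&2
      exit 1
    fi
    ;;
  *)
    echo "Usage: $0 {start|check}" >&2
    exit 1
    ;;
esac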
version: "3" version: "3.2"
services: services:
python-3.6:
image: hivemind/python:3.6 python-3.6-dev:
image: hivemind/python:3.6-dev
build: build:
context: . context: .
dockerfile: ./scripts/ci/python/3.6/Dockerfile dockerfile: ./scripts/ci/python/3.6/dev.dockerfile
args: args:
- user=${USER} - user=${USER}
- workdir=/home/${USER} - workdir=/home/${USER}/hivemind
user: ${USER} user: ${USER}
shm_size: 0 # security_opt:
# Below command makes your container running forever. # # Significant performance boost (about 5%), but very insecure.
# # See https://medium.com/better-programming/faster-python-in-docker-d1a71a9b9917
# # See https://docs.docker.com/engine/security/seccomp/
# - seccomp:unconfined
shm_size: 2g
# command: ["tail", "-f", "/dev/null"] # command: ["tail", "-f", "/dev/null"]
volumes:
# Sockets of postgres servers on dockers.
- "postgres-10-run:/var/run/postgres-10"
- "postgres-12-run:/var/run/postgres-12"
# Sockets of postgres servers on host.
- "/var/run/postgresql:/var/run/postgresql"
# For keeping python dependencies created in docker.
- "python-3.6-dev:/home/${USER}"
# Application stuff from host.
- "$PWD/hive:$PWD/hive"
- "$PWD/tests:$PWD/tests"
- "$PWD/hive.conf:$PWD/hive.conf"
- "$PWD/pyproject.toml:$PWD/pyproject.toml"
- "$PWD/README.md:$PWD/README.md"
- "$PWD/setup.cfg:$PWD/setup.cfg"
- "$PWD/setup.py:$PWD/setup.py"
- "$PWD/tox.ini:$PWD/tox.ini"
python-3.6:
image: hivemind/python:3.6
build:
context: .
dockerfile: ./scripts/ci/python/3.6/Dockerfile
args:
- user=worker
user: worker
shm_size: 2g
volumes:
# Sockets of postgres servers on host.
- "/var/run/postgresql:/var/run/postgresql"
python-3.8: python-3.8:
image: hivemind/python:3.8 image: hivemind/python:3.8
shm_size: 0
build: build:
context: . context: .
dockerfile: ./scripts/ci/python/3.8/Dockerfile dockerfile: ./scripts/ci/python/3.8/Dockerfile
args: args:
- user=${USER} - user=worker
- workdir=/home/${USER} user: worker
user: ${USER} shm_size: 2g
# Below command makes your container running forever. volumes:
# command: ["tail", "-f", "/dev/null"] # Sockets of postgres servers on host.
- "/var/run/postgresql:/var/run/postgresql"
postgres-10: postgres-10:
image: hivemind/postgres:10 image: hivemind/postgres:10
...@@ -37,35 +75,17 @@ services: ...@@ -37,35 +75,17 @@ services:
environment: environment:
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
volumes: volumes:
- postgres-10-pgdata:/var/lib/postgresql/data - $PWD/$POSTGRES_10_CONF_FILE:/etc/postgresql/postgresql.conf:ro
- postgres-10-run:/var/run/postgresql
ports: ports:
- "${POSTGRES_10_PUBLISHED_PORT}:5432" - "${POSTGRES_10_PUBLISHED_PORT}:5432"
shm_size: 0 shm_size: 12g
command: [ command: [
"postgres", "postgres",
"-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats", "-c", "config_file=/etc/postgresql/postgresql.conf"
"-c", "track_functions=pl",
"-c", "track_io_timing=on",
"-c", "track_activity_query_size=2048",
"-c", "pg_stat_statements.max=10000",
"-c", "pg_stat_statements.track=all",
"-c", "max_connections=100",
"-c", "shared_buffers=12GB",
"-c", "effective_cache_size=36GB",
"-c", "maintenance_work_mem=2GB",
"-c", "checkpoint_completion_target=0.9",
"-c", "wal_buffers=16MB",
"-c", "default_statistics_target=100",
"-c", "random_page_cost=1.1",
"-c", "effective_io_concurrency=200",
"-c", "work_mem=31457kB",
"-c", "min_wal_size=2GB",
"-c", "max_wal_size=8GB",
"-c", "max_worker_processes=12",
"-c", "max_parallel_workers_per_gather=4",
"-c", "max_parallel_workers=12",
] ]
postgres-12: postgres-12:
image: hivemind/postgres:12 image: hivemind/postgres:12
restart: unless-stopped restart: unless-stopped
...@@ -75,56 +95,38 @@ services: ...@@ -75,56 +95,38 @@ services:
environment: environment:
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
volumes: volumes:
- postgres-12-pgdata:/var/lib/postgresql/data - $PWD/$POSTGRES_12_CONF_FILE:/etc/postgresql/postgresql.conf:ro
- postgres-12-run:/var/run/postgresql
ports: ports:
- "${POSTGRES_12_PUBLISHED_PORT}:5432" - "${POSTGRES_12_PUBLISHED_PORT}:5432"
shm_size: 0 shm_size: 12g
# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
command: [ command: [
"postgres", "postgres",
"-c", "shared_preload_libraries=pg_stat_statements,pg_qualstats", "-c", "config_file=/etc/postgresql/postgresql.conf"
"-c", "track_functions=pl",
"-c", "track_io_timing=on",
"-c", "track_activity_query_size=2048",
"-c", "pg_stat_statements.max=10000",
"-c", "pg_stat_statements.track=all",
"-c", "max_connections=100",
"-c", "shared_buffers=12GB",
"-c", "effective_cache_size=36GB",
"-c", "maintenance_work_mem=2GB",
"-c", "checkpoint_completion_target=0.9",
"-c", "wal_buffers=16MB",
"-c", "default_statistics_target=100",
"-c", "random_page_cost=1.1",
"-c", "effective_io_concurrency=200",
"-c", "work_mem=31457kB",
"-c", "min_wal_size=2GB",
"-c", "max_wal_size=8GB",
"-c", "max_worker_processes=12",
"-c", "max_parallel_workers_per_gather=4",
"-c", "max_parallel_workers=12",
"-c", "max_parallel_maintenance_workers=4",
] ]
hived-node: hived-node:
image: registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55 image: $HIVED_IMAGE
restart: unless-stopped restart: unless-stopped
# ports: ports:
# - "2001:2001" - "$HIVED_PUBLISHED_WS_PORT:8090" # websocket
# - "8090:8090" - "$HIVED_PUBLISHED_HTTP_PORT:8091"
# - "8091:8091" shm_size: 12g
shm_size: 0
entrypoint: /usr/local/hive/consensus/entrypoint.sh entrypoint: /usr/local/hive/consensus/entrypoint.sh
command: >- command: [
--replay-blockchain "--replay-blockchain",
--stop-replay-at-block 5000000 "--stop-replay-at-block 5000000"
]
volumes: volumes:
- $PWD/scripts/ci/hived-node/entrypoint.sh:/usr/local/hive/consensus/entrypoint.sh - $PWD/scripts/ci/hived-node/entrypoint.sh:/usr/local/hive/consensus/entrypoint.sh
- $PWD/scripts/ci/hived-node/config.ini:/usr/local/hive/consensus/datadir/config.ini - $PWD/scripts/ci/hived-node/config.ini:/usr/local/hive/consensus/datadir/config.ini
- ${HIVED_BLOCK_LOG_FILE}:/usr/local/hive/consensus/datadir/blockchain/block_log - ${HIVED_BLOCK_LOG_FILE}:/usr/local/hive/consensus/datadir/blockchain/block_log
- hived-node-datadir:/usr/local/hive/consensus/datadir - hived-node-datadir:/usr/local/hive/consensus/datadir
volumes: volumes:
postgres-10-pgdata: postgres-10-run:
postgres-12-pgdata: postgres-12-run:
hived-node-datadir: hived-node-datadir:
python-3.6-dev:
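For local use, the services above can be brought up with standard docker-compose commands. A possible session, assuming the referenced variables come from an .env file or the environment (the values below are illustrative, not from the repository):

# Illustrative values only; adjust to your environment.
export POSTGRES_PASSWORD=devdevdev
export POSTGRES_10_CONF_FILE=scripts/ci/postgres/10/postgresql.conf  # hypothetical path
export POSTGRES_10_PUBLISHED_PORT=25432

# Build the dev image and start a database service.
docker-compose build python-3.6-dev
docker-compose up -d postgres-10

# Open a shell in the dev container to run hive against it.
docker-compose run --rm python-3.6-dev bash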
@@ -21,13 +21,13 @@ def setup_logging(conf):
        fmt = '%(asctime)s.%(msecs)03d{} %(created).6f ' \
            '%(levelname)s - %(name)s - %(message)s'.format(timezone)
        logging.basicConfig(format=fmt, datefmt=datefmt)
    elif timestamp:
        datefmt='%Y-%m-%d %H:%M:%S'
        timezone = time.strftime('%z')
        fmt = '%(asctime)s.%(msecs)03d{} ' \
            '%(levelname)s - %(name)s - %(message)s'.format(timezone)
        logging.basicConfig(format=fmt, datefmt=datefmt)
    elif epoch:
        fmt = '%(created).6f %(levelname)s - %(name)s - %(message)s'
        logging.basicConfig(format=fmt)
    else:
...
stages:
  - run

variables:
  GIT_DEPTH: 10
  GIT_STRATEGY: fetch # It's quick, but noticed errors with that, sometimes.
  # GIT_STRATEGY: clone
  # GIT_STRATEGY: none
  GIT_SUBMODULE_STRATEGY: recursive
  MY_VARIABLE: "bamboo"

default:
  image: hivemind/python:3.6
  interruptible: false
  cache: &global-cache
    # Per-branch caching. CI_COMMIT_REF_SLUG is the same thing.
    # key: "$CI_COMMIT_REF_NAME"
    # Per project caching – use any key.
    # Change this key, if you need to clear cache.
    key: common-1
    paths:
      - .cache/
      - .venv/
      - .tox/
  before_script:
    - echo "I am before_script in child-1. MY_VARIABLE is $MY_VARIABLE"
  after_script:
    - echo "I am after_script in child-1. MY_VARIABLE is $MY_VARIABLE"

child-1-job:
  stage: run
  rules:
    - when: manual
  script:
    - echo "I am script in child-1-job. MY_VARIABLE is $MY_VARIABLE"
    - sleep 30
    - exit 1
  tags:
    - hivemind-light-job
stages:
  - run

variables:
  GIT_DEPTH: 10
  GIT_STRATEGY: fetch # It's quick, but noticed errors with that, sometimes.
  # GIT_STRATEGY: clone
  # GIT_STRATEGY: none
  GIT_SUBMODULE_STRATEGY: recursive
  MY_VARIABLE: "bamboo"

default:
  image: hivemind/python:3.6
  interruptible: false
  cache: &global-cache
    # Per-branch caching. CI_COMMIT_REF_SLUG is the same thing.
    # key: "$CI_COMMIT_REF_NAME"
    # Per project caching – use any key.
    # Change this key, if you need to clear cache.
    key: common-1
    paths:
      - .cache/
      - .venv/
      - .tox/
  before_script:
    - echo "I am before_script in child-2. MY_VARIABLE is $MY_VARIABLE"
  after_script:
    - echo "I am after_script in child-2. MY_VARIABLE is $MY_VARIABLE"

child-2-job:
  stage: run
  script:
    - echo "I am script in child-2-job. MY_VARIABLE is $MY_VARIABLE"
  tags:
    - hivemind-light-job
# See https://gitlab.com/fgrimshaw/dynamic-ci
# See https://gitlab.com/gitlab-org/gitlab/-/issues/212373
# I tested this feature, but our current version of Gitlab 13.2.2
# doesn't support it well. Child pipelines run with no problem,
# but the UI displays wrong badges; for instance, a job was marked as
# still running even though it had finished. Also, jobs with rule
# "when: manual" were started without the user's permission.
# We need to wait for better support in Gitlab UI.

stages:
  - run

variables:
  GIT_STRATEGY: none

trigger-child-1:
  stage: run
  rules:
    - if: '$CI_COMMIT_MESSAGE =~ /child-1/'
      when: always
  trigger:
    include: .gitlab-ci-child-pipeline-1.yaml
    strategy: depend

trigger-child-2:
  stage: run
  rules:
    - if: '$CI_COMMIT_MESSAGE =~ /child-2/'
      when: always
  trigger:
    include: .gitlab-ci-child-pipeline-2.yaml
    strategy: depend
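Because the trigger rules match on $CI_COMMIT_MESSAGE, a child pipeline can be exercised with an empty commit whose message names it, for example:

# Trigger only child pipeline 1 (matches /child-1/ in the commit message).
git commit --allow-empty -m "CI experiment: run child-1"
git push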
@@ -2,25 +2,27 @@
set -euo pipefail

collect_db_stats() {
    echo "Collecting statistics from database ${HIVEMIND_DB_NAME}"

    mkdir -p pg-stats
    DIR=$PWD/pg-stats

    PGPASSWORD=${RUNNER_POSTGRES_APP_USER_PASSWORD} psql \
        --username "${RUNNER_POSTGRES_APP_USER}" \
        --host ${RUNNER_POSTGRES_HOST} \
        --port ${RUNNER_POSTGRES_PORT} \
        --dbname ${HIVEMIND_DB_NAME} << EOF
\timing
\copy (select * from pg_settings) to '$DIR/pg_settings.csv' WITH CSV HEADER
\copy (select * from pg_stat_user_tables) to '$DIR/pg_stat_user_tables.csv' WITH CSV HEADER
-- Disabled, because this table is too big.
-- \copy (select * from pg_stat_statements) to '$DIR/pg_stat_statements.csv' WITH CSV HEADER

/*
-- Looks to be unuseful.
-- See https://github.com/powa-team/pg_qualstats
\echo pg_qualstats index advisor
SELECT v

@@ -33,8 +35,9 @@ SELECT v

FROM json_array_elements(
    pg_qualstats_index_advisor(min_filter => 50)->'unoptimised') v
ORDER BY v::text COLLATE "C";
*/

EOF
}

collect_db_stats
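The collected CSVs end up in the job artifacts; for a quick look on the command line, something like this works:

# Pretty-print the collected table statistics (assumes simple CSV
# without embedded commas in field values).
column -s, -t < pg-stats/pg_stat_user_tables.csv | head -n 20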
@@ -2,46 +2,48 @@
set -euo pipefail

# TODO We have troubles with user, when postgresql is run from docker.
# We need user name `postgres`, not other, I'm afraid.
# ADMIN_POSTGRES_USER=postgres
# ADMIN_POSTGRES_USER_PASSWORD=postgres

create_db() {
    echo "Creating user ${RUNNER_POSTGRES_APP_USER} and database ${HIVEMIND_DB_NAME}, owned by this user"

    TEMPLATE="template_monitoring"

    PGPASSWORD=${RUNNER_POSTGRES_ADMIN_USER_PASSWORD} psql \
        --username "${RUNNER_POSTGRES_ADMIN_USER}" \
        --host ${RUNNER_POSTGRES_HOST} \
        --port ${RUNNER_POSTGRES_PORT} \
        --dbname postgres << EOF

\echo Creating role ${RUNNER_POSTGRES_APP_USER}

DO \$$
BEGIN
    IF EXISTS (SELECT * FROM pg_user
               WHERE pg_user.usename = '${RUNNER_POSTGRES_APP_USER}') THEN
        raise warning 'Role % already exists', '${RUNNER_POSTGRES_APP_USER}';
    ELSE
        CREATE ROLE ${RUNNER_POSTGRES_APP_USER}
            WITH LOGIN PASSWORD '${RUNNER_POSTGRES_APP_USER_PASSWORD}';
    END IF;
END
\$$;

-- We drop database to enable retry of CI job.
\echo Dropping database ${HIVEMIND_DB_NAME}
DROP DATABASE IF EXISTS ${HIVEMIND_DB_NAME};

\echo Creating database ${HIVEMIND_DB_NAME}
CREATE DATABASE ${HIVEMIND_DB_NAME} TEMPLATE ${TEMPLATE}
    OWNER ${RUNNER_POSTGRES_APP_USER};
COMMENT ON DATABASE ${HIVEMIND_DB_NAME} IS
    'Database for Gitlab CI pipeline ${CI_PIPELINE_URL}, commit ${CI_COMMIT_SHORT_SHA}';

\c ${HIVEMIND_DB_NAME}

drop schema if exists hivemind_admin cascade;

create schema hivemind_admin
    authorization ${RUNNER_POSTGRES_APP_USER};

CREATE SEQUENCE hivemind_admin.database_metadata_id_seq
    INCREMENT 1

@@ -63,10 +65,10 @@ CREATE TABLE hivemind_admin.database_metadata

    );

alter sequence hivemind_admin.database_metadata_id_seq
    OWNER TO ${RUNNER_POSTGRES_APP_USER};

alter table hivemind_admin.database_metadata
    OWNER TO ${RUNNER_POSTGRES_APP_USER};

insert into hivemind_admin.database_metadata
    (database_name, ci_pipeline_url, ci_pipeline_id, commit_sha)

@@ -75,6 +77,8 @@ values (

    ${CI_PIPELINE_ID}, '${CI_COMMIT_SHORT_SHA}'
    );

-- VACUUM VERBOSE ANALYZE;

\q
EOF

...
@@ -2,26 +2,30 @@
set -euo pipefail

dump_db() {
    echo "Dumping database ${HIVEMIND_DB_NAME}"

    export PGPASSWORD=${RUNNER_POSTGRES_APP_USER_PASSWORD}
    exec_path=$POSTGRES_CLIENT_TOOLS_PATH/$POSTGRES_MAJOR_VERSION/bin

    echo "Using pg_dump version $($exec_path/pg_dump --version)"

    time $exec_path/pg_dump \
        --username="${RUNNER_POSTGRES_APP_USER}" \
        --host="${RUNNER_POSTGRES_HOST}" \
        --port="${RUNNER_POSTGRES_PORT}" \
        --dbname="${HIVEMIND_DB_NAME}" \
        --schema=public \
        --format=directory \
        --jobs=4 \
        --compress=6 \
        --quote-all-identifiers \
        --lock-wait-timeout=30000 \
        --no-privileges --no-acl \
        --verbose \
        --file="pg-dump-${HIVEMIND_DB_NAME}"

    unset PGPASSWORD
}

dump_db
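A directory-format dump like this restores with pg_restore; a matching invocation (not part of this commit, shown for completeness) could look like:

# Sketch: restore a directory-format dump produced by dump-db.sh
# into an existing database, using the same parallelism as the dump.
pg_restore \
    --username="${RUNNER_POSTGRES_APP_USER}" \
    --host="${RUNNER_POSTGRES_HOST}" \
    --port="${RUNNER_POSTGRES_PORT}" \
    --dbname="${HIVEMIND_DB_NAME}" \
    --jobs=4 \
    --verbose \
    "pg-dump-${HIVEMIND_DB_NAME}"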
@@ -5,17 +5,16 @@
set -euo pipefail

get_postgres_version() {
    # Get major version of postgres server.
    version=$(
        PGPASSWORD=$RUNNER_POSTGRES_APP_USER_PASSWORD psql -X -A -t \
            --username $RUNNER_POSTGRES_APP_USER \
            --host $RUNNER_POSTGRES_HOST \
            --port ${RUNNER_POSTGRES_PORT} \
            --dbname postgres \
            -c "show server_version_num;"
        )
    echo $(echo $version | cut -c1-2)
}

get_postgres_version
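server_version_num is a six-digit integer on PostgreSQL 10 and later (e.g. 100015 for 10.15, 120004 for 12.4), so the first two characters are the major version; note this assumption breaks on pre-10 servers:

# Examples of the cut -c1-2 mapping used above:
echo 100015 | cut -c1-2   # -> 10
echo 120004 | cut -c1-2   # -> 12
# Assumes a six-digit value, i.e. PostgreSQL 10 or newer.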
# Useful snippets for Gitlab CI, but not used currently.

.postgres-10: &postgres-10
  name: hivemind/postgres:10

@@ -114,17 +56,6 @@ variables:

    "-c", "max_parallel_workers=4",
    ]

.setup-setuptools: &setup-setuptools
  - python -m venv .venv
  - source .venv/bin/activate

@@ -136,9 +67,9 @@ variables:

  - poetry --version
  - time python setup.py develop

.setup-setuptools-no-venv: &setup-setuptools-no-venv
  # No virtual environment here.
  # Setuptools will install all dependencies to PYTHONUSERBASE directory.
  - export PYTHONUSERBASE=./local-site
  - time pip install --upgrade pip setuptools wheel
  - pip --version

@@ -148,7 +79,6 @@ variables:

  - poetry --version
  - mkdir -p `python -m site --user-site`
  - python setup.py install --user --force
  - ln -sf ./local-site/bin/hive "$HIVEMIND_EXEC_NAME"

.setup-pipenv: &setup-pipenv

@@ -167,67 +97,6 @@ variables:

  - pipenv --version
  - poetry --version

##### Jobs #####

@@ -257,31 +126,16 @@ default:

  tags:
    - hivemind-light-job

# Postgres as docker service
.hivemind-sync-postgres-as-service:
  # <<: *default-rules
  stage: data-supply
  services:
    - *postgres-10
    # - *postgres-12
  needs: []
  script:
    # - *hive-sync-script-common
    # - ./scripts/ci/dump-db.sh
  artifacts:
    paths:

@@ -292,131 +146,9 @@ hivemind-sync:

  tags:
    - hivemind-heavy-job

# Test job doing nothing (for debugging CI)
.just-a-test:
  stage: e2e-test
  extends: .e2e-test-common
  script:
    - echo "Run some tests"
@@ -4,17 +4,11 @@
set -euo pipefail

JOB=$1

HIVEMIND_PID=0
MERCY_KILL_TIMEOUT=5
START_DELAY=5

check_pid() {
  if [ -f hive_server.pid ]; then
    HIVEMIND_PID=`cat hive_server.pid`
@@ -24,6 +18,7 @@ check_pid() {
      echo "Process pid $HIVEMIND_PID is running"
    else
      # Process is not running
      echo "Process pid $HIVEMIND_PID is not running"
      rm hive_server.pid
      HIVEMIND_PID=0
    fi
@@ -33,7 +28,7 @@ check_pid() {
}

stop() {
  if [ "$HIVEMIND_PID" -gt 0 ]; then
    HIVEMIND_PID=`cat hive_server.pid`
    # Send INT signal and give it some time to stop.
@@ -52,22 +47,25 @@ stop() {
  fi
}

start() {
  if [ "$HIVEMIND_PID" -gt 0 ]; then
    echo "Hive server is already running (pid $HIVEMIND_PID)"
    exit 0
  fi

  echo "Starting hive server on port ${RUNNER_HIVEMIND_SERVER_HTTP_PORT}"

  USER=${RUNNER_POSTGRES_APP_USER}:${RUNNER_POSTGRES_APP_USER_PASSWORD}
  OPTIONS="host=${RUNNER_POSTGRES_HOST}&port=${RUNNER_POSTGRES_PORT}"
  DATABASE_URL="postgresql://${USER}@/${HIVEMIND_DB_NAME}?${OPTIONS}"

  hive server \
    --log-mask-sensitive-data \
    --pid-file hive_server.pid \
    --http-server-port ${RUNNER_HIVEMIND_SERVER_HTTP_PORT} \
    --steemd-url "${RUNNER_HIVED_URL}" \
    --database-url "${DATABASE_URL}" 2>&1 \
    | tee -ia hivemind-server.log &

  HIVEMIND_PID=$!
@@ -81,11 +79,14 @@ start() {
  if ps -p $HIVEMIND_PID > /dev/null
  then
    echo "Hive server is running (pid $HIVEMIND_PID)"
    # Write the pid to file; sometimes a stale pid is left there.
    echo $HIVEMIND_PID > hive_server.pid
    exit 0
  else
    # Check if the process executed successfully or not.
    if wait $HIVEMIND_PID; then
      echo "Hive server has been started (pid $HIVEMIND_PID)"
      echo $HIVEMIND_PID > hive_server.pid
      exit 0
    else
      RESULT=$?
@@ -107,5 +108,16 @@ start() {
}

main() {
  check_pid
  if [ "$JOB" = "start" ]; then
    start
  elif [ "$JOB" = "stop" ]; then
    stop
  else
    echo "Invalid argument"
    exit 1
  fi
}

main
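A usage sketch for the script above; the script path is illustrative (not taken from the repo), and the RUNNER_* variables are assumed to be exported by the CI environment:

# main() dispatches on the single positional argument.
./scripts/ci/hive-server.sh start   # illustrative path
./scripts/ci/hive-server.sh stop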
@@ -2,25 +2,35 @@
set -euo pipefail

# For debug only!
# RUNNER_HIVEMIND_SYNC_MAX_BLOCK=10000
# RUNNER_HIVED_URL='{"default":"http://hived-node:8091"}'
# RUNNER_HIVED_URL='{"default":"http://172.17.0.1:8091"}'

hive_sync() {
  # Start hive sync process

  cat << EOF
Starting hive sync using hived url: ${RUNNER_HIVED_URL}.
Max sync block is: ${RUNNER_HIVEMIND_SYNC_MAX_BLOCK}.
EOF

  USER=${RUNNER_POSTGRES_APP_USER}:${RUNNER_POSTGRES_APP_USER_PASSWORD}
  OPTIONS="host=${RUNNER_POSTGRES_HOST}&port=${RUNNER_POSTGRES_PORT}"
  DATABASE_URL="postgresql://${USER}@/${HIVEMIND_DB_NAME}?${OPTIONS}"

  hive sync \
    --log-mask-sensitive-data \
    --pid-file hive_sync.pid \
    --test-max-block=${RUNNER_HIVEMIND_SYNC_MAX_BLOCK} \
    --exit-after-sync \
    --test-profile=False \
    --steemd-url "${RUNNER_HIVED_URL}" \
    --prometheus-port 11011 \
    --database-url "${DATABASE_URL}" \
    --mock-block-data-path mock_data/block_data/follow_op/mock_block_data_follow.json \
    2>&1 | tee -i hivemind-sync.log
}

hive_sync
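Note that hive_sync() places the host and port in the URI query string rather than in the authority part; libpq accepts both forms. With assumed values the composed string looks like this:

# Assumed values, for illustration only:
#   RUNNER_POSTGRES_APP_USER=hive   RUNNER_POSTGRES_APP_USER_PASSWORD=pass
#   RUNNER_POSTGRES_HOST=localhost  RUNNER_POSTGRES_PORT=5432
#   HIVEMIND_DB_NAME=hive_test
# Composed DATABASE_URL:
#   postgresql://hive:pass@/hive_test?host=localhost&port=5432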
@@ -8,7 +8,7 @@ plugin = webserver p2p json_rpc
plugin = database_api
# condenser_api enabled per abw request
plugin = condenser_api
plugin = block_api
# gandalf enabled witness + rc
plugin = witness
plugin = rc
@@ -34,7 +34,7 @@ plugin = block_api network_broadcast_api rc_api
history-disable-pruning = 1
account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
# shared-file-dir = "/run/hive"
shared-file-size = 20G
shared-file-full-threshold = 9500
shared-file-scale-rate = 1000
@@ -45,8 +45,8 @@ market-history-bucket-size = [15,60,300,3600,86400]
market-history-buckets-per-size = 5760
p2p-endpoint = 0.0.0.0:2001
p2p-seed-node =
# gtg.openhive.network:2001
transaction-status-block-depth = 64000
transaction-status-track-after-block = 42000000
@@ -4,6 +4,7 @@
MYDIR="$PWD"
WORKDIR="/usr/local/hive/consensus"
IMAGE="registry.gitlab.syncad.com/hive/hive/consensus_node:00b5ff55"

docker run -d \
  --name hived-replay-5000000 \
@@ -14,5 +15,5 @@ docker run -d \
  -v $MYDIR/blockchain/block_log:$WORKDIR/datadir/blockchain/block_log \
  -v $MYDIR/entrypoint.sh:$WORKDIR/entrypoint.sh \
  --entrypoint $WORKDIR/entrypoint.sh \
  $IMAGE \
  --replay-blockchain --stop-replay-at-block 5000000
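To follow the replay started by this script, the standard Docker CLI suffices; the container name comes from the --name flag above:

docker logs -f hived-replay-5000000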
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*'
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - Security and Authentication -
#authentication_timeout = 1min # 1s-600s
#ssl = off
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_dh_params_file = ''
#ssl_cert_file = 'server.crt'
#ssl_key_file = 'server.key'
#ssl_ca_file = ''
#ssl_crl_file = ''
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
#row_security = on
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#replacement_sort_tuples = 150000 # limits use of replacement selection sort
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# use none to disable dynamic shared memory
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel queries
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#max_wal_size = 1GB
#min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Server(s) -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '%m [%p] ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
# - Process Title -
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.utf8' # locale for system error message
# strings
lc_monetary = 'en_US.utf8' # locale for monetary formatting
lc_numeric = 'en_US.utf8' # locale for number formatting
lc_time = 'en_US.utf8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Other Defaults -
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#session_preload_libraries = ''
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = '...' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
shared_preload_libraries='pg_stat_statements,pg_qualstats'
track_functions=pl
track_io_timing=on
track_activity_query_size=2048
pg_stat_statements.max=10000
pg_stat_statements.track=all
max_connections=100
shared_buffers=12GB
effective_cache_size=36GB
maintenance_work_mem=2GB
checkpoint_completion_target=0.9
wal_buffers=16MB
default_statistics_target=100
random_page_cost=1.1
effective_io_concurrency=200
work_mem=31457kB
min_wal_size=2GB
max_wal_size=8GB
max_worker_processes=12
max_parallel_workers_per_gather=4
max_parallel_workers=12
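shared_preload_libraries only takes effect after a full server restart. Once restarted, a quick sanity check from psql (a sketch, assuming superuser access; the database name is illustrative):

psql -d hive_test -c "SHOW shared_preload_libraries;"
psql -d hive_test -c "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;"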
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*'
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - SSL -
#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 25
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
max_wal_size = 1GB
min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#primary_conninfo = '' # connection string to sending server
# (change requires restart)
#primary_slot_name = '' # replication slot on sending server
# (change requires restart)
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_transaction_sample_rate = 0.0 # Fraction of transactions whose statements
# are logged regardless of their duration. 1.0 logs all
# statements from all transactions, 0.0 never logs.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '%m [%p] ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# - Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.utf8' # locale for system error message
# strings
lc_monetary = 'en_US.utf8' # locale for monetary formatting
lc_numeric = 'en_US.utf8' # locale for number formatting
lc_time = 'en_US.utf8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = '...' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
# https://pgtune.leopard.in.ua/#/ oltp 48G ram, 12 cpus, ssd
shared_preload_libraries='pg_stat_statements,pg_qualstats'
track_functions=pl
track_io_timing=on
track_activity_query_size=2048
pg_stat_statements.max=10000
pg_stat_statements.track=all
max_connections=100
shared_buffers=12GB
effective_cache_size=36GB
maintenance_work_mem=2GB
checkpoint_completion_target=0.9
wal_buffers=16MB
default_statistics_target=100
random_page_cost=1.1
effective_io_concurrency=200
work_mem=31457kB
min_wal_size=2GB
max_wal_size=8GB
max_worker_processes=12
max_parallel_workers_per_gather=4
max_parallel_workers=12
max_parallel_maintenance_workers=4