Compare revisions

Changes are shown as if the source revision was being merged into the target revision.


Commits on Source (20)
Showing with 265 additions and 171 deletions
......@@ -9,13 +9,18 @@ stages:
variables:
PYTEST_NUMBER_OF_PROCESSES: 8
CTEST_NUMBER_OF_JOBS: 4
GIT_STRATEGY: clone
GIT_DEPTH: 1
GIT_SUBMODULE_DEPTH: 1
GIT_SUBMODULE_STRATEGY: recursive
GIT_SUBMODULE_UPDATE_FLAGS: --jobs 4
FF_ENABLE_JOB_CLEANUP: 1
GIT_STRATEGY: clone
# uses registry.gitlab.syncad.com/hive/haf/ci-base-image:ubuntu22.04-17
BUILDER_IMAGE_TAG: "@sha256:234d3592e53d4cd7cc6df8e61366e8cbe69ac439355475c34fb2b0daf40e7a26"
FF_NETWORK_PER_BUILD: 1
# uses registry.gitlab.syncad.com/hive/haf/ci-base-image:ubuntu24.04-1
BUILDER_IMAGE_TAG: "@sha256:fc149082a4ee91ed622a14d283ae7fe44d13b123f2927d2e71a2167bbe63fab0"
CI_DEBUG_SERVICES: "true"
SETUP_SCRIPTS_PATH: "$CI_PROJECT_DIR/scripts"
TEST_TOOLS_NODE_DEFAULT_WAIT_FOR_LIVE_TIMEOUT: 60
......@@ -28,12 +33,7 @@ variables:
include:
- template: Workflows/Branch-Pipelines.gitlab-ci.yml
- local: '/scripts/ci-helpers/prepare_data_image_job.yml'
- project: 'hive/common-ci-configuration'
ref: e74d7109838ff05fdc239bced6a726aa7ad46a9b
file:
- '/templates/python_projects.gitlab-ci.yml'
- '/templates/cache_cleanup.gitlab-ci.yml'
- '/templates/docker_image_jobs.gitlab-ci.yml'
# Do not include common-ci-configuration here, it is already referenced by scripts/ci-helpers/prepare_data_image_job.yml included from Hive
verify_poetry_lock_sanity:
extends: .verify_poetry_lock_sanity_template
......@@ -527,24 +527,53 @@ update_with_wrong_table_schema:
- public-runner-docker
- hived-for-tests
# job responsible for replaying data using preconfigured filtering options specified in given config.ini file
replay_filtered_haf_data_accounts_body_operations:
extends: .prepare_haf_data_5m
needs:
- job: haf_image_build
artifacts: true
stage: build_and_test_phase_1
variables:
HIVE_NETWORK_TYPE: mainnet
BLOCK_LOG_SOURCE_DIR: "$BLOCK_LOG_SOURCE_DIR_5M"
CONFIG_INI_SOURCE: "$CI_PROJECT_DIR/tests/integration/replay/patterns/accounts_body_operations_filtered/config.ini"
DATA_CACHE_DIR: "${PIPELINE_DATA_CACHE_HAF_DIRECTORY}_replay_accounts_body_operations_filtered"
tags:
- data-cache-storage
block_api_tests:
extends: .replay_step
image: $CI_REGISTRY_IMAGE/ci-base-image:ubuntu22.04-8-jmeter
extends: .jmeter_benchmark_job
stage: build_and_test_phase_2
needs:
- job: replay_filtered_haf_data_accounts_body_operations
artifacts: true
- job: haf_image_build
artifacts: true
variables:
FF_NETWORK_PER_BUILD: 1
PATTERNS_PATH: "$CI_PROJECT_DIR/tests/integration/replay/patterns/accounts_body_operations_filtered"
BENCHMARK_DIR: "$CI_PROJECT_DIR/hive/tests/python/hive-local-tools/tests_api/benchmarks"
script:
# setup
- |
echo -e "\e[0Ksection_start:$(date +%s):blocks_api_test_setup[collapsed=true]\r\e[0KSetting up blocks api tests..."
psql $DB_URL -c "CREATE ROLE bench LOGIN PASSWORD 'mark' INHERIT IN ROLE hived_group;"
export BENCHMARK_DB_URL="postgresql://bench:mark@hfm-only-instance:5432/$DB_NAME"
echo -e "\e[0Ksection_end:$(date +%s):blocks_api_test_setup\r\e[0K"
# Allow access from any network to eliminate CI IP addressing problems
HAF_DB_ACCESS: |
"host all haf_admin 0.0.0.0/0 trust"
"host all hived 0.0.0.0/0 trust"
"host all hafah_user 0.0.0.0/0 trust"
"host all all 0.0.0.0/0 scram-sha-256"
BENCHMARK_DB_URL: "postgresql://hived@haf-instance:5432/haf_block_log"
HIVED_UID: $HIVED_UID
services:
- name: ${HAF_IMAGE_NAME}
alias: haf-instance
variables:
PG_ACCESS: "${HAF_DB_ACCESS}"
DATA_SOURCE: "${PIPELINE_DATA_CACHE_HAF_DIRECTORY}_replay_accounts_body_operations_filtered"
LOG_FILE: $CI_JOB_NAME.log
command: ["--replay-blockchain", "--stop-at-block=5000000"]
script:
# run pattern tests
- |
echo -e "\e[0Ksection_start:$(date +%s):blocks_api_test[collapsed=true]\r\e[0KRunning blocks api tests..."
......@@ -567,8 +596,7 @@ block_api_tests:
when: always
expire_in: 1 week
tags:
- public-runner-docker
- hived-for-tests
- data-cache-storage
prepare_haf_data:
extends: .prepare_haf_data_5m
......
......@@ -2,12 +2,12 @@
# docker buildx build --progress=plain --target=ci-base-image --tag registry.gitlab.syncad.com/hive/haf/ci-base-image$CI_IMAGE_TAG --file Dockerfile .
# To be started from cloned haf source directory.
ARG CI_REGISTRY_IMAGE=registry.gitlab.syncad.com/hive/haf/
ARG CI_IMAGE_TAG=ubuntu22.04-17
ARG CI_IMAGE_TAG=ubuntu24.04-1
ARG BUILD_IMAGE_TAG
ARG IMAGE_TAG_PREFIX
FROM registry.gitlab.syncad.com/hive/hive/minimal-runtime:ubuntu22.04-13 AS minimal-runtime
FROM registry.gitlab.syncad.com/hive/hive/minimal-runtime:ubuntu24.04-1 AS minimal-runtime
ENV PATH="/home/haf_admin/.local/bin:$PATH"
......@@ -28,10 +28,10 @@ RUN bash -x ./scripts/setup_ubuntu.sh --haf-admin-account="haf_admin" --hived-ac
# everyone to upgrade their haf_api_node in sync with this commit. We should switch haf_api_node's healthcheck to
# use wget once images based on this Dockerfile are made official, and we can drop curl soon thereafter
RUN apt-get update && \
DEBIAN_FRONTEND=noniteractive apt-get install --no-install-recommends -y postgresql-common gnupg && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-common gnupg && \
/usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y && \
apt-get update && \
DEBIAN_FRONTEND=noniteractive apt-get install --no-install-recommends -y curl postgresql-17 postgresql-17-cron libpq5 libboost-chrono1.74.0 libboost-context1.74.0 libboost-filesystem1.74.0 libboost-thread1.74.0 busybox netcat-openbsd && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y curl postgresql-17 postgresql-17-cron libpq5 libboost-chrono1.83.0 libboost-context1.83.0 libboost-filesystem1.83.0 libboost-thread1.83.0 busybox netcat-openbsd && \
apt-get remove -y gnupg && \
apt-get autoremove -y && \
busybox --install -s
......@@ -44,7 +44,7 @@ RUN useradd -r -s /usr/sbin/nologin -b /nonexistent -c "HAF maintenance service
USER haf_admin
WORKDIR /home/haf_admin
FROM registry.gitlab.syncad.com/hive/hive/ci-base-image:ubuntu22.04-13 AS ci-base-image
FROM registry.gitlab.syncad.com/hive/hive/ci-base-image:ubuntu24.04-1 AS ci-base-image
ENV PATH="/home/haf_admin/.local/bin:$PATH"
......@@ -107,7 +107,7 @@ RUN \
# Here we could use a smaller image without packages specific to build requirements
FROM ${CI_REGISTRY_IMAGE}ci-base-image:$CI_IMAGE_TAG AS base_instance
ENV BUILD_IMAGE_TAG=${BUILD_IMAGE_TAG:-:ubuntu22.04-8}
ENV BUILD_IMAGE_TAG=${BUILD_IMAGE_TAG:-:ubuntu24.04-1}
ARG P2P_PORT=2001
ENV P2P_PORT=${P2P_PORT}
......@@ -208,9 +208,9 @@ EXPOSE ${WS_PORT}
# JSON rpc service
EXPOSE ${HTTP_PORT}
FROM registry.gitlab.syncad.com/hive/haf/minimal-runtime:ubuntu22.04-16 AS minimal-instance
FROM registry.gitlab.syncad.com/hive/haf/minimal-runtime:ubuntu24.04-1 AS minimal-instance
ENV BUILD_IMAGE_TAG=${BUILD_IMAGE_TAG:-:ubuntu22.04-8}
ENV BUILD_IMAGE_TAG=${BUILD_IMAGE_TAG:-:ubuntu24.04-1}
ARG P2P_PORT=2001
ENV P2P_PORT=${P2P_PORT}
......
# syntax=docker/dockerfile:1.4
# docker buildx build --tag registry.gitlab.syncad.com/hive/haf/ci-base-image:$CI_IMAGE_TAG-jmeter --progress=plain --file Dockerfile.jmeter .
ARG CI_IMAGE_TAG=ubuntu22.04-8
FROM phusion/baseimage:jammy-1.0.1 AS build
COPY <<-EOF /opt/patch.sed
s/jtl2junit/m2u/g
s/results file/results file (required)/g
23 i final Options helpOpt = new Options();
23 i helpOpt.addOption("?", "help", false, "");
23 i helpOpt.addOption(new Option("i", CMD_OPTION_INPUT, true, ""));
23 i helpOpt.addOption(new Option("o", CMD_OPTION_OUTPUT, true, ""));
23 i helpOpt.addOption(new Option("t", CMD_OPTION_TESTSUITE_NAME, true, ""));
23 i helpOpt.addOption(new Option("f", M2UConstants.JUNIT_FILTER_SWITCH_NAME, true, ""));
23 i final CommandLine helpCmd = parser.parse( helpOpt, argv );
23 i if (helpCmd.hasOption("help")) {
23 i new HelpFormatter().printHelp( APPLICATION_NAME, options );
23 i System.exit(0);
23 i }
72 i options.addOption("?", "help", false, "Show these usage instructions");
EOF
RUN <<EOF
set -e
# Install system dependencies
apt-get update
apt-get install -y git unzip wget ca-certificates maven openjdk-8-jdk
apt-get clean
rm -rf /var/lib/apt/lists/*
# Prepare tools directory
mkdir -p /opt/tools
cd /opt/tools
# Install Apache JMeter
wget --quiet https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-5.4.3.zip -O jmeter.zip
unzip -qq jmeter.zip
rm jmeter.zip
mv apache-jmeter-5.4.3 jmeter
wget --quiet https://jdbc.postgresql.org/download/postgresql-42.3.1.jar -O /opt/tools/jmeter/lib/postgresql-42.3.1.jar
# Build m2u from source
mkdir -p m2u
git clone --single-branch --branch master https://github.com/tguzik/m2u.git m2u-source
cd m2u-source
find -name CommandLineParser.java -exec sed -i -f /opt/patch.sed {} \;
mvn
# Install m2u
mv target/m2u.jar ../m2u/m2u.jar
cd ../m2u
rm -R ../m2u-source
echo 'java -jar /opt/tools/m2u/m2u.jar $@' > m2u
chmod +x m2u
EOF
FROM registry.gitlab.syncad.com/hive/haf/ci-base-image:$CI_IMAGE_TAG
COPY --from=build /opt/tools /opt/tools
USER root
RUN <<EOF
set -e
# Install system dependencies
apt-get update
apt-get install -y openjdk-8-jre
apt-get clean
rm -rf /var/lib/apt/lists/*
# Create symlinks in bin directory
ln -s /opt/tools/jmeter/bin/jmeter /usr/bin/jmeter
ln -s /opt/tools/m2u/m2u /usr/bin/m2u
EOF
USER haf_admin
RUN <<EOF
set -e
# Install user dependencies
pip3 install prettytable
EOF
\ No newline at end of file
*
\ No newline at end of file
......@@ -20,8 +20,8 @@ The image above shows the main components of a HAF installation:
sql_serializer is the hived plugin which is responsible for pushing the data from blockchain blocks into the HAF database. The plugin also informs the database about the occurrence of microforks (in which case HAF has to revert database changes that resulted from the forked out blocks). It also signals the database when a block has become irreversible (no longer revertable via a fork), so that the info from that block can be moved from the "reversible" tables inside the database to the "irreversible" tables.
Detailed documentation for the sql_serializer is here: [src/sql_serializer/README.md](./src/sql_serializer/README.md)
* **PostgreSQL database**
A HAF database contains data from blockchain blocks in the form of SQL tables (these tables are stored in the "hive" schema inside the database), and it also contains tables for the data generated by HAF apps running on the HAF server (each app has its own separate schema to encapsulate its data). The system utilizes Postgres authentication and authorization mechanisms to protect HAF-based apps from interfering with each other.
* **HIVE FORK MANAGER** is a PostgreSQL extension that implements HAF's API inside the "hive" schema. This extension must be included when creating a new HAF database. This extension defines the format of block data saved in the database. It also defines a set of SQL stored procedures that are used by HAF apps to get data about the blocks. The SQL_SERIALIZER dumps blocks to the tables defined by the hive_fork_manager. This extension defines the process by which HAF apps consume blocks, and ensures that apps cannot corrupt each other's data. The hive_fork_manager is also responsible for rewinding the state of the tables of all the HAF apps running on the server in the case of a micro-fork occurrence. Detailed documentation for hive_fork_manager is here: [src/hive_fork_manager/Readme.md](./src/hive_fork_manager/Readme.md)
A HAF database contains data from blockchain blocks in the form of SQL tables (these tables are stored in the "hafd" schema inside the database), and it also contains tables for the data generated by HAF apps running on the HAF server (each app has its own separate schema to encapsulate its data). The system utilizes Postgres authentication and authorization mechanisms to protect HAF-based apps from interfering with each other.
* **HIVE FORK MANAGER** is a PostgreSQL extension that implements HAF's API inside the "hive" schema. This extension must be included when creating a new HAF database. This extension defines the format of block data saved in the database. It also defines a set of SQL stored procedures that are used by HAF apps to get data about the blocks. The SQL_SERIALIZER dumps blocks to the tables defined by the hive_fork_manager in the 'hafd' schema. This extension defines the process by which HAF apps consume blocks, and ensures that apps cannot corrupt each other's data. The hive_fork_manager is also responsible for rewinding the state of the tables of all the HAF apps running on the server in the case of a micro-fork occurrence. Detailed documentation for hive_fork_manager is here: [src/hive_fork_manager/Readme.md](./src/hive_fork_manager/Readme.md)
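For illustration, below is a minimal, hedged sketch of preparing such a database (the database name `haf_block_log` and the `CASCADE` option are assumptions; the extension itself must already be built and installed on the PostgreSQL server):

```sql
-- Sketch only: create a HAF database and install the hive_fork_manager extension.
-- The extension provides the 'hive' API schema and the 'hafd' data schema described above.
CREATE DATABASE haf_block_log;
\c haf_block_log    -- psql meta-command: connect to the new database
CREATE EXTENSION hive_fork_manager CASCADE;  -- CASCADE pulls in any extensions it depends on
```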
# HAF server quickstart
......
Subproject commit 5129c6fa3704730f4e46fef950ab10f486f6561f
Subproject commit 1a8bfcdf46a4a6430b8ea4b788fc1cbe71aecb99
#! /bin/bash
REGISTRY=${1:-registry.gitlab.syncad.com/hive/haf/}
CI_IMAGE_TAG=ubuntu22.04-17
CI_IMAGE_TAG=ubuntu24.04-1
# exit when any command fails
set -e
......
include:
- project: 'hive/hive'
ref: 1c2fe378cbb7c61147881dce247a6d9c28188f9e #develop
ref: 1a8bfcdf46a4a6430b8ea4b788fc1cbe71aecb99 #develop
file: '/scripts/ci-helpers/prepare_data_image_job.yml'
.prepare_haf_image:
......@@ -36,17 +36,18 @@ include:
BLOCK_LOG_SOURCE_DIR: ""
CONFIG_INI_SOURCE: ""
HIVE_NETWORK_TYPE: mainnet
DATA_CACHE_DIR: "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}"
script:
- mkdir "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir" -pv
- cd "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir"
- flock "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir" $SCRIPTS_PATH/ci-helpers/build_data.sh $HAF_IMAGE_NAME
--data-cache="${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}" --block-log-source-dir="$BLOCK_LOG_SOURCE_DIR" --config-ini-source="$CONFIG_INI_SOURCE"
- mkdir "${DATA_CACHE_DIR}/datadir" -pv
- cd "${DATA_CACHE_DIR}/datadir"
- flock "${DATA_CACHE_DIR}/datadir" $SCRIPTS_PATH/ci-helpers/build_data.sh $HAF_IMAGE_NAME
--data-cache="${DATA_CACHE_DIR}" --block-log-source-dir="$BLOCK_LOG_SOURCE_DIR" --config-ini-source="$CONFIG_INI_SOURCE"
- cd "$CI_PROJECT_DIR"
- cp "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir/hived_uid.env" "$CI_PROJECT_DIR/hived_uid.env"
- cp "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir/docker_entrypoint.log" "${CI_PROJECT_DIR}/docker_entrypoint.log"
- ls -la "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/datadir/"
- cp "${DATA_CACHE_DIR}/datadir/hived_uid.env" "$CI_PROJECT_DIR/hived_uid.env"
- cp "${DATA_CACHE_DIR}/datadir/docker_entrypoint.log" "${CI_PROJECT_DIR}/docker_entrypoint.log"
- ls -la "${DATA_CACHE_DIR}/datadir/"
after_script:
- rm "${DATA_CACHE_HAF_PREFIX}_${HAF_COMMIT}/replay_running" -f
- rm "${DATA_CACHE_DIR}/replay_running" -f
artifacts:
reports:
......
......@@ -51,10 +51,23 @@ def generate_type_string_from_schema(schema):
if schema['format'] == 'date-time':
return 'TIMESTAMP'
return 'TEXT'
elif schema_type == 'array':
items = schema['items']
if '$ref' in items:
reference = items['$ref']
# openapi references typically start with #, but that's not a valid json pointer
if len(reference) > 0 and reference[0] == '#':
reference = reference[1:]
referent = resolve_pointer(collected_openapi_fragments, reference)
return reference.split('/')[-1] + '[]'
if 'type' in items:
return generate_type_string_from_schema(items) + '[]'
elif schema_type == 'boolean':
return 'BOOLEAN'
elif schema_type == 'number':
return 'FLOAT'
elif schema_type == 'object':
return 'object'
else:
assert(False)
......@@ -163,6 +176,8 @@ def generate_code_from_openapi_fragment(openapi_fragment, sql_output):
assert('type' in schema)
if schema['type'] == 'string' and 'enum' in schema:
generate_code_for_enum_openapi_fragment(schema_name, schema['enum'], sql_output)
elif schema['type'] == 'object' and 'properties' and 'x-sql-datatype' in schema:
pass
elif schema['type'] == 'object' and 'properties' in schema:
generate_code_for_object_openapi_fragment(schema_name, schema['properties'], sql_output)
elif schema['type'] == 'array':
......
......@@ -39,7 +39,7 @@ install_all_dev_packages() {
"$SRC_DIR/hive/scripts/setup_ubuntu.sh" --runtime --dev
apt-get update
DEBIAN_FRONTEND=noniteractive apt-get install -y \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
systemd \
libpq-dev \
tox \
......@@ -47,7 +47,7 @@ install_all_dev_packages() {
postgresql-common
/usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y
DEBIAN_FRONTEND=noniteractive apt-get install -y postgresql-17 postgresql-server-dev-17 postgresql-17-cron \
DEBIAN_FRONTEND=noninteractive apt-get install -y postgresql-17 postgresql-server-dev-17 postgresql-17-cron \
netcat-openbsd # needed to correctly handle --skip-hived option
apt-get clean
......
......@@ -109,13 +109,110 @@ all blocks in a batch are fully processed AND the current_block_number has been
#### Using a group of contexts
In certain situations, it becomes necessary to ensure that multiple contexts are synchronized
and point to the same block. This synchronization of contexts allows for consistent behavior
across different applications. To achieve this, there are specific functions available, such as 'hive.app_next_block',
across different applications. To achieve this, there are specific functions/procedures available, such as 'hive.app_next_iteration',
that operate on an array of contexts and move them synchronously.
When using synchronized contexts, it is of utmost importance to ensure that all the contexts within a group
are consistently in the same state. This means that the contexts should always traverse blocks together,
as part of the same group of contexts passed to these functions.
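For example (a hedged sketch based only on functions visible elsewhere in this changeset; the explicit cast of a plain text array to `hive.contexts_group` is an assumption), an application that advances two contexts together with `hive.app_next_iteration` can verify that they remain in the same state:

```sql
-- Sketch: check that a group of contexts is still synchronized after being
-- advanced together, and how far its current block lags behind "now".
SELECT hive.app_check_contexts_synchronized(
           ARRAY['my_app_ctx_a', 'my_app_ctx_b']::hive.contexts_group
       ) AS contexts_in_sync,
       hive.get_app_current_block_age(
           ARRAY['my_app_ctx_a', 'my_app_ctx_b']::hive.contexts_group
       ) AS current_block_age;
```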
#### Managing Indexes
Indexes in **HAF** are managed by **`hived`**, ensuring consistency, security, and optimal
query performance. The **Index Management API** is designed to manage indexes on **HAF-managed
tables** only. Applications must not create indexes manually on these tables. However, for
application-specific private tables, applications retain full control over index creation and
management.
##### - Registering an Index Dependency
Applications should use the function **`hive.register_index_dependency`** to declare an index
they require on **HAF-managed tables**. This function:
- Parses the **`CREATE INDEX`** command.
- Associates the index with a **specific application context**.
- Ensures that indexes are only created by **`hived`**, preventing direct modifications by
applications.
##### Example Usage:
```sql
SELECT hive.register_index_dependency(
'my_app_context',
'CREATE INDEX my_index ON my_schema.my_table (column_name);'
);
```
This request is stored in **`hafd.indexes_constraints`**, and `hived` will later process and
create the required indexes.
##### - Checking Index Creation Status
Applications can check if `hived` has finished creating the registered indexes using
**`hive.check_if_registered_indexes_created`**.
##### Example Usage:
```sql
SELECT hive.check_if_registered_indexes_created('my_app_context');
```
- Returns `TRUE` if all indexes registered under the given application context are created.
- Returns `FALSE` if any index is still missing.
##### - Removing Index Dependencies
When an application no longer needs its registered indexes, it should call
**`hive.remove_index_dependencies`** to clean up the dependency records. However, indexes
are **not removed if other application contexts depend on them**. The system ensures that
indexes remain available until all dependent applications have deregistered their use.
##### Example Usage:
```sql
SELECT hive.remove_index_dependencies('my_app_context');
```
- This removes the application context from the index dependency list.
- If no other application contexts rely on the index, the index will be **dropped**.
##### - Index Creation Timing Considerations
The timing of index creation significantly impacts performance and system efficiency.
Applications should consider:
- **Before Synchronization:** Registering index dependencies prior to synchronization allows
`hived` to incorporate these indexes during its initial setup. This ensures that indexes
are built non-concurrently alongside other HAF indexes, leading to a streamlined process.
- **After Synchronization:** If indexes are registered post-synchronization, `hived` will create
them concurrently to avoid disrupting operations. This may result in longer index creation
times and increased resource usage.
**Recommendation:**
To optimize performance, applications should register their required indexes **before**
initiating synchronization. This approach ensures that index creation is integrated into the
synchronization process, improving efficiency and reducing system load.
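A hedged sketch of this recommended ordering, reusing the placeholder names from the examples above (the polling interval and the surrounding `DO` block are illustrative only):

```sql
-- 1. Before starting synchronization, register every index the app will need
--    on HAF-managed tables, so hived can build them non-concurrently.
SELECT hive.register_index_dependency(
    'my_app_context',
    'CREATE INDEX my_index ON my_schema.my_table (column_name);'
);

-- 2. Before running queries that rely on the index, wait until hived reports
--    that all indexes registered for this context have been created.
DO $$
BEGIN
    WHILE NOT hive.check_if_registered_indexes_created('my_app_context') LOOP
        PERFORM pg_sleep(10);  -- arbitrary polling interval
    END LOOP;
END
$$;
```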
#### Managing Table Vacuuming
`hived` is responsible for maintaining the integrity and performance of the database by executing
periodic vacuum operations at optimal moments. To enhance efficiency and prevent table bloat,
an API for managing table vacuuming has been introduced. This API allows applications to request
vacuuming for specific tables while ensuring that excessive vacuum operations do not negatively
impact system performance.
##### API Functions
##### Requesting Table Vacuuming
Applications can request vacuuming for a table using the function **`hive.app_request_table_vacuum`**.
This function:
- Inserts a vacuum request for the specified table.
- Optionally checks if the table has been vacuumed recently to avoid redundant operations.
- Updates the request status to ensure that the vacuuming process is tracked properly.
###### Example Usage:
An application can request vacuuming of a specific table, ensuring that it has not been vacuumed
within a given interval. If the table was vacuumed recently, the request will be ignored.
```sql
-- Request vacuuming for 'my_table' if it has not been vacuumed in the last hour
SELECT hive.app_request_table_vacuum('my_table', INTERVAL '1 hour');
```
#### Using HAF built-in roles
Based on extensive experience with a diverse range of applications, it is recommended to configure Postgres roles
in accordance with the guidelines depicted in the image below. This illustration provides a comprehensive overview
......@@ -138,12 +235,12 @@ For example, some apps perform irreversible external operations such as a transf
Other apps require very high performance and don't want to incur the extra performance overhead associated with maintaining the data required to roll back blocks in the case of a fork. In such cases, it may make sense to trade off the responsiveness of presenting the most recent blockchain data in order to create an app that can respond to API queries faster and support more users.
HAF distinguishes which apps will only traverse irreversible block data. This means that calls to `hive.app_next_block` will return only the range of irreversible blocks which are not already processed, or NULL (blocks that are not yet marked as irreversible will be excluded). Similarly, the set of views for an irreversible context only delivers a snapshot of irreversible data up to the block already processed by the app.
HAF distinguishes which apps will only traverse irreversible block data. This means that calls to `hive.app_next_iteration` will return only the range of irreversible blocks which are not already processed, or NULL (blocks that are not yet marked as irreversible will be excluded). Similarly, the set of views for an irreversible context only delivers a snapshot of irreversible data up to the block already processed by the app.
The user needs to decide whether an application is non-forking; this can be done during creation of a context with 'hive.app_create_context' by passing the argument
'_is_forking' = FALSE.
It is possible to change an already created context from non-forking to forking and vice versa with methods
`app_context_set_non_forking(context_name)` and `hive.app_context_set_forking(context_name)`
`hive.app_context_set_non_forking(context_name)` and `hive.app_context_set_forking(context_name)`
:warning: **Switching an app from forking to non-forking will delete all its reversible data**
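A hedged sketch of choosing the non-forking mode at context creation and switching it later (only `_is_forking` is documented above; the `_name` and `_schema` parameter names are assumptions taken from the function body changed elsewhere in this diff, and any remaining parameters are assumed to have defaults):

```sql
-- Sketch: create an irreversible-only (non-forking) context for an application.
SELECT hive.app_create_context(
    _name       => 'my_app_context',
    _schema     => 'my_app_schema',
    _is_forking => FALSE
);

-- Later the context can be switched to forking mode...
SELECT hive.app_context_set_forking('my_app_context');

-- ...and back again. Warning: this drops all of the app's reversible data.
SELECT hive.app_context_set_non_forking('my_app_context');
```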
......
......@@ -37,7 +37,7 @@ BEGIN
_name
, _schema
, ( SELECT MAX( hf.id ) FROM hafd.fork hf ) -- current fork id
, COALESCE( ( SELECT hid.consistent_block FROM hafd.irreversible_data hid ), 0 ) -- head of irreversible block
, COALESCE( ( SELECT hid.consistent_block FROM hafd.hive_state hid ), 0 ) -- head of irreversible block
, _is_forking
, _is_attached
, NULL
......@@ -67,7 +67,7 @@ BEGIN
_name
, _schema
, ( SELECT MAX( hf.id ) FROM hafd.fork hf ) -- current fork id
, COALESCE( ( SELECT hid.consistent_block FROM hafd.irreversible_data hid ), 0 ) -- head of irreversible block
, COALESCE( ( SELECT hid.consistent_block FROM hafd.hive_state hid ), 0 ) -- head of irreversible block
, _is_forking
, False
, _stages
......@@ -241,7 +241,7 @@ BEGIN
WHERE hc.name = __lead_context;
SELECT hir.consistent_block INTO __head_of_irreversible_block
FROM hafd.irreversible_data hir;
FROM hafd.hive_state hir;
IF __current_block_num > __head_of_irreversible_block THEN
RAISE EXCEPTION 'Cannot attach context % because the block num % is greater than top of irreversible block %'
......@@ -477,7 +477,7 @@ $BODY$
DECLARE
__result hafd.contexts.irreversible_block%TYPE;
BEGIN
SELECT COALESCE( consistent_block, 0 ) INTO __result FROM hafd.irreversible_data;
SELECT COALESCE( consistent_block, 0 ) INTO __result FROM hafd.hive_state;
RETURN __result;
END;
$BODY$;
......@@ -858,7 +858,7 @@ BEGIN
FROM UNNEST(_contexts) AS context_names(name)
LEFT JOIN hafd.contexts hc USING(name)
JOIN hafd.contexts_attachment hca ON hca.context_id = hc.id
CROSS JOIN hafd.irreversible_data), FALSE);
CROSS JOIN hafd.hive_state), FALSE);
END;
$BODY$
;
......@@ -882,10 +882,13 @@ CREATE OR REPLACE FUNCTION hive.get_app_current_block_age(_contexts hive.context
STABLE
AS $BODY$
BEGIN
RETURN now() - (select min(coalesce(hafd.blocks.created_at, to_timestamp(0))) from
RETURN now() - (select min(coalesce(hafd.blocks.created_at, hafd.blocks_reversible.created_at, to_timestamp(0))) from
UNNEST(_contexts) AS context_names(name)
LEFT JOIN hafd.contexts USING(name)
LEFT JOIN hafd.blocks on num = hafd.contexts.current_block_num);
LEFT JOIN hafd.blocks on hafd.blocks.num = hafd.contexts.current_block_num
LEFT JOIN hafd.blocks_reversible on hafd.blocks_reversible.num = hafd.contexts.current_block_num AND
hafd.blocks_reversible.fork_id = hafd.contexts.fork_id
);
END;
$BODY$;
......
......@@ -69,7 +69,7 @@ BEGIN
, hc.fork_id
INTO __curent_events_id, __current_context_block_num, __current_context_irreversible_block, __current_fork_id
FROM hafd.contexts hc WHERE hc.name = __lead_context;
SELECT consistent_block INTO __newest_irreversible_block_num FROM hafd.irreversible_data;
SELECT consistent_block INTO __newest_irreversible_block_num FROM hafd.hive_state;
-- hived can at any moment commit new events
-- because of read committed, we need to be ready for such situations
......@@ -207,7 +207,7 @@ BEGIN
FROM hafd.contexts as hc
WHERE hc.name = __lead_context;
SELECT consistent_block INTO __newest_irreversible_block_num FROM hafd.irreversible_data;
SELECT consistent_block INTO __newest_irreversible_block_num FROM hafd.hive_state;
IF __current_block_num <= __irreversible_block_num
AND __newest_irreversible_block_num IS NOT NULL THEN
......
......@@ -45,7 +45,7 @@ DECLARE
__lead_context_distance_to_irr_hb INTEGER;
BEGIN
SELECT
( ( SELECT COALESCE( hid.consistent_block, 0 ) - ctx.current_block_num FROM hafd.irreversible_data hid ) ) INTO __lead_context_distance_to_irr_hb
( ( SELECT COALESCE( hid.consistent_block, 0 ) - ctx.current_block_num FROM hafd.hive_state hid ) ) INTO __lead_context_distance_to_irr_hb
FROM hafd.contexts ctx
WHERE ctx.name = _contexts [ 1 ];
......
......@@ -40,7 +40,7 @@ ALTER TABLE hafd.operations OWNER TO hived_group;
ALTER TABLE hafd.transactions_multisig OWNER TO hived_group;
ALTER TABLE hafd.accounts OWNER TO hived_group;
ALTER TABLE hafd.account_operations OWNER TO hived_group;
ALTER TABLE hafd.irreversible_data OWNER TO hived_group;
ALTER TABLE hafd.hive_state OWNER TO hived_group;
ALTER TABLE hafd.blocks_reversible OWNER TO hived_group;
ALTER TABLE hafd.transactions_reversible OWNER TO hived_group;
ALTER TABLE hafd.operations_reversible OWNER TO hived_group;
......@@ -236,6 +236,8 @@ GRANT EXECUTE ON FUNCTION
, hive.remove_obsolete_operations
, hive.detach_table
, hive.app_check_contexts_synchronized(_contexts hive.contexts_group)
, hive.set_sync_state( _new_state hafd.sync_state )
, hive.get_sync_state()
TO hived_group;
GRANT USAGE ON SCHEMA hive to haf_maintainer;
......
......@@ -11,7 +11,7 @@ UNION ALL
(
WITH
consistent_block AS
(SELECT COALESCE(hid.consistent_block, 0) AS consistent_block FROM hafd.irreversible_data hid LIMIT 1)
(SELECT COALESCE(hid.consistent_block, 0) AS consistent_block FROM hafd.hive_state hid LIMIT 1)
,forks AS
(
SELECT hbr.num, max(hbr.fork_id) AS max_fork_id
......@@ -52,7 +52,7 @@ FROM
JOIN (
SELECT hbr.num, MAX(hbr.fork_id) as max_fork_id
FROM hafd.blocks_reversible hbr
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.irreversible_data hid )
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.hive_state hid )
GROUP by hbr.num
) as forks ON forks.max_fork_id = har.fork_id AND forks.num = har.block_num
) reversible
......@@ -120,7 +120,7 @@ FROM (
(
SELECT rb.num, MAX(rb.fork_id) AS max_fork_id
FROM hafd.blocks_reversible rb
WHERE rb.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.irreversible_data hid )
WHERE rb.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.hive_state hid )
GROUP BY rb.num
) visible_blks ON visible_blks.num = hbr.num AND visible_blks.max_fork_id = hbr.fork_id
) t
......@@ -166,7 +166,7 @@ FROM
JOIN (
SELECT hbr.num, MAX(hbr.fork_id) as max_fork_id
FROM hafd.blocks_reversible hbr
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.irreversible_data hid )
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.hive_state hid )
GROUP by hbr.num
) as forks ON forks.max_fork_id = htr.fork_id AND forks.num = htr.block_num
) reversible
......@@ -209,7 +209,7 @@ FROM
(
SELECT hbr.num, MAX(hbr.fork_id) as max_fork_id
FROM hafd.blocks_reversible hbr
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.irreversible_data hid )
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.hive_state hid )
GROUP by hbr.num
) visible_ops on visible_ops.num = hafd.operation_id_to_block_num(o.id) and visible_ops.max_fork_id = o.fork_id
JOIN
......@@ -252,7 +252,7 @@ FROM
(
SELECT hbr.num, MAX(hbr.fork_id) as max_fork_id
FROM hafd.blocks_reversible hbr
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.irreversible_data hid )
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.hive_state hid )
GROUP by hbr.num
) visible_ops on visible_ops.num = hafd.operation_id_to_block_num(o.id) and visible_ops.max_fork_id = o.fork_id
) t
......@@ -283,7 +283,7 @@ FROM (
JOIN (
SELECT hbr.num, MAX(hbr.fork_id) as max_fork_id
FROM hafd.blocks_reversible hbr
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.irreversible_data hid )
WHERE hbr.num > ( SELECT COALESCE( hid.consistent_block, 0 ) FROM hafd.hive_state hid )
GROUP by hbr.num
) as forks ON forks.max_fork_id = htr.fork_id AND forks.num = htr.block_num
) as trr ON trr.trx_hash = htmr.trx_hash AND trr.max_fork_id = htmr.fork_id
......@@ -301,7 +301,7 @@ UNION ALL
(
WITH
consistent_block AS
(SELECT COALESCE(hid.consistent_block, 0) AS consistent_block FROM hafd.irreversible_data hid LIMIT 1)
(SELECT COALESCE(hid.consistent_block, 0) AS consistent_block FROM hafd.hive_state hid LIMIT 1)
,forks AS
(
SELECT hbr.num, max(hbr.fork_id) AS max_fork_id
......
......@@ -113,7 +113,7 @@ BEGIN
-- application contexts will use the event to clear data in shadow tables
INSERT INTO hafd.events_queue( event, block_num )
VALUES( 'NEW_IRREVERSIBLE', _block_num );
UPDATE hafd.irreversible_data SET consistent_block = _block_num;
UPDATE hafd.hive_state SET consistent_block = _block_num;
END;
$BODY$
;
......@@ -141,7 +141,7 @@ BEGIN
UPDATE hafd.irreversible_data SET consistent_block = _block_num;
UPDATE hafd.hive_state SET consistent_block = _block_num;
END;
$BODY$
;
......@@ -153,7 +153,7 @@ CREATE OR REPLACE FUNCTION hive.set_irreversible_dirty()
AS
$BODY$
BEGIN
UPDATE hafd.irreversible_data SET is_dirty = TRUE;
UPDATE hafd.hive_state SET is_dirty = TRUE;
END;
$BODY$
;
......@@ -165,7 +165,7 @@ CREATE OR REPLACE FUNCTION hive.set_irreversible_not_dirty()
AS
$BODY$
BEGIN
UPDATE hafd.irreversible_data SET is_dirty = FALSE;
UPDATE hafd.hive_state SET is_dirty = FALSE;
END;
$BODY$
;
......@@ -179,7 +179,7 @@ $BODY$
DECLARE
__is_dirty BOOL := FALSE;
BEGIN
SELECT is_dirty INTO __is_dirty FROM hafd.irreversible_data;
SELECT is_dirty INTO __is_dirty FROM hafd.hive_state;
RETURN __is_dirty;
END;
$BODY$
......@@ -212,7 +212,7 @@ CREATE OR REPLACE FUNCTION hive.disable_fk_of_irreversible()
AS
$BODY$
BEGIN
PERFORM hive.save_and_drop_foreign_keys( 'hafd', 'irreversible_data' );
PERFORM hive.save_and_drop_foreign_keys( 'hafd', 'hive_state' );
PERFORM hive.save_and_drop_foreign_keys( 'hafd', 'blocks' );
PERFORM hive.save_and_drop_foreign_keys( 'hafd', 'transactions' );
PERFORM hive.save_and_drop_foreign_keys( 'hafd', 'transactions_multisig' );
......@@ -239,7 +239,7 @@ BEGIN
PERFORM hive.restore_indexes( 'hafd.applied_hardforks' );
PERFORM hive.restore_indexes( 'hafd.accounts' );
PERFORM hive.restore_indexes( 'hafd.account_operations' );
PERFORM hive.restore_indexes( 'hafd.irreversible_data' );
PERFORM hive.restore_indexes( 'hafd.hive_state' );
PERFORM hive.reanalyze_indexes_with_expressions();
END;
......@@ -259,7 +259,7 @@ BEGIN
PERFORM hive.restore_foreign_keys( 'hafd.transactions_multisig' );
PERFORM hive.restore_foreign_keys( 'hafd.operations' );
PERFORM hive.restore_foreign_keys( 'hafd.applied_hardforks' );
PERFORM hive.restore_foreign_keys( 'hafd.irreversible_data' );
PERFORM hive.restore_foreign_keys( 'hafd.hive_state' );
PERFORM hive.restore_foreign_keys( 'hafd.accounts' );
PERFORM hive.restore_foreign_keys( 'hafd.account_operations' );
......@@ -455,7 +455,7 @@ BEGIN
RETURN;
END IF;
INSERT INTO hafd.irreversible_data VALUES(1,NULL, FALSE) ON CONFLICT DO NOTHING;
INSERT INTO hafd.hive_state VALUES(1,NULL, FALSE) ON CONFLICT DO NOTHING;
INSERT INTO hafd.events_queue VALUES( 0, 'NEW_IRREVERSIBLE', 0 ) ON CONFLICT DO NOTHING;
INSERT INTO hafd.events_queue VALUES( hive.unreachable_event_id(), 'NEW_BLOCK', 2147483647 ) ON CONFLICT DO NOTHING;
SELECT MAX(eq.id) + 1 FROM hafd.events_queue eq WHERE eq.id != hive.unreachable_event_id() INTO __events_id;
......@@ -565,3 +565,32 @@ END;
$BODY$
;
CREATE OR REPLACE FUNCTION hive.get_sync_state()
RETURNS hafd.sync_state
LANGUAGE plpgsql
STABLE
AS
$BODY$
DECLARE
__result hafd.sync_state;
BEGIN
SELECT state INTO __result
FROM hafd.hive_state;
RETURN __result;
END;
$BODY$
;
CREATE OR REPLACE FUNCTION hive.set_sync_state( _new_state hafd.sync_state )
RETURNS void
LANGUAGE plpgsql
VOLATILE
AS
$BODY$
BEGIN
UPDATE hafd.hive_state SET state = _new_state;
END;
$BODY$
;
......@@ -312,7 +312,7 @@ DECLARE
__upper_bound_events_id BIGINT := NULL;
__max_block_num INTEGER := NULL;
BEGIN
SELECT consistent_block INTO __max_block_num FROM hafd.irreversible_data;
SELECT consistent_block INTO __max_block_num FROM hafd.hive_state;
-- find the upper bound of events possible to remove
SELECT MIN(heq.id) INTO __upper_bound_events_id
......@@ -590,7 +590,7 @@ DECLARE
__consistent_block INTEGER := NULL;
__is_dirty BOOL := TRUE;
BEGIN
SELECT consistent_block, is_dirty INTO __consistent_block, __is_dirty FROM hafd.irreversible_data;
SELECT consistent_block, is_dirty INTO __consistent_block, __is_dirty FROM hafd.hive_state;
IF ( __is_dirty = FALSE ) THEN
RETURN;
......@@ -613,7 +613,7 @@ BEGIN
DELETE FROM hafd.blocks WHERE num > __consistent_block;
UPDATE hafd.irreversible_data SET is_dirty = FALSE;
UPDATE hafd.hive_state SET is_dirty = FALSE;
END;
$BODY$
;
......
......@@ -34,18 +34,24 @@ CREATE TABLE IF NOT EXISTS hafd.blocks (
);
SELECT pg_catalog.pg_extension_config_dump('hafd.blocks', '');
CREATE TABLE IF NOT EXISTS hafd.irreversible_data (
CREATE TYPE hafd.sync_state AS ENUM (
'START', 'WAIT', 'REINDEX_WAIT', 'REINDEX', 'P2P', 'LIVE'
);
CREATE TABLE IF NOT EXISTS hafd.hive_state (
id integer,
consistent_block integer,
is_dirty bool NOT NULL,
state hafd.sync_state NOT NULL DEFAULT 'START',
CONSTRAINT pk_irreversible_data PRIMARY KEY ( id )
);
-- We use ADD CONSTRAINT with ALTER TABLE followed by NOT VALID because the NOT VALID option isn't documented
-- or supported within CREATE TABLE, and thus, seems not to work there.
-- This applies to the following tables as well.
ALTER TABLE hafd.irreversible_data ADD CONSTRAINT fk_1_hive_irreversible_data FOREIGN KEY (consistent_block) REFERENCES hafd.blocks (num) NOT VALID;
SELECT pg_catalog.pg_extension_config_dump('hafd.irreversible_data', '');
ALTER TABLE hafd.hive_state ADD CONSTRAINT fk_1_hive_irreversible_data FOREIGN KEY (consistent_block) REFERENCES hafd.blocks (num) NOT VALID;
SELECT pg_catalog.pg_extension_config_dump('hafd.hive_state', '');
CREATE TABLE IF NOT EXISTS hafd.transactions (
block_num integer NOT NULL,
......
#include <boost/algorithm/string.hpp>
#include "configuration.hpp"
#include "psql_utils/logger.hpp"
#include <boost/algorithm/string.hpp>
#include <cassert>
namespace PsqlTools::QuerySupervisor {
......