Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (294)
Showing 315 additions and 52 deletions
......@@ -26,6 +26,19 @@ variables:
include:
  - template: Workflows/Branch-Pipelines.gitlab-ci.yml
  - local: '/scripts/ci-helpers/prepare_data_image_job.yml'
  - project: 'hive/common-ci-configuration'
    ref: 7c58303ffb61d91ef9662ec20e7b498bc6f62b8f
    file:
      - '/templates/python_projects.gitlab-ci.yml'

verify_poetry_lock_sanity:
  extends: .verify_poetry_lock_sanity_template
  stage: build_and_test_phase_1
  variables:
    PYPROJECT_DIR: "$CI_PROJECT_DIR/tests/integration/haf-local-tools"
  tags:
    - public-runner-docker

.haf_image_build:
  extends: .prepare_haf_image
......@@ -370,6 +383,28 @@ replay_with_update:
    - public-runner-docker
    - hived-for-tests

replay_with_app:
  extends: .job-defaults
  image:
    name: $HAF_IMAGE_NAME
    entrypoint: [""]
  stage: build_and_test_phase_2
  needs:
    - job: haf_image_build
      artifacts: true
  variables:
    PG_ACCESS: "host all all 127.0.0.1/32 trust"
  script:
    - /home/haf_admin/docker_entrypoint.sh --execute-maintenance-script=$CI_PROJECT_DIR/scripts/maintenance-scripts/run_live_replay_with_app.sh
  artifacts:
    paths:
      - "replay_with_app.log"
      - "node_logs.log"
      - "node_logs1.log"
  tags:
    - public-runner-docker
    - hived-for-tests

update_with_wrong_table_schema:
  extends: .job-defaults
  image:
......
......@@ -359,7 +359,12 @@ The test `test.unit.<module_name>` is added to ctest.
If there is a need to create a psql extension (to use the `CREATE EXTENSION` psql command), a cmake macro invocation should be added to the cmake file:
`ADD_PSQL_EXTENSION` with parameters:
- NAME - name of extension. Must match the basename of the <name_of_extension>.control file in the source directory (see https://www.postgreSQL.org/docs/14/extend-extensions.html#id-1.8.3.18.11 ).
- SOURCES - list of sql scripts. The order of the files is important since they are compiled into one sql script.
- SCHEMA_SOURCES - list of sql scripts with schema changes (e.g. tables, views, casts, operators).
- DEPLOY_SOURCES - list of sql scripts with repeatable changes (e.g. functions, procedures).
The order of the files in sources is important since they are compiled into one sql script.
When installing HAF for the first time, both SCHEMA_SOURCES and DEPLOY_SOURCES will be applied.
When upgrading HAF, only DEPLOY_SOURCES will be applied.
Because casts and operators need existing function definitions, the scripts defining those functions need to be listed in both SCHEMA_SOURCES and DEPLOY_SOURCES.
The macro creates a new target extension.<name_of_extension>. The command `ninja extension.<name_of_extension>` will create a psql extension in `${CMAKE_BINARY_DIR}/extensions/<name_of_extension>`.
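For illustration, a minimal invocation could look like the sketch below. The extension name and script names are hypothetical; only the parameter layout follows the description above.

ADD_PSQL_EXTENSION(
    NAME my_extension        # hypothetical; must match my_extension.control in the source directory
    SCHEMA_SOURCES
        tables.sql           # tables and views
        cast_functions.sql   # functions referenced by casts (listed again below)
        casts.sql            # casts and operators
    DEPLOY_SOURCES
        cast_functions.sql   # repeatable objects are reapplied on upgrade
        functions.sql        # remaining functions and procedures
)

With such an invocation, `ninja extension.my_extension` would build the extension into `${CMAKE_BINARY_DIR}/extensions/my_extension`.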
......
#! /bin/bash
set -euo pipefail
echo "Starting the container with user $(whoami) with uid $(id -u)"
......@@ -96,6 +95,16 @@ run_instance() {
sudo --user=hived -En /bin/bash << EOF
echo "Attempting to execute hived using additional command line arguments:" "${HIVED_ARGS[@]}"
if [ ! -f "$DATADIR/config.ini" ]; then
echo "No config file exists, creating a default config file"
/home/hived/bin/hived --webserver-ws-endpoint=0.0.0.0:${WS_PORT} --webserver-http-endpoint=0.0.0.0:${HTTP_PORT} --p2p-endpoint=0.0.0.0:${P2P_PORT} \
--data-dir="$DATADIR" --shared-file-dir="$SHM_DIR" \
--plugin=sql_serializer --psql-url="dbname=haf_block_log host=/var/run/postgresql port=5432" \
${HIVED_ARGS[@]} --dump-config > /dev/null 2>&1
sed -i 's/^plugin = .*$/plugin = node_status_api account_by_key account_by_key_api block_api condenser_api database_api json_rpc market_history market_history_api network_broadcast_api p2p rc_api reputation reputation_api rewards_api state_snapshot transaction_status transaction_status_api wallet_bridge_api webserver/g' "$DATADIR/config.ini"
fi
/home/hived/bin/hived --webserver-ws-endpoint=0.0.0.0:${WS_PORT} --webserver-http-endpoint=0.0.0.0:${HTTP_PORT} --p2p-endpoint=0.0.0.0:${P2P_PORT} \
--data-dir="$DATADIR" --shared-file-dir="$SHM_DIR" \
--plugin=sql_serializer --psql-url="dbname=haf_block_log host=/var/run/postgresql port=5432" \
......@@ -121,7 +130,7 @@ return ${status}
# shellcheck disable=SC2317
cleanup () {
echo "Performing cleanup...."
hived_pid=$(pidof 'hived' || echo '') # pidof returns 1 if hived isn't running, which crashes the script
hived_pid=$(pidof 'hived' || echo '') # pidof returns 1 if hived isn't running, which crashes the script
echo "Hived pid: $hived_pid"
jobs -l
......@@ -145,6 +154,17 @@ prepare_pg_hba_file() {
EOF
}
create_conf_d_directory_if_necessary() {
# PostgreSQL looks for additional config files in this directory. Usually, the user will bind-mount
# config files into this location. If they don't, create an empty directory so PostgreSQL doesn't
# error out at startup
if sudo --user=postgres -n [ ! -e "/home/hived/datadir/haf_postgresql_conf.d" ]; then
sudo -n mkdir -p "/home/hived/datadir/haf_postgresql_conf.d"
sudo -n chown -Rc postgres:postgres "/home/hived/datadir/haf_postgresql_conf.d"
fi
}
# https://gist.github.com/CMCDragonkai/e2cde09b688170fb84268cafe7a2b509
# If we do `trap cleanup INT QUIT TERM` directly, then using `exit` command anywhere
# in the script will exit the script without triggering the cleanup
......@@ -155,7 +175,7 @@ trap cleanup EXIT
sudo --user=hived -n mkdir -p "$DATADIR/blockchain"
# PostgreSQL configuration (postgresql.conf) has data_directory hardcoded as '/home/hived/datadir/haf_db_store/pgdata' and custom configuration path as
# /home/hived/datadir/haf_postgresql_conf.d/custom_postgres.conf. As such we need to make /home/hived/datadir a symbolic link to the actual data directory if
# /home/hived/datadir/haf_postgresql_conf.d/custom_postgres.conf. As such we need to make /home/hived/datadir a symbolic link to the actual data directory if
# variable $DATADIR is set to a non-default value.
if [[ "$DATADIR" != "/home/hived/datadir" ]]; then
echo "Non-standard datadir requested: $DATADIR. Adding symbolic link and setting permissions..."
......@@ -170,29 +190,26 @@ sudo -n --user=hived mkdir -p -m 755 "$HAF_DB_STORE"
# Prepare HBA file before starting PostgreSQL
prepare_pg_hba_file
create_conf_d_directory_if_necessary
# Handle PGCTLTIMEOUT if set
[[ -n ${PGCTLTIMEOUT:-} ]] && echo "PGCTLTIMEOUT = ${PGCTLTIMEOUT}" | sudo tee "/etc/postgresql/${POSTGRES_VERSION}/main/environment"
# cat "/etc/postgresql/${POSTGRES_VERSION}/main/environment"
if [ -d "$PGDATA" ]
then
echo "Attempting to setup postgres instance already containing HAF database..."
# In case the container is restarted over an already existing (and potentially filled) data directory, we need to be sure that the docker-internal postgres has the HFM extension deployed
sudo -n /home/haf_admin/haf/scripts/setup_postgres.sh --haf-admin-account=haf_admin --haf-binaries-dir="/home/haf_admin/build" --haf-database-store="/home/hived/datadir/haf_db_store/tablespace"
sudo -n "/usr/share/postgresql/${POSTGRES_VERSION}/extension/hive_fork_manager_update_script_generator.sh" --haf-admin-account=haf_admin --haf-db-name=haf_block_log
echo "Postgres instance setup completed."
else
sudo --user=hived -n mkdir "$PGDATA"
sudo --user=hived -n mkdir "$HAF_DB_STORE/tablespace"
if sudo --user=postgres -n [ ! -d "$PGDATA" -o ! -f "$PGDATA/PG_VERSION" ]; then
sudo --user=hived -n mkdir -p "$PGDATA"
sudo --user=hived -n mkdir -p "$HAF_DB_STORE/tablespace"
sudo -n chown -Rc postgres:postgres "$HAF_DB_STORE"
sudo -n chown -Rc postgres:postgres "$PGDATA"
echo "Attempting to setup postgres instance: running initdb..."
# This is an exception to using the /etc/init.d/postgresql script to manage postgres - maybe there is a better way to force initdb from a regular script.
sudo --user=postgres -n PGDATA="$PGDATA" "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/initdb"
# initdb will refuse to run in a non-empty directory, so run initdb in an empty temporary directory then copy the files over
mkdir -p /tmp/$$/pgdata
sudo -n chown -Rc postgres:postgres /tmp/$$/pgdata
sudo --user=postgres -n PGDATA="/tmp/$$/pgdata" "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/initdb"
sudo --user=postgres -n bash -c "cd /tmp/$$/pgdata && tar cf - ." | sudo --user=postgres -n bash -c "cd \"$PGDATA\" && tar xf -"
sudo -n rm -r /tmp/$$/pgdata
echo "Attempting to setup postgres instance: running setup_postgres.sh..."
......@@ -203,6 +220,14 @@ else
/home/haf_admin/haf/scripts/setup_db.sh --haf-db-admin=haf_admin --haf-db-name=haf_block_log --haf-app-user=haf_app_admin
sudo -n /home/haf_admin/haf/scripts/setup_pghero.sh --database=haf_block_log
else
echo "Attempting to setup postgres instance already containing HAF database..."
# In case the container is restarted over an already existing (and potentially filled) data directory, we need to be sure that the docker-internal postgres has the HFM extension deployed
sudo -n /home/haf_admin/haf/scripts/setup_postgres.sh --haf-admin-account=haf_admin --haf-binaries-dir="/home/haf_admin/build" --haf-database-store="/home/hived/datadir/haf_db_store/tablespace"
sudo -n "/usr/share/postgresql/${POSTGRES_VERSION}/extension/hive_fork_manager_update_script_generator.sh" --haf-admin-account=haf_admin --haf-db-name=haf_block_log
echo "Postgres instance setup completed."
fi
cd "$DATADIR"
......@@ -249,8 +274,8 @@ status=0
if [ ${DO_MAINTENANCE} -eq 1 ];
then
echo "Running maintance script located at $MAINTENANCE_SCRIPT_NAME"
$MAINTENANCE_SCRIPT_NAME
echo "Running maintance script located at ${MAINTENANCE_SCRIPT_NAME} using additional command line arguments:" "${HIVED_ARGS[@]}"
$MAINTENANCE_SCRIPT_NAME ${HIVED_ARGS[@]}
elif [ ${PERFORM_DUMP} -eq 1 ];
then
echo "Attempting to perform instance snapshot dump"
......
......@@ -800,6 +800,7 @@ max_wal_senders = 0
# Set to 4 to build indexes faster
max_parallel_maintenance_workers = 4
# Allows to override above configuration settings
# Allows overriding above configuration settings
include_if_exists = '/home/hived/datadir/haf_postgresql_conf.d/custom_postgres.conf'
# include files ending in '.conf' from
include_dir = '/home/hived/datadir/haf_postgresql_conf.d'
Subproject commit 3e43cbeaaaf356c1e7f1c8641299ad03aa17fc4d
Subproject commit 9d96d892592b16271af470f1dce81de2e0ab65da
include:
  - project: 'hive/hive'
    ref: 3e43cbeaaaf356c1e7f1c8641299ad03aa17fc4d #develop
    ref: 610e7072cf897ec5ae8014c8b965e4996b60363f #develop
    file: '/scripts/ci-helpers/prepare_data_image_job.yml'
.prepare_haf_image:
......
......@@ -34,6 +34,13 @@ BEGIN
END
$$;
DO $$
BEGIN
-- This is needed in postgres 15 or later. In 14, roles automatically have the admin option
GRANT hive_applications_group TO haf_app_admin WITH ADMIN OPTION;
END
$$;
DO $$
BEGIN
CREATE ROLE hived WITH LOGIN INHERIT IN ROLE hived_group;
......
# Running tests locally
It is possible to run some tests locally via the maintenance scripts:
docker run <image name> --execute-maintenance-script=<script name> [ arguments ]
For example:
docker run -ePYTEST_NUMBER_OF_PROCESSES="0" -ePG_ACCESS="host all all 127.0.0.1/32 trust" registry.gitlab.syncad.com/hive/haf/testnet-base_instance:testnet-base_instance-4a2d57c020d8f04602de36f82f31b9eea14acfea --execute-maintenance-script=/home/haf_admin/haf/scripts/maintenance-scripts/run_haf_system_tests.sh test_operations_after_switching_fork.py
docker run -ePG_ACCESS="host all all 127.0.0.1/32 trust" registry.gitlab.syncad.com/hive/haf/base_instance:base_instance-4a2d57c020d8f04602de36f82f31b9eea14acfea --execute-maintenance-script=/home/haf_admin/haf/scripts/maintenance-scripts/run_hfm_functional_tests.sh
PG_ACCESS is an environment variable required by the functional and system tests; arguments are optional and currently work only in the system tests.
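The live replay durability test added in this revision follows the same pattern. As a sketch, the image tag below is simply reused from the examples above and the host path is a placeholder; the script itself requires a /blockchain mount providing block_log_5m/block_log with at least the first 5000000 blocks:

docker run -ePG_ACCESS="host all all 127.0.0.1/32 trust" -v /path/to/blockchain:/blockchain registry.gitlab.syncad.com/hive/haf/base_instance:base_instance-4a2d57c020d8f04602de36f82f31b9eea14acfea --execute-maintenance-script=/home/haf_admin/haf/scripts/maintenance-scripts/run_live_replay_with_app.sh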
......@@ -7,6 +7,16 @@ SCRIPTSDIR="$SCRIPTPATH/.."
LOG_FILE=applications_system_tests.log
source "$SCRIPTSDIR/maintenance-scripts/ci_common.sh"
ARGS=()
while [ $# -gt 0 ]; do
case "$1" in
*)
echo "Attempting to collect option: ${1}"
ARGS+=("$1")
;;
esac
shift
done
test_start
......@@ -26,6 +36,6 @@ echo -e "\e[0Ksection_end:$(date +%s):python_venv\r\e[0K"
cd "${REPO_DIR}/tests/integration/system/applications"
pytest --junitxml report.xml -n "${PYTEST_NUMBER_OF_PROCESSES}" -m "not mirrornet"
pytest --junitxml report.xml -n "${PYTEST_NUMBER_OF_PROCESSES}" -m "not mirrornet" ${ARGS[@]}
test_end
......@@ -7,6 +7,16 @@ SCRIPTSDIR="$SCRIPTPATH/.."
LOG_FILE=haf_system_tests.log
source "$SCRIPTSDIR/maintenance-scripts/ci_common.sh"
ARGS=()
while [ $# -gt 0 ]; do
case "$1" in
*)
echo "Attempting to collect option: ${1}"
ARGS+=("$1")
;;
esac
shift
done
test_start
......@@ -26,6 +36,6 @@ python3 -m venv --system-site-packages venv/
echo -e "\e[0Ksection_end:$(date +%s):python_venv\r\e[0K"
cd "${REPO_DIR}/tests/integration/system/haf"
pytest --junitxml report.xml -n "${PYTEST_NUMBER_OF_PROCESSES}" -m "not mirrornet"
pytest --junitxml report.xml -n "${PYTEST_NUMBER_OF_PROCESSES}" -m "not mirrornet" ${ARGS[@]}
test_end
#! /bin/bash
# Test scenario
# This is a durability test which checks if syncing blocks
# works correctly while an application is processing in parallel.
# In the past, HAF or the application violated FK constraints when hived was processing
# new irreversible block events while the application was attaching its contexts or finding a new event to process.
# test scenario:
# 1. HAF is replayed to 1m of blocks and stops
# 2. HAF is started in the background to continue replay with limit 1.02m but now with a huge psql-livesync-threshold
# 3. a SQL HAF app is started to sync blocks in the background
#    this means hived is still replaying from the block_log file, but the sql_serializer is syncing blocks in LIVE state
#    this gives us frequent context detaching, moving blocks one by one, and irreversible block updates
# 4. the HAF app is stopped after syncing 1.02m of blocks
#
# Expected results: hived and the psql process which runs the app both return 0
set -xeuo pipefail
SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
SCRIPTSDIR="$SCRIPTPATH/.."
LOG_FILE=replay_with_app.log
source "$SCRIPTSDIR/maintenance-scripts/ci_common.sh"
NUMBER_OF_BLOCKS_TO_FIRST_REPLAY=1000000
LAST_BLOCK_TO_SYNC=$((${NUMBER_OF_BLOCKS_TO_FIRST_REPLAY}+20000));
test_start
# container must have a /blockchain directory mounted containing a block_log with at least the first 5000000 blocks
export BLOCK_LOG_SOURCE_DIR_5M="/blockchain/block_log_5m"
export PATTERNS_PATH="${REPO_DIR}/tests/integration/replay/patterns/no_filter"
export DATADIR="${REPO_DIR}/datadir"
export REPLAY="--replay-blockchain --stop-replay-at-block $NUMBER_OF_BLOCKS_TO_FIRST_REPLAY"
export REPLAY_CONTINUATION="--replay-blockchain --psql-livesync-threshold=1000000000 --stop-replay-at-block $LAST_BLOCK_TO_SYNC"
export HIVED_PATH="/home/hived/bin/hived"
export COMPRESS_BLOCK_LOG_PATH="/home/hived/bin/compress_block_log"
export DB_NAME=haf_block_log
export DB_ADMIN="haf_admin"
export SETUP_SCRIPTS_PATH="/home/haf_admin/haf/scripts"
if ! test -e "${BLOCK_LOG_SOURCE_DIR_5M}/block_log"
then
echo "container must have /blockchain directory mounted containing block_log with at least 5000000 first blocks"
exit 1
fi
# 1. HAF is replayed to 1m of blocks and stops
echo -e "\e[0Ksection_start:$(date +%s):replay[collapsed=true]\r\e[0KExecuting replay..."
test -n "$PATTERNS_PATH"
mkdir $DATADIR/blockchain -p
cp "$PATTERNS_PATH"/* "$DATADIR" -r
cp ${BLOCK_LOG_SOURCE_DIR_5M}/block_log $DATADIR/blockchain
$HIVED_PATH --data-dir "$DATADIR" $REPLAY --exit-before-sync --psql-url "postgresql:///$DB_NAME" 2>&1 | tee -i node_logs.log
echo -e "\e[0Ksection_end:$(date +%s):replay\r\e[0K"
# 2. HAF is started in the background to continue replay with limit 1.02m but now with a huge psql-livesync-threshold
$HIVED_PATH --data-dir "$DATADIR" $REPLAY_CONTINUATION --exit-before-sync --psql-url "postgresql:///$DB_NAME" 2>&1 | tee -i node_logs1.log &
hived_pid=$!
# 3. a SQL HAF app is started to sync blocks in the background
# run the script that makes the database update
psql -d $DB_NAME -a -v ON_ERROR_STOP=on -U $DB_ADMIN -f "${REPO_DIR}/tests/integration/replay/application.sql"
wait $hived_pid
test_end
#include <hive/plugins/sql_serializer/blockchain_data_filter.hpp>
#include <fc/io/json.hpp>
#include <fc/log/logger.hpp>
#include <fstream>
......
......@@ -52,6 +52,11 @@ ADD_PSQL_EXTENSION(
get_legacy_style_operation.sql
extract_set_witness_properties.sql
trigger_switch/trigger_on.sql
types/types.sql # drop and recreate operation types on upgrade
types/cast_functions.sql
types/casts.sql
types/process_operation.sql
)
ADD_SUBDIRECTORY( shared_lib )
......
......@@ -183,6 +183,11 @@ BEGIN
SELECT MAX(hf.id) INTO __fork_id FROM hive.fork hf WHERE hf.block_num <= _last_synced_block;
-- lock EXCLUSIVE may be taken by hived in function:
-- hive.remove_unecessary_events
-- so here we can get stuck while hived is servicing a new irreversible block notification
LOCK TABLE hive.contexts IN SHARE MODE;
UPDATE hive.contexts
SET fork_id = __fork_id
, irreversible_block = COALESCE( __head_of_irreversible_block, 0 )
......
DROP TYPE IF EXISTS hive.authority_type CASCADE;
CREATE TYPE hive.authority_type AS ENUM( 'OWNER', 'ACTIVE', 'POSTING', 'WITNESS', 'NEW_OWNER_AUTHORITY', 'RECENT_OWNER_AUTHORITY');
DROP TYPE IF EXISTS hive.key_type CASCADE;
CREATE TYPE hive.key_type AS ENUM( 'OWNER', 'ACTIVE', 'POSTING', 'MEMO', 'WITNESS_SIGNING');
DROP TYPE IF EXISTS hive.keyauth_record_type CASCADE;
CREATE TYPE hive.keyauth_record_type AS
(
key_auth TEXT
, authority_kind hive.authority_type
, account_name TEXT
account_name TEXT
, key_kind hive.key_type
, key_auth BYTEA
, account_auth TEXT
, weight_threshold INTEGER
, w INTEGER
);
DROP TYPE IF EXISTS hive.keyauth_c_record_type CASCADE;
CREATE TYPE hive.keyauth_c_record_type AS
(
key_auth TEXT
account_name TEXT
, authority_c_kind INTEGER
, account_name TEXT
, key_auth BYTEA
, account_auth TEXT
, weight_threshold INTEGER
, w INTEGER
);
DROP FUNCTION IF EXISTS hive.get_keyauths_wrapper;
......@@ -23,20 +29,24 @@ CREATE OR REPLACE FUNCTION hive.get_keyauths_wrapper(IN _operation_body hive.ope
RETURNS SETOF hive.keyauth_c_record_type
AS 'MODULE_PATHNAME', 'get_keyauths_wrapped' LANGUAGE C;
DROP FUNCTION IF EXISTS hive.authority_type_c_int_to_enum;
CREATE OR REPLACE FUNCTION hive.authority_type_c_int_to_enum(IN _pos integer)
RETURNS hive.authority_type
DROP FUNCTION IF EXISTS hive.key_type_c_int_to_enum;
CREATE OR REPLACE FUNCTION hive.key_type_c_int_to_enum(IN _pos integer)
RETURNS hive.key_type
LANGUAGE plpgsql
IMMUTABLE
AS
$$
DECLARE
__arr hive.authority_type []:= enum_range(null::hive.authority_type);
__arr hive.key_type []:= enum_range(null::hive.key_type);
BEGIN
return __arr[_pos + 1];
END
$$;
CREATE OR REPLACE FUNCTION hive.public_key_to_string(p_key BYTEA)
RETURNS TEXT
AS 'MODULE_PATHNAME', 'public_key_to_string' LANGUAGE C;
DROP FUNCTION IF EXISTS hive.get_keyauths;
CREATE OR REPLACE FUNCTION hive.get_keyauths(IN _operation_body hive.operation)
RETURNS SETOF hive.keyauth_record_type
......@@ -45,11 +55,14 @@ IMMUTABLE
AS
$$
BEGIN
RETURN QUERY SELECT
key_auth ,
hive.authority_type_c_int_to_enum(authority_c_kind),
account_name
FROM hive.get_keyauths_wrapper(_operation_body);
RETURN QUERY SELECT
account_name,
hive.key_type_c_int_to_enum(authority_c_kind),
key_auth,
account_auth,
weight_threshold,
w
FROM hive.get_keyauths_wrapper(_operation_body);
END
$$;
......@@ -65,7 +78,3 @@ CREATE OR REPLACE FUNCTION hive.get_keyauths_operations()
RETURNS SETOF hive.get_operations_type
AS 'MODULE_PATHNAME', 'get_keyauths_operations' LANGUAGE C;
DROP FUNCTION IF EXISTS hive.is_keyauths_operation;
CREATE OR REPLACE FUNCTION hive.is_keyauths_operation(IN _operation_body hive.operation)
RETURNS Boolean
AS 'MODULE_PATHNAME', 'is_keyauths_operation' LANGUAGE C;
......@@ -21,8 +21,3 @@ DROP FUNCTION IF EXISTS hive.get_metadata_operations;
CREATE OR REPLACE FUNCTION hive.get_metadata_operations()
RETURNS SETOF hive.get_metadata_operations_type
AS 'MODULE_PATHNAME', 'get_metadata_operations' LANGUAGE C;
DROP FUNCTION IF EXISTS hive.is_metadata_operation;
CREATE OR REPLACE FUNCTION hive.is_metadata_operation(IN _operation_body hive.operation)
RETURNS Boolean
AS 'MODULE_PATHNAME', 'is_metadata_operation' LANGUAGE C;
......@@ -66,6 +66,7 @@ BEGIN
IF ( _block_num < __irreversible_head_block ) THEN
RETURN;
END IF;
PERFORM hive.remove_unecessary_events( _block_num );
-- application contexts will use the event to clear data in shadow tables
......
......@@ -289,6 +289,17 @@ DECLARE
__upper_bound_events_id BIGINT := NULL;
__max_block_num INTEGER := NULL;
BEGIN
-- If we cannot get an exclusive lock on the hive.contexts table then we return and will come back here
-- next time, when hived tries to remove events for the next irreversible block.
-- The contexts are locked by the apps during attach: hive.app_context_attach
BEGIN
LOCK TABLE hive.contexts IN ACCESS EXCLUSIVE MODE NOWAIT;
EXCEPTION WHEN SQLSTATE '55P03' THEN
-- 55P03 lock_not_available https://www.postgresql.org/docs/current/errcodes-appendix.html
RETURN;
END;
SELECT consistent_block INTO __max_block_num FROM hive.irreversible_data;
-- find the upper bound of events possible to remove
......
#include "extract_set_witness_properties.hpp"
#include <hive/protocol/types.hpp>
#include <hive/protocol/asset.hpp>
#include <fc/io/json.hpp>
#include <fc/io/raw.hpp>
using namespace hive::protocol;
using witness_set_properties_props_t = fc::flat_map< fc::string, std::vector< char > >;
struct wsp_fill_helper
{
const witness_set_properties_props_t& source;
extract_set_witness_properties_result_t& result;
template<typename T>
void try_fill(const fc::string& pname, const fc::string& alt_pname = fc::string{})
{
auto itr = source.find( pname );
if( itr == source.end() && alt_pname != fc::string{} )
itr = source.find( alt_pname );
if(itr != source.end())
{
T unpack_result;
fc::raw::unpack_from_vector<T>(itr->second, unpack_result);
result[pname] = fc::json::to_string(unpack_result);
}
}
};
void extract_set_witness_properties_from_flat_map(extract_set_witness_properties_result_t& output, const fc::flat_map<fc::string, std::vector<char>>& _input)
{
wsp_fill_helper helper{ _input, output };
helper.try_fill<public_key_type>("key");
helper.try_fill<asset>("account_creation_fee");
helper.try_fill<uint32_t>("maximum_block_size");
helper.try_fill<uint16_t>("hbd_interest_rate", "sbd_interest_rate");
helper.try_fill<int32_t>("account_subsidy_budget");
helper.try_fill<uint32_t>("account_subsidy_decay");
helper.try_fill<public_key_type>("new_signing_key");
helper.try_fill<price>("hbd_exchange_rate", "sbd_exchange_rate");
helper.try_fill<fc::string>("url");
}
void extract_set_witness_properties_from_string(extract_set_witness_properties_result_t& output, const fc::string& _input)
{
witness_set_properties_props_t input_properties{};
fc::from_variant(fc::json::from_string(_input), input_properties);
extract_set_witness_properties_from_flat_map(output, input_properties);
}
#include <fc/string.hpp>
#include <fc/container/flat_fwd.hpp>
#include <vector>
using extract_set_witness_properties_result_t = fc::flat_map<fc::string, fc::string>;
void extract_set_witness_properties_from_flat_map(extract_set_witness_properties_result_t& output, const fc::flat_map<fc::string, std::vector<char>>& _input);
void extract_set_witness_properties_from_string(extract_set_witness_properties_result_t& output, const fc::string& _input);