Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • hive/hive
1 result
Show changes
Commits on Source (109)
Showing
with 567 additions and 166 deletions
......@@ -15,8 +15,8 @@ testnet_node_build:
stage: build
image: "$CI_REGISTRY_IMAGE/builder$BUILDER_IMAGE_TAG"
script:
# LOW_MEMORY=OFF CLEAR_VOTES=OFF TESTNET=ON ENABLE_MIRA=OFF
- ./ciscripts/build.sh OFF OFF ON OFF
# LOW_MEMORY=OFF CLEAR_VOTES=OFF TESTNET=ON ENABLE_MIRA=OFF HIVE_LINT=ON
- ./ciscripts/build.sh OFF OFF ON OFF ON
- mkdir -p "$CI_JOB_NAME"/tests/unit
- mv build/install-root "$CI_JOB_NAME"
- mv contrib/hived.run "$CI_JOB_NAME"
......@@ -35,8 +35,8 @@ consensus_build:
stage: build
image: "$CI_REGISTRY_IMAGE/builder$BUILDER_IMAGE_TAG"
script:
# LOW_MEMORY=ON CLEAR_VOTES=ON TESTNET=OFF ENABLE_MIRA=OFF
- ./ciscripts/build.sh ON ON OFF OFF
# LOW_MEMORY=ON CLEAR_VOTES=ON TESTNET=OFF ENABLE_MIRA=OFF HIVE_LINT=ON
- ./ciscripts/build.sh ON ON OFF OFF ON
- mkdir "$CI_JOB_NAME"
- mv build/install-root "$CI_JOB_NAME"
- mv contrib/hived.run "$CI_JOB_NAME"
......@@ -86,6 +86,13 @@ plugin_test:
tags:
- public-runner-docker
.beem_setup : &beem_setup |
git clone --depth=1 --single-branch --branch dk-hybrid-operations https://gitlab.syncad.com/hive/beem.git
cd beem
python3 setup.py install
cd ..
mkdir -p build/tests/hive-node-data
beem_tests:
stage: test
needs:
......@@ -95,13 +102,7 @@ beem_tests:
variables:
PYTHONPATH: $CI_PROJECT_DIR/tests/functional
script:
# boilerplate for installing latested beem
- git clone --depth=1 --single-branch --branch dk-hybrid-operations https://gitlab.syncad.com/hive/beem.git
- cd beem
- python3 setup.py install
- cd ..
# stuff specific to this test
- mkdir -p build/tests/hive-node-data
- *beem_setup
- cd tests/functional/python_tests/dhf_tests
- "python3 run_proposal_tests.py initminer hive.fund 5JNHfZYKGaomSFvd4NUdQ9qMcEAC43kujbfjueTHpVapX1Kzq2n --run-hived $CI_PROJECT_DIR/testnet_node_build/install-root/bin/hived --working-dir=$CI_PROJECT_DIR/build/tests/hive-node-data"
- rm -rf $CI_PROJECT_DIR/build/tests/hive-node-data
......@@ -130,13 +131,7 @@ list_proposals_tests:
variables:
PYTHONPATH: $CI_PROJECT_DIR/tests/functional
script:
# boilerplate for installing latested beem
- git clone --depth=1 --single-branch --branch dk-hybrid-operations https://gitlab.syncad.com/hive/beem.git
- cd beem
- python3 setup.py install
- cd ..
# stuff specific to this test
- mkdir -p build/tests/hive-node-data
- *beem_setup
- cd tests/functional/python_tests/dhf_tests
- "python3 list_proposals_tests.py initminer initminer 5JNHfZYKGaomSFvd4NUdQ9qMcEAC43kujbfjueTHpVapX1Kzq2n --run-hived $CI_PROJECT_DIR/testnet_node_build/install-root/bin/hived --working-dir=$CI_PROJECT_DIR/build/tests/hive-node-data --junit-output=list_proposals_tests.xml"
artifacts:
......@@ -149,6 +144,28 @@ list_proposals_tests:
tags:
- public-runner-docker
cli_wallet_tests:
stage: test
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTHONPATH: $CI_PROJECT_DIR/tests/functional
script:
- *beem_setup
- cd tests/functional/python_tests/cli_wallet
- "python3 run.py --hive-path $CI_PROJECT_DIR/testnet_node_build/install-root/bin/hived --hive-working-dir=$CI_PROJECT_DIR/build/tests/hive-node-data --path-to-cli $CI_PROJECT_DIR/testnet_node_build/install-root/bin --creator initminer --wif 5JNHfZYKGaomSFvd4NUdQ9qMcEAC43kujbfjueTHpVapX1Kzq2n --junit-output=cli_wallet_tests.xml"
artifacts:
paths:
- tests/functional/python_tests/cli_wallet/tests/logs/cli_wallet.log
reports:
junit: tests/functional/python_tests/cli_wallet/cli_wallet_tests.xml
when: always
expire_in: 6 months
tags:
- public-runner-docker
hived_options_tests:
stage: test
needs:
......@@ -159,10 +176,45 @@ hived_options_tests:
PYTHONPATH: $CI_PROJECT_DIR/tests/functional
script:
- cd tests/functional/python_tests/hived
- apt-get update -y && apt-get install -y python3 python3-pip python3-dev
- pip3 install -U psutil
- "python3 hived_options_tests.py --run-hived $CI_PROJECT_DIR/testnet_node_build/install-root/bin/hived"
tags:
- public-runner-docker
hived_replay_tests:
stage: test
needs:
- job: consensus_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/builder$BUILDER_IMAGE_TAG"
variables:
PYTHONPATH: $CI_PROJECT_DIR/tests/functional
script:
- export ROOT_DIRECTORY=$PWD
- mkdir $ROOT_DIRECTORY/replay_logs
- cd tests/functional/python_tests/hived
- apt-get update -y && apt-get install -y python3 python3-pip python3-dev
- pip3 install -U wget psutil junit_xml gcovr secp256k1prp requests
- $CI_PROJECT_DIR/consensus_build/install-root/bin/truncate_block_log /blockchain/block_log /tmp/block_log 3000000
# quick replays for 10k blocks, with node restarts
- "python3 snapshot_1.py --run-hived $CI_PROJECT_DIR/consensus_build/install-root/bin/hived --block-log /tmp/block_log --blocks 10000 --artifact-directory $ROOT_DIRECTORY/replay_logs"
- "python3 snapshot_2.py --run-hived $CI_PROJECT_DIR/consensus_build/install-root/bin/hived --block-log /tmp/block_log --blocks 10000 --artifact-directory $ROOT_DIRECTORY/replay_logs"
# group of tests that uses one node with 3 million blocks replayed
- "python3 start_replay_tests.py --run-hived $CI_PROJECT_DIR/consensus_build/install-root/bin/hived --blocks 3000000 --block-log /tmp/block_log --test-directory $PWD/replay_based_tests --artifact-directory $ROOT_DIRECTORY/replay_logs"
artifacts:
paths:
- replay_logs
when: always
expire_in: 6 months
tags:
- public-runner-docker
- hived-for-tests
package_consensus_node:
stage: package
needs:
......@@ -182,3 +234,4 @@ package_consensus_node:
- "echo ===> the consensus node image for this build is: $CI_REGISTRY_IMAGE/consensus_node:$CI_COMMIT_SHORT_SHA"
tags:
- public-runner-docker
......@@ -81,6 +81,9 @@ if( ENABLE_MIRA )
endif()
OPTION( LOW_MEMORY_NODE "Build source for low memory node (ON OR OFF)" OFF )
include( CMakeDependentOption )
CMAKE_DEPENDENT_OPTION( STORE_ACCOUNT_METADATA "Keep the json_metadata for accounts, normally discarded on low memory nodes" OFF "LOW_MEMORY_NODE" ON )
MESSAGE( STATUS "LOW_MEMORY_NODE: ${LOW_MEMORY_NODE}" )
if( LOW_MEMORY_NODE )
MESSAGE( STATUS " " )
......@@ -88,16 +91,16 @@ if( LOW_MEMORY_NODE )
MESSAGE( STATUS " " )
SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DIS_LOW_MEM" )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DIS_LOW_MEM" )
endif()
OPTION (COLLECT_ACCOUNT_METADATA "Allows to enable/disable storing account metadata" ON)
MESSAGE( STATUS "COLLECT_ACCOUNT_METADATA: ${COLLECT_ACCOUNT_METADATA}" )
if( COLLECT_ACCOUNT_METADATA )
MESSAGE( STATUS " " )
MESSAGE( STATUS " CONFIGURING FOR ACCOUNT METADATA SUPPORT " )
MESSAGE( STATUS " " )
SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCOLLECT_ACCOUNT_METADATA" )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCOLLECT_ACCOUNT_METADATA" )
MESSAGE( STATUS "STORE_ACCOUNT_METADATA: ${STORE_ACCOUNT_METADATA}" )
if( STORE_ACCOUNT_METADATA )
MESSAGE( STATUS " " )
MESSAGE( STATUS " BUT STILL INDEXING ACCOUNT METADATA " )
MESSAGE( STATUS " " )
SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCOLLECT_ACCOUNT_METADATA" )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCOLLECT_ACCOUNT_METADATA" )
endif()
endif()
OPTION( SUPPORT_COMMENT_CONTENT "Build source with enabled comment content support (ON OR OFF)" OFF )
......@@ -144,31 +147,50 @@ if( HIVE_STATIC_BUILD AND ( ( MSVC AND NOT MINGW ) OR APPLE ) )
endif()
MESSAGE( STATUS "HIVE_STATIC_BUILD: ${HIVE_STATIC_BUILD}" )
SET( HIVE_LINT_LEVEL "OFF" CACHE STRING "Lint level during Hive build (FULL, HIGH, LOW, OFF)" )
SET( HIVE_LINT "OFF" CACHE STRING "Enable linting with clang-tidy during compilation" )
find_program(
CLANG_TIDY_EXE
NAMES "clang-tidy"
DOC "Path to clang-tidy executable"
)
SET( CLANG_TIDY_IGNORED
"-fuchsia-default-arguments\
,-hicpp-*\
,-cert-err60-cpp\
,-llvm-namespace-comment\
,-cert-err09-cpp\
,-cert-err61-cpp\
,-fuchsia-overloaded-operator\
,-misc-throw-by-value-catch-by-reference\
,-misc-unused-parameters\
,-clang-analyzer-core.uninitialized.Assign\
,-llvm-include-order\
,-clang-diagnostic-unused-lambda-capture\
,-misc-macro-parentheses\
,-boost-use-to-string\
,-misc-lambda-function-name\
,-cert-err58-cpp\
,-cert-err34-c\
,-cppcoreguidelines-*\
,-modernize-*\
,-clang-diagnostic-#pragma-messages\
,-google-*\
,-readability-*"
)
if( NOT CLANG_TIDY_EXE )
message( STATUS "clang-tidy not found" )
elseif( VERSION LESS 3.6 )
message( STATUS "clang-tidy found but only supported with CMake version >= 3.6" )
else()
message( STATUS "clang-tidy found: ${CLANG_TIDY_EXE}" )
if( "${HIVE_LINT_LEVEL}" STREQUAL "FULL" )
message( STATUS "Linting level set to: FULL" )
set( DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks='*'" )
elseif( "${HIVE_LINT_LEVEL}" STREQUAL "HIGH" )
message( STATUS "Linting level set to: HIGH" )
set( DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks='boost-use-to-string,clang-analyzer-*,cppcoreguidelines-*,llvm-*,misc-*,performance-*,readability-*'" )
elseif( "${HIVE_LINT_LEVEL}" STREQUAL "LOW" )
message( STATUS "Linting level set to: LOW" )
set( DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks='clang-analyzer-*'" )
if( HIVE_LINT )
message( STATUS "Linting enabled" )
set( DO_CLANG_TIDY ${CLANG_TIDY_EXE};-checks=*,${CLANG_TIDY_IGNORED};--warnings-as-errors=* )
else()
unset( CLANG_TIDY_EXE )
message( STATUS "Linting level set to: OFF" )
message( STATUS "Linting disabled" )
endif()
endif( NOT CLANG_TIDY_EXE )
......@@ -286,7 +308,6 @@ endif()
# fc/src/compress/miniz.c breaks strict aliasing. The Linux kernel builds with no strict aliasing
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing -Werror -DBOOST_THREAD_DONT_PROVIDE_PROMISE_LAZY" )
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing -DBOOST_THREAD_DONT_PROVIDE_PROMISE_LAZY" )
# -Werror
# external_plugins needs to be compiled first because libraries/app depends on HIVE_EXTERNAL_PLUGINS being fully populated
add_subdirectory( external_plugins )
......@@ -309,7 +330,7 @@ set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)
SET(CPACK_PACKAGE_DIRECTORY "${CMAKE_INSTALL_PREFIX}")
set(CPACK_PACKAGE_NAME "hive")
set(CPACK_PACKAGE_VENDOR "Steemit, Inc.")
set(CPACK_PACKAGE_VENDOR "Hive Community")
set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}")
set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}")
set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}")
......@@ -364,6 +385,9 @@ endif()
if( LOW_MEMORY_NODE )
MESSAGE( STATUS "\n\n CONFIGURED FOR LOW MEMORY NODE \n\n" )
if( STORE_ACCOUNT_METADATA )
MESSAGE( STATUS "\n\n BUT STILL STORING ACCOUNT METADATA \n\n" )
endif()
else()
MESSAGE( STATUS "\n\n CONFIGURED FOR FULL NODE \n\n" )
endif()
......
......@@ -4,6 +4,7 @@ ARG LOW_MEMORY_NODE=ON
ARG CLEAR_VOTES=ON
ARG BUILD_HIVE_TESTNET=OFF
ARG ENABLE_MIRA=OFF
ARG HIVE_LINT=OFF
FROM registry.gitlab.syncad.com/hive/hive/hive-baseenv:latest AS builder
ENV src_dir="/usr/local/src/hive"
......@@ -21,7 +22,7 @@ FROM builder AS consensus_node_builder
RUN \
cd ${src_dir} && \
${src_dir}/ciscripts/build.sh "ON" "ON" "OFF" "OFF"
${src_dir}/ciscripts/build.sh "ON" "ON" "OFF" "OFF" "ON"
###################################################################################################
## CONSENSUS NODE CONFIGURATION ##
......@@ -93,15 +94,17 @@ ARG LOW_MEMORY_NODE
ARG CLEAR_VOTES
ARG BUILD_HIVE_TESTNET
ARG ENABLE_MIRA
ARG HIVE_LINT
ENV LOW_MEMORY_NODE=${LOW_MEMORY_NODE}
ENV CLEAR_VOTES=${CLEAR_VOTES}
ENV BUILD_HIVE_TESTNET=${BUILD_HIVE_TESTNET}
ENV ENABLE_MIRA=${ENABLE_MIRA}
ENV HIVE_LINT=${HIVE_LINT}
RUN \
cd ${src_dir} && \
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_HIVE_TESTNET} ${ENABLE_MIRA}
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_HIVE_TESTNET} ${ENABLE_MIRA} ${HIVE_LINT}
###################################################################################################
## GENERAL NODE CONFIGURATION ##
......@@ -137,16 +140,20 @@ FROM builder AS testnet_node_builder
ARG LOW_MEMORY_NODE=OFF
ARG CLEAR_VOTES=OFF
ARG ENABLE_MIRA=OFF
ARG HIVE_LINT=ON
ENV LOW_MEMORY_NODE=${LOW_MEMORY_NODE}
ENV CLEAR_VOTES=${CLEAR_VOTES}
ENV BUILD_HIVE_TESTNET="ON"
ENV ENABLE_MIRA=${ENABLE_MIRA}
ENV HIVE_LINT=${HIVE_LINT}
RUN \
cd ${src_dir} && \
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_HIVE_TESTNET} ${ENABLE_MIRA} && \
apt-get update && \
apt-get install -y clang && \
apt-get install -y clang-tidy && \
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_HIVE_TESTNET} ${ENABLE_MIRA} ${HIVE_LINT} && \
apt-get install -y screen && \
pip3 install -U secp256k1prp && \
git clone https://gitlab.syncad.com/hive/beem.git && \
......
......@@ -39,6 +39,8 @@ RUN \
libbz2-dev \
liblz4-dev \
libzstd-dev \
clang \
clang-tidy \
&& \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
......
......@@ -5,12 +5,14 @@ LOW_MEMORY_NODE=$1
CLEAR_VOTES=$2
BUILD_HIVE_TESTNET=$3
ENABLE_MIRA=$4
HIVE_LINT=${5:-OFF}
echo "PWD=${PWD}"
echo "LOW_MEMORY_NODE=${LOW_MEMORY_NODE}"
echo "CLEAR_VOTES=${CLEAR_VOTES}"
echo "BUILD_HIVE_TESTNET=${BUILD_HIVE_TESTNET}"
echo "ENABLE_MIRA=${ENABLE_MIRA}"
echo "HIVE_LINT=${HIVE_LINT}"
BUILD_DIR="${PWD}/build"
CMAKE_BUILD_TYPE=Release
......@@ -28,6 +30,7 @@ cmake \
-DBUILD_HIVE_TESTNET=${BUILD_HIVE_TESTNET} \
-DENABLE_MIRA=${ENABLE_MIRA} \
-DHIVE_STATIC_BUILD=ON \
-DHIVE_LINT=${HIVE_LINT} \
..
make -j$(nproc)
make install
......
#!/bin/bash
# Jenkins post-build hook: report a FAILED build to the GitHub commit-status
# API, then clean the workspace and remove stale docker containers.
# Requires: GITHUB_SECRET (API token), BUILD_URL, WORKSPACE in the environment.
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/hive/statuses/$(git rev-parse HEAD) -d "{
  \"state\": \"failure\",
  \"target_url\": \"${BUILD_URL}\",
  \"description\": \"JenkinsCI reports the build has failed!\",
  \"context\": \"jenkins-ci-steemit\"
}"
# ${WORKSPACE:?} aborts with an error if WORKSPACE is unset/empty instead of
# letting the glob expand to a catastrophic `rm -rf /*`.
rm -rf "${WORKSPACE:?}"/*
# make docker cleanup after itself and delete all exited containers
sudo docker rm -v $(docker ps -a -q -f status=exited) || true
#!/bin/bash
# Jenkins pre-build hook: mark the current commit as "pending" on GitHub via
# the commit-status API so the PR shows an in-progress check.
# Requires: GITHUB_SECRET (API token) and BUILD_URL in the environment.
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/hive/statuses/$(git rev-parse HEAD) -d "{
  \"state\": \"pending\",
  \"target_url\": \"${BUILD_URL}\",
  \"description\": \"The build is now pending in jenkinsci!\",
  \"context\": \"jenkins-ci-steemit\"
}"
#!/bin/bash
# Jenkins post-build hook: report a SUCCESSFUL build to the GitHub
# commit-status API, then clean the workspace and stale docker containers.
# Fixes: original shebang was `#/bin/bash` (missing `!`), so the script was
# interpreted by whatever shell invoked it rather than bash.
# Requires: GITHUB_SECRET (API token), BUILD_URL, WORKSPACE in the environment.
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/hive/statuses/$(git rev-parse HEAD) -d "{
  \"state\": \"success\",
  \"target_url\": \"${BUILD_URL}\",
  \"description\": \"Jenkins-CI reports build succeeded!!\",
  \"context\": \"jenkins-ci-steemit\"
}"
# ${WORKSPACE:?} aborts if WORKSPACE is unset/empty instead of expanding to
# `rm -rf /*`.
rm -rf "${WORKSPACE:?}"/*
# make docker cleanup after itself and delete all exited containers
sudo docker rm -v $(docker ps -a -q -f status=exited) || true
#!/bin/bash
# Jenkins test stage: build the test image, then copy the cobertura coverage
# report out of the container into the workspace for Jenkins to publish.
set -e
sudo docker build --build-arg CI_BUILD=1 --build-arg BUILD_STEP=1 -t=steemit/hive:tests .
sudo docker run -v $WORKSPACE:/var/jenkins steemit/hive:tests cp -r /var/cobertura /var/jenkins
# make docker cleanup after itself and delete all exited containers
sudo docker rm -v $(docker ps -a -q -f status=exited) || true
\ No newline at end of file
......@@ -16,6 +16,17 @@ execute_unittest_group()
fi
}
# Run exactly one ctest test, matched by full name.
# $1 ctest test name
execute_exactly_one_test()
{
  local ctest_test_name=$1
  echo "Start ctest test '${ctest_test_name}'"
  # Quote the regex so a test name containing shell-special characters
  # (spaces, `$`, `*`, ...) is passed to ctest intact; the ^...$ anchors
  # ensure only the exact name matches, not substrings.
  if ! ctest -R "^${ctest_test_name}$" --output-on-failure -vv
  then
    exit 1
  fi
}
execute_hive_functional()
{
echo "Start hive functional tests"
......@@ -51,8 +62,8 @@ echo "
execute_unittest_group plugin_test
execute_unittest_group chain_test
execute_exactly_one_test all_plugin_tests
execute_exactly_one_test all_chain_tests
execute_hive_functional
......
#!/bin/bash
# Jenkins entry point: mark the commit as pending on GitHub, then run the main
# build script and translate its exit status into a log line / job failure.
set -e
/bin/bash $WORKSPACE/ciscripts/buildpending.sh
# A command used as an `if` condition does not trigger `set -e`, so a failing
# buildscript.sh reaches the else-branch below instead of aborting here.
if /bin/bash $WORKSPACE/ciscripts/buildscript.sh; then
  echo BUILD SUCCESS
else
  echo BUILD FAILURE
  exit 1
fi
#!/bin/bash
# Jenkins wrapper: run the test stage and convert its exit status into a
# BUILD SUCCESS / BUILD FAILURE log line plus a non-zero exit on failure.
set -e
# The `if` guard keeps `set -e` from aborting before we can report failure.
if /bin/bash $WORKSPACE/ciscripts/buildtests.sh; then
  echo BUILD SUCCESS
else
  echo BUILD FAILURE
  exit 1
fi
\ No newline at end of file
#!/bin/bash
# Testnet bootstrap: download the deployment script and node configuration
# files from an external source ($SCRIPTURL/$LAUNCHENV/$APP) and launch the
# testnet init script.
# NOTE(review): wget failures are not checked — a failed download leaves an
# empty config file and the script still proceeds; confirm this is intended.
echo hived-testnet: getting deployment scripts from external source
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/testnetinit.sh > /usr/local/bin/testnetinit.sh
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/testnet.config.ini > /etc/hived/testnet.config.ini
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/fastgen.config.ini > /etc/hived/fastgen.config.ini
chmod +x /usr/local/bin/testnetinit.sh
echo hived-testnet: launching testnetinit script
/usr/local/bin/testnetinit.sh
......@@ -119,7 +119,14 @@ class application_impl {
};
application::application()
:my(new application_impl()), main_io_handler( true/*allow_close_when_signal_is_received*/, [ this ](){ shutdown(); } )
: pre_shutdown_plugins(
[]( abstract_plugin* a, abstract_plugin* b )
{
assert( a && b );
return a->get_pre_shutdown_order() > b->get_pre_shutdown_order();
}
),
my(new application_impl()), main_io_handler( true/*allow_close_when_signal_is_received*/, [ this ](){ finish(); } )
{
}
......@@ -322,6 +329,18 @@ bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*
}
}
// Runs the pre-shutdown phase: notifies every started plugin, in the order
// imposed by the pre_shutdown_plugins comparator (higher pre_shutdown_order
// first, so e.g. p2p is closed before plugins that depend on its data), and
// then forgets them so the phase cannot run twice.
void application::pre_shutdown() {
  std::cout << "Before shutting down...\n";

  for( auto& plugin : pre_shutdown_plugins )
  {
    plugin->pre_shutdown();
  }

  pre_shutdown_plugins.clear();
}
void application::shutdown() {
std::cout << "Shutting down...\n";
......@@ -339,6 +358,12 @@ void application::shutdown() {
plugins.clear();
}
// Full shutdown sequence: first let plugins stop accepting new work
// (pre_shutdown), then tear everything down (shutdown).
void application::finish()
{
  pre_shutdown();
  shutdown();
}
void application::exec() {
if( !is_interrupt_request() )
......
......@@ -76,8 +76,12 @@ namespace appbase {
}
void startup();
void pre_shutdown();
void shutdown();
void finish();
/**
* Wait until quit(), SIGINT or SIGTERM and then shutdown
*/
......@@ -162,13 +166,21 @@ namespace appbase {
*/
///@{
void plugin_initialized( abstract_plugin& plug ) { initialized_plugins.push_back( &plug ); }
void plugin_started( abstract_plugin& plug ) { running_plugins.push_back( &plug ); }
// Records a plugin that reached the `started` state: kept both in startup
// order (running_plugins) and in shutdown-priority order
// (pre_shutdown_plugins, a multiset sorted by get_pre_shutdown_order()).
void plugin_started( abstract_plugin& plug )
{
  running_plugins.push_back( &plug );
  pre_shutdown_plugins.insert( &plug );
}
///@}
private:
application(); ///< private because application is a singlton that should be accessed via instance()
map< string, std::shared_ptr< abstract_plugin > > plugins; ///< all registered plugins
vector< abstract_plugin* > initialized_plugins; ///< stored in the order they were started running
using pre_shutdown_cmp = std::function< bool ( abstract_plugin*, abstract_plugin* ) >;
using pre_shutdown_multiset = std::multiset< abstract_plugin*, pre_shutdown_cmp >;
pre_shutdown_multiset pre_shutdown_plugins; ///< stored in the order what is necessary in order to close every plugin in safe way
vector< abstract_plugin* > running_plugins; ///< stored in the order they were started running
std::string version_info;
std::string app_name = "appbase";
......@@ -195,6 +207,7 @@ namespace appbase {
public:
virtual ~plugin() {}
virtual pre_shutdown_order get_pre_shutdown_order() const override { return _pre_shutdown_order; }
virtual state get_state() const override { return _state; }
virtual const std::string& get_name()const override final { return Impl::name(); }
......@@ -230,6 +243,23 @@ namespace appbase {
BOOST_THROW_EXCEPTION( std::runtime_error("Initial state was not initialized, so final state cannot be started.") );
}
// Default pre-shutdown hook: intentionally a no-op; plugins that must act
// before the regular shutdown (e.g. P2P) override this.
virtual void plugin_pre_shutdown() override
{
  /*
    By default most plugins don't need any pre-actions during shutdown.
    A problem appears when P2P plugin receives and sends data into dependent plugins.
    In this case is necessary to close P2P plugin as soon as possible.
  */
}
// Final dispatcher for the pre-shutdown phase: invokes the plugin's hook only
// when the plugin actually reached the `started` state, mirroring shutdown().
virtual void pre_shutdown() override final
{
  if( _state == started )
  {
    this->plugin_pre_shutdown();
  }
}
virtual void shutdown() override final
{
if( _state == started )
......@@ -243,7 +273,10 @@ namespace appbase {
protected:
plugin() = default;
virtual void set_pre_shutdown_order( pre_shutdown_order val ) { _pre_shutdown_order = val; }
private:
pre_shutdown_order _pre_shutdown_order = abstract_plugin::basic_order;
state _state = abstract_plugin::registered;
};
}
......@@ -35,13 +35,20 @@ namespace appbase {
stopped ///< the plugin is no longer running
};
enum pre_shutdown_order {
basic_order = 0, ///most plugins don't need to be prepared before another plugins, therefore it doesn't matter when they will be closed
p2p_order = 1 ///p2p plugin has to reject/break all connections at the start
};
virtual ~abstract_plugin(){}
virtual pre_shutdown_order get_pre_shutdown_order()const = 0;
virtual state get_state()const = 0;
virtual const std::string& get_name()const = 0;
virtual void set_program_options( options_description& cli, options_description& cfg ) = 0;
virtual void initialize(const variables_map& options) = 0;
virtual void startup() = 0;
virtual void pre_shutdown() = 0;
virtual void shutdown() = 0;
protected:
......@@ -64,8 +71,13 @@ namespace appbase {
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of shutdown process triggerred by main application.
*/
virtual void plugin_pre_shutdown() = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of shutdown process triggerred by main application.
*/
virtual void plugin_shutdown() = 0;
virtual void set_pre_shutdown_order( pre_shutdown_order val ) = 0;
};
template<typename Impl>
......
#pragma once
#include <fc/log/logger.hpp>
#include <vector>
#include <atomic>
#include <future>
namespace hive {
// Synchronization record for one guarded action: `activity` counts how many
// action_catcher guards are currently alive inside the action, and the
// promise/future pair is signalled by the last guard leaving after shutdown
// has started (see action_catcher's destructor).
struct shutdown_state
{
  using ptr_shutdown_state = std::shared_ptr< shutdown_state >;

  std::promise<void> promise;
  // Shared view of `promise`'s future, waited on by shutdown_mgr::wait().
  std::shared_future<void> future;
  // Number of in-flight actions guarded by this state.
  std::atomic_uint activity;
};
// Coordinates a safe shutdown: owns one shutdown_state per guarded action.
// After prepare_shutdown() flips `running` to false, wait() blocks until every
// in-flight action has drained, giving up (via FC_ASSERT) after roughly 30
// seconds per state.
class shutdown_mgr
{
  private:

    // Name used only in diagnostic/assertion messages.
    std::string name;

    // True while the manager accepts new actions; cleared by prepare_shutdown().
    std::atomic_bool running;

    // One state per guarded action, addressed by index via get_state().
    std::vector< shutdown_state::ptr_shutdown_state > states;

    // Human-readable label for a std::future_status value (logging only).
    const char* fStatus(std::future_status s)
    {
      switch(s)
      {
        case std::future_status::ready:
          return "ready";

        case std::future_status::deferred:
          return "deferred";

        case std::future_status::timeout:
          return "timeout";

        default:
          return "unknown";
      }
    }

    // Blocks until `state` reports no activity or its future becomes ready.
    // Polls in 100ms slices and asserts after `time_maximum` slices (30s).
    // Must only be called after prepare_shutdown() (checked by the assert).
    void wait( const shutdown_state& state )
    {
      FC_ASSERT( !get_running().load(), "Lack of shutdown" );

      std::future_status res;
      uint32_t cnt = 0;
      uint32_t time_maximum = 300;//30 seconds

      do
      {
        if( state.activity.load() != 0 )
        {
          res = state.future.wait_for( std::chrono::milliseconds(100) );
          if( res != std::future_status::ready )
          {
            ilog("finishing: ${s}, future status: ${fs}", ("s", fStatus( res ) )("fs", std::to_string( state.future.valid() ) ) );
          }
          FC_ASSERT( ++cnt <= time_maximum, "Closing the ${name} is terminated", ( "name", name ) );
        }
        else
        {
          // No guard is currently inside this state: nothing to wait for.
          res = std::future_status::ready;
        }
      }
      while( res != std::future_status::ready );
    }

  public:

    // `_nr_actions` fixes how many independent actions this manager guards;
    // one shutdown_state (with a linked promise/future pair and a zeroed
    // activity counter) is created per action.
    shutdown_mgr( std::string _name, size_t _nr_actions )
    : name( _name ), running( true )
    {
      for( size_t i = 0; i < _nr_actions; ++i )
      {
        shutdown_state::ptr_shutdown_state _state( new shutdown_state() );
        _state->future = std::shared_future<void>( _state->promise.get_future() );
        _state->activity.store( 0 );

        states.emplace_back( _state );
      }
    }

    // Begins shutdown: guards observe running == false and wait() becomes
    // operational.
    void prepare_shutdown()
    {
      running.store( false );
    }

    // Read-only view of the running flag, shared with action_catcher guards.
    const std::atomic_bool& get_running() const
    {
      return running;
    }

    // Returns the state for action `idx`; asserts on an out-of-range index or
    // a null entry.
    shutdown_state& get_state( size_t idx )
    {
      FC_ASSERT( idx < states.size(), "Incorrect index - lack of correct state" );

      shutdown_state* _state = states[idx].get();
      FC_ASSERT( _state, "State has NULL value" );

      return *_state;
    }

    // Waits for all guarded actions to finish. No-op unless prepare_shutdown()
    // has been called first.
    void wait()
    {
      if( get_running().load() )
        return;

      for( auto& state : states )
      {
        shutdown_state* _state = state.get();
        FC_ASSERT( _state, "State has NULL value" );

        wait( *_state );
      }
    }
};
// RAII guard placed around one guarded action: bumps the state's activity
// counter for the action's lifetime and, when the manager is shutting down,
// signals the state's promise as the guard leaves so shutdown_mgr::wait()
// can unblock.
class action_catcher
{
  private:

    // Shared running flag owned by shutdown_mgr (false once shutdown started).
    const std::atomic_bool& running;

    // Per-action state whose activity counter / promise this guard drives.
    shutdown_state& state;

  public:

    action_catcher( const std::atomic_bool& _running, shutdown_state& _state ):
      running( _running ), state( _state )
    {
      // Single atomic read-modify-write. The previous
      // `store( load() + 1 )` pair was a lost-update race: two guards could
      // read the same value concurrently and one increment would be dropped,
      // leaving `activity` permanently wrong.
      state.activity.fetch_add( 1 );
    }

    ~action_catcher()
    {
      // Matching atomic decrement (was the same racy load/store pair).
      state.activity.fetch_sub( 1 );

      if( running.load() == false && state.future.valid() == false )
      {
        ilog("Sending notification to shutdown barrier.");

        try
        {
          state.promise.set_value();
        }
        catch( const std::future_error& e )
        {
          ilog("action_catcher: future error exception. ( Code: ${c} )( Message: ${m} )", ( "c", e.code().value() )( "m", e.what() ) );
        }
        catch(...)
        {
          ilog("action_catcher: unknown error exception." );
        }
      }
    }
};
}
......@@ -152,10 +152,10 @@ namespace hive { namespace chain {
my->block_file = file;
my->index_file = fc::path( file.generic_string() + ".index" );
my->block_log_fd = ::open(my->block_file.generic_string().c_str(), O_RDWR | O_APPEND | O_CREAT, 0644);
my->block_log_fd = ::open(my->block_file.generic_string().c_str(), O_RDWR | O_APPEND | O_CREAT | O_CLOEXEC, 0644);
if (my->block_log_fd == -1)
FC_THROW("Error opening block log file ${filename}: ${error}", ("filename", my->block_file)("error", strerror(errno)));
my->block_index_fd = ::open(my->index_file.generic_string().c_str(), O_RDWR | O_APPEND | O_CREAT, 0644);
my->block_index_fd = ::open(my->index_file.generic_string().c_str(), O_RDWR | O_APPEND | O_CREAT | O_CLOEXEC, 0644);
if (my->block_index_fd == -1)
FC_THROW("Error opening block index file ${filename}: ${error}", ("filename", my->index_file)("error", strerror(errno)));
my->block_log_size = get_file_size(my->block_log_fd);
......@@ -462,7 +462,7 @@ namespace hive { namespace chain {
//create and size the new temporary index file (block_log.index.new)
fc::path new_index_file(my->index_file.generic_string() + ".new");
const size_t block_index_size = block_num * sizeof(uint64_t);
int new_index_fd = ::open(new_index_file.generic_string().c_str(), O_RDWR | O_CREAT | O_TRUNC, 0644);
int new_index_fd = ::open(new_index_file.generic_string().c_str(), O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
if (new_index_fd == -1)
FC_THROW("Error opening temporary new index file ${filename}: ${error}", ("filename", new_index_file.generic_string())("error", strerror(errno)));
if (ftruncate(new_index_fd, block_index_size) == -1)
......@@ -572,7 +572,7 @@ namespace hive { namespace chain {
#endif //NOT USE_BACKWARD_INDEX
ilog("opening new block index");
my->block_index_fd = ::open(my->index_file.generic_string().c_str(), O_RDWR | O_APPEND | O_CREAT, 0644);
my->block_index_fd = ::open(my->index_file.generic_string().c_str(), O_RDWR | O_APPEND | O_CREAT | O_CLOEXEC, 0644);
if (my->block_index_fd == -1)
FC_THROW("Error opening block index file ${filename}: ${error}", ("filename", my->index_file)("error", strerror(errno)));
//report size of new index file and verify it is the right size for the blocks in block log
......
This diff is collapsed.
......@@ -271,7 +271,7 @@ vector<fork_item> fork_database::fetch_block_range_on_main_branch_by_number( con
void fork_database::set_head(shared_ptr<fork_item> h)
{
_head = h;
_head = std::move( h );
}
void fork_database::remove(block_id_type id)
......