Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: hive/hive
Commits on Source (194), showing 344 additions and 263 deletions
......@@ -5,23 +5,29 @@ stages:
variables:
PYTEST_NUMBER_OF_PROCESSES: 8
MIRRORNET_CHAIN_ID: 42
MIRRORNET_SKELETON_KEY: "5JNHfZYKGaomSFvd4NUdQ9qMcEAC43kujbfjueTHpVapX1Kzq2n"
GIT_DEPTH: 1
GIT_SUBMODULE_STRATEGY: recursive
# pin to specific docker images for build repeatability
#points registry.gitlab.syncad.com/hive/hive/runtime:ubuntu-20.04-3
RUNTIME_IMAGE_TAG: "@sha256:58b2ef26ece0463bad9ba61c665ffb9dc4255a4d65b27f56048786e3fae7b2e5"
#points registry.gitlab.syncad.com/hive/hive/runtime:ubuntu-20.04-5
RUNTIME_IMAGE_TAG: "@sha256:598fadc96705d1be39c742e94836ae48009acd47f96a4ac087b835df7f7960d7"
#points registry.gitlab.syncad.com/hive/hive/builder:ubuntu-20.04-3
BUILDER_IMAGE_TAG: "@sha256:590750387c74c470ee30910502b4f855122eae9ef9c6e7ad0841985abdc3938c"
#points registry.gitlab.syncad.com/hive/hive/test:ubuntu-20.04-3
TEST_IMAGE_TAG: "@sha256:991f5c225ea66dccabd43ae18ebe9cc4c90dc2b14d80625202f96e041c832ac6"
#points to our clone of python:3.8-slim-bullseye
PYTHON_BULLSEYE_TAG: "@sha256:c4dc24a9761fe0a7b3f498fe677e8129ab0a62d72a3beae5ea899482f104a962"
# uses registry.gitlab.syncad.com/hive/hive/ci-base-image:ubuntu20.04-5
TEST_IMAGE_TAG: "@sha256:f6f8d1a300a7f13c1c5a64088db38cd641fb2ec397f4ac46b4ae8733753a62be"
# Versions of Python packages
PYTEST_VERSION: "7.1.2"
PYTEST_XDIST_VERSION: "2.5.0"
PYTEST_TIMEOUT_VERSION: "2.1.0"
PYTEST_RERUN_FAILURES_VERSION: "10.2"
PYTHON_JUNIT_XML_VERSION: "1.9"
PYTHON_DATEUTIL_VERSION: "2.8.2"
TOX_VERSION: "3.25.1"
include: '/scripts/ci-helpers/prepare_data_image_job.yml'
prepare_hived_data_image:
......@@ -76,15 +82,13 @@ testnet_node_build:
mirrornet_node_build:
stage: build
extends: .prepare_hived_data_5m_image
interruptible: true
extends: .docker_image_builder_job
variables:
REGISTRY_USER: "$HIVED_CI_IMGBUILDER_USER"
REGISTRY_PASS: "$HIVED_CI_IMGBUILDER_PASSWORD"
IMAGE_TAG: "$CI_COMMIT_SHA"
SOURCES_DIR: "."
REGISTRY_URL: "$CI_REGISTRY_IMAGE"
BINARIES_DIR: "$CI_PROJECT_DIR/hived-mirrornet-binaries"
BINARIES_DIR: "hived-mirrornet-binaries"
script:
- echo "Building hived in mirrornet mode"
- ./scripts/ci-helpers/build_instance.sh
......@@ -93,6 +97,7 @@ mirrornet_node_build:
"$REGISTRY_URL"
--network-type="mirrornet"
--export-binaries="$BINARIES_DIR"
- chmod -Rc a+rwx "$BINARIES_DIR"
artifacts:
paths:
- "$BINARIES_DIR"
......@@ -102,14 +107,15 @@ mirrornet_node_build:
mirrornet_replay_test:
stage: test
image: "$CI_REGISTRY_IMAGE/runtime$RUNTIME_IMAGE_TAG"
needs: [ "mirrornet_node_build" ]
image: "$CI_REGISTRY_IMAGE/ci-base-image$TEST_IMAGE_TAG"
needs:
- job: "mirrornet_node_build"
artifacts: true
variables:
BINARIES_DIR: "$CI_PROJECT_DIR/hived-mirrornet-binaries"
BINARIES_DIR: "hived-mirrornet-binaries"
BLOCK_LOG_SOURCE_DIR: "/blockchain"
NUMBER_OF_BLOCKS: 5000000
CHAIN_ID: 42
SKELETON_KEY: "5JNHfZYKGaomSFvd4NUdQ9qMcEAC43kujbfjueTHpVapX1Kzq2n"
MIRRORNET_WORKING_DIR: "$CI_PROJECT_DIR/mirrornet"
MAINNET_TRUNCATED_DIR: "$CI_PROJECT_DIR/mainnet"
interruptible: true
......@@ -132,15 +138,15 @@ mirrornet_replay_test:
--plugin block_log_conversion
-i "$MAINNET_TRUNCATED_DIR/block_log"
-o "$MIRRORNET_WORKING_DIR/blockchain/block_log"
--chain-id $CHAIN_ID
--private-key "$SKELETON_KEY"
--chain-id $MIRRORNET_CHAIN_ID
--private-key "$MIRRORNET_SKELETON_KEY"
--use-same-key
--jobs $NUMBER_OF_PROCESSES
- echo "Starting hived replay"
- ./hived
-d "$MIRRORNET_WORKING_DIR"
--chain-id $CHAIN_ID
--skeleton-key "$SKELETON_KEY"
--chain-id $MIRRORNET_CHAIN_ID
--skeleton-key "$MIRRORNET_SKELETON_KEY"
--set-benchmark-interval 100000
--force-replay
--stop-replay-at-block $NUMBER_OF_BLOCKS
......@@ -215,97 +221,101 @@ plugin_test:
- additional arguments: ${PYTEST_ARGS[@]}"
- timeout $((($PYTEST_TIMEOUT_MINUTES + 2) * 60)) pytest --timeout=$(($PYTEST_TIMEOUT_MINUTES * 60)) --junitxml="./report.xml" "${PROCESSES}" "${DURATIONS}" "${PYTEST_ARGS[@]}"
.beem_setup : &beem_setup
- git clone --depth=1 --single-branch --branch master https://gitlab.syncad.com/hive/beem.git
- python3 -m venv venv/
- . venv/bin/activate
- python3 -m pip install --upgrade pip
- pip3 install junit-xml python-dateutil
- cd beem
- python3 -m pip install --upgrade -r requirements-test.txt
- python3 setup.py install
- cd ..
- mkdir -p build/tests/hive-node-data
beem_tests:
stage: test
extends: .test_tools_based
.pytest_based:
variables:
JUNIT_REPORT: "report.xml" # should be overridden by derived jobs
interruptible: true
image: "$CI_REGISTRY_IMAGE/ci-base-image$TEST_IMAGE_TAG"
before_script:
- python3 -m venv venv/
- . venv/bin/activate
- pip3 install pytest==${PYTEST_VERSION} pytest-xdist==${PYTEST_XDIST_VERSION} pytest-timeout==${PYTEST_TIMEOUT_VERSION} "$CI_PROJECT_DIR/tests/test_tools" "$CI_PROJECT_DIR/tests/schemas"
artifacts:
reports:
junit: $JUNIT_REPORT
name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
paths:
- "**/generated_during_*"
- "**/generated_by_package_fixtures"
when: always
expire_in: 1 week
.test_tools_based:
extends: .pytest_based
variables:
HIVED_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/hived"
CLI_WALLET_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/cli_wallet"
GET_DEV_KEY_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/get_dev_key"
TEST_TOOLS_NODE_DEFAULT_WAIT_FOR_LIVE_TIMEOUT: "60"
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 18
PYTHONPATH: $CI_PROJECT_DIR/tests/functional
.beem_tests_base:
stage: test
extends: .test_tools_based
script:
- *beem_setup
- cd tests/functional/python_tests/dhf_tests
- git clone --depth=1 --single-branch --branch master https://gitlab.syncad.com/hive/beem.git
- pip3 install junit-xml==${PYTHON_JUNIT_XML_VERSION} python-dateutil==${PYTHON_DATEUTIL_VERSION}
- cd beem
- python3 -m pip install --upgrade -r requirements-test.txt
- python3 setup.py install
- cd ..
- mkdir -p build/tests/hive-node-data
- cd tests/functional/python_tests/beem_tests
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/dhf_tests/report.xml
tags:
- public-runner-docker
.test_tools_based:
beem_testnet_tests:
extends: .beem_tests_base
needs:
- job: testnet_node_build
artifacts: true
variables:
HIVED_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/hived"
CLI_WALLET_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/cli_wallet"
GET_DEV_KEY_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/get_dev_key"
TEST_TOOLS_NODE_DEFAULT_WAIT_FOR_LIVE_TIMEOUT: "60"
interruptible: true
before_script:
- apt-get update -y && apt-get install -y python3 python3-pip python3-dev python3-venv
- python3 -m venv venv/
- . venv/bin/activate
- pip3 install pytest==${PYTEST_VERSION} pytest-xdist==${PYTEST_XDIST_VERSION} pytest-timeout==${PYTEST_TIMEOUT_VERSION} "$CI_PROJECT_DIR/tests/test_tools"
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
paths:
- "**/generated_during_*"
- "**/generated_by_package_fixtures"
when: always
expire_in: 1 week
PYTEST_TIMEOUT_MINUTES: 18
PYTEST_ARGS: "-m not mirrornet"
JUNIT_REPORT: tests/functional/python_tests/beem_tests/report.xml
beem_mirrornet_tests:
extends: .beem_tests_base
needs:
- job: mirrornet_node_build
artifacts: true
variables:
PYTEST_TIMEOUT_MINUTES: 4
HIVED_PATH: "$CI_PROJECT_DIR/hived-mirrornet-binaries/hived"
CLI_WALLET_PATH: "$CI_PROJECT_DIR/hived-mirrornet-binaries/cli_wallet"
GET_DEV_KEY_PATH: "$CI_PROJECT_DIR/hived-mirrornet-binaries/get_dev_key"
JUNIT_REPORT: tests/functional/python_tests/beem_tests/report.xml
.test_tools_based_with_faketime:
extends: .test_tools_based
before_script:
- !reference [.test_tools_based, before_script]
- git clone --depth 1 --branch master https://github.com/wolfcw/libfaketime.git
- cd libfaketime && make
- export LIBFAKETIME_PATH=`pwd`/src/libfaketimeMT.so.1
- cd ..
- !reference [.beem_tests_base, before_script]
- export PYTEST_ARGS=(-m mirrornet --chain-id ${MIRRORNET_CHAIN_ID} --skeleton-key "${MIRRORNET_SKELETON_KEY}")
cli_wallet_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
JUNIT_REPORT: tests/functional/python_tests/cli_wallet/report.xml
PYTEST_TIMEOUT_MINUTES: 30
PYTEST_NUMBER_OF_PROCESSES: 1
script:
- cd tests/functional/python_tests/cli_wallet
- *run-pytest
# RUN TESTS ON HF25. REMOVE AFTER HARDFORK26!!!
- export HIVE_HF26_TIME=1906530101 # Distant future (1 June 2030), to make sure that node is in HF25.
- *run-pytest
tags:
- public-runner-docker
artifacts:
reports:
junit: tests/functional/python_tests/cli_wallet/report.xml
hived_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
JUNIT_REPORT: tests/functional/python_tests/hived/report.xml
PYTEST_TIMEOUT_MINUTES: 21
PYTEST_NUMBER_OF_PROCESSES: 1
script:
......@@ -313,187 +323,143 @@ hived_tests:
- *run-pytest
tags:
- public-runner-docker
artifacts:
reports:
junit: tests/functional/python_tests/hived/report.xml
cli_wallet_extended_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 14
JUNIT_REPORT: tests/functional/python_tests/cli_wallet_extended_tests/report.xml
script:
- cd tests/functional/python_tests/cli_wallet_extended_tests
- *run-pytest
# RUN TESTS ON HF25. REMOVE AFTER HARDFORK26!!!
- export HIVE_HF26_TIME=1906530101 # Distant future (1 June 2030), to make sure that node is in HF25.
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/cli_wallet_extended_tests/report.xml
tags:
- public-runner-docker
fork_tests:
stage: test
extends: .test_tools_based_with_faketime
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
extends: .test_tools_based
variables:
PYTEST_TIMEOUT_MINUTES: 10
PYTEST_TIMEOUT_MINUTES: 15
JUNIT_REPORT: tests/functional/python_tests/fork_tests/report.xml
script:
- cd tests/functional/python_tests/fork_tests
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/fork_tests/report.xml
tags:
- public-runner-docker
hf26_tests:
stage: test
extends: .test_tools_based_with_faketime
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
extends: .test_tools_based
variables:
PYTEST_TIMEOUT_MINUTES: 20
SIGN_TRANSACTION_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/sign_transaction"
JUNIT_REPORT: tests/functional/python_tests/hf26_tests/report.xml
script:
- cd tests/functional/python_tests/hf26_tests
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/hf26_tests/report.xml
tags:
- public-runner-docker
live_sync_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTHONPATH: $CI_PROJECT_DIR/tests/test_tools/package
PYTEST_TIMEOUT_MINUTES: 10
script:
- apt-get update -y && apt-get install -y tox
- cd tests/functional/python_tests/live_sync_tests
- timeout 11m tox
artifacts:
reports:
junit: tests/functional/python_tests/live_sync_tests/report.xml
- *run-pytest
tags:
- public-runner-docker
broadcast_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 6
JUNIT_REPORT: tests/functional/python_tests/broadcast_tests/report.xml
script:
- cd tests/functional/python_tests/broadcast_tests
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/broadcast_tests/report.xml
tags:
- public-runner-docker
test_tools_tests:
stage: test
extends: .test_tools_based_with_faketime
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
extends: .test_tools_based
variables:
PYTEST_TIMEOUT_MINUTES: 30
JUNIT_REPORT: tests/test_tools/tests/report.xml
script:
- cd tests/test_tools/tests
- pip install local-tools/
- *run-pytest
artifacts:
reports:
junit: tests/test_tools/tests/report.xml
tags:
- public-runner-docker
rc_direct_delegations_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 12
JUNIT_REPORT: tests/functional/python_tests/direct_rc_delegations_tests/report.xml
script:
- cd tests/functional/python_tests/direct_rc_delegations_tests
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/direct_rc_delegations_tests/report.xml
tags:
- public-runner-docker
message_format_testnet_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 24
SIGN_TRANSACTION_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/sign_transaction"
TEST_TOOLS_VALIDATE_RESPONSE_SCHEMAS: "TRUE"
NODE_TYPE: "testnet"
JUNIT_REPORT: tests/api_tests/message_format_tests/report.xml
script:
- cd tests/api_tests/message_format_tests
- pip3 install "$CI_PROJECT_DIR/tests/schemas"
- export PYTEST_ARGS=(-m "${NODE_TYPE} or (not testnet and not mainnet_5m and not mainnet_64m)")
- *run-pytest
artifacts:
reports:
junit: tests/api_tests/message_format_tests/report.xml
tags:
- public-runner-docker
datagen_api_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 10
SIGN_TRANSACTION_PATH: "$CI_PROJECT_DIR/testnet_node_build/install-root/bin/sign_transaction"
JUNIT_REPORT: tests/functional/python_tests/datagen_tests/api_tests/report.xml
script:
- cd tests/functional/python_tests/datagen_tests/api_tests/
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/datagen_tests/api_tests/report.xml
tags:
- public-runner-docker
patterns_tests:
stage: test
image: "$CI_REGISTRY_IMAGE/python$PYTHON_BULLSEYE_TAG"
image: "$CI_REGISTRY_IMAGE/ci-base-image$TEST_IMAGE_TAG"
variables:
FF_NETWORK_PER_BUILD: 1
PYTHONPATH: "$CI_PROJECT_DIR/tests/test_tools/package"
......@@ -508,8 +474,11 @@ patterns_tests:
before_script:
- echo "HIVED image name $HIVED_IMAGE_NAME"
- pip install tox==3.25.1
- pip install -r $CI_PROJECT_DIR/tests/api_tests/comparsion_tests/requirements.txt
- python3 -m venv venv/
- . venv/bin/activate
- pip3 install pytest==${PYTEST_VERSION} pytest-xdist==${PYTEST_XDIST_VERSION} pytest-timeout==${PYTEST_TIMEOUT_VERSION} "$CI_PROJECT_DIR/tests/test_tools"
- pip3 install tox==${TOX_VERSION}
- pip3 install -r $CI_PROJECT_DIR/tests/api_tests/comparsion_tests/requirements.txt
script:
# run pattern tests
......@@ -517,7 +486,6 @@ patterns_tests:
- timeout 33m ./run_tests.sh hived-instance:8090 "$CI_PROJECT_DIR" "${TEST_SUITE}" FALSE
artifacts:
when: always
reports:
junit: tests/api_tests/pattern_tests/results.xml
paths:
......@@ -534,26 +502,22 @@ patterns_tests:
transaction_serialization_testnet_tests:
stage: test
extends: .test_tools_based_with_faketime
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
extends: .test_tools_based
variables:
PYTEST_TIMEOUT_MINUTES: 15
JUNIT_REPORT: tests/functional/python_tests/transaction_serialization_tests/report.xml
script:
- cd $CI_PROJECT_DIR/tests/functional/python_tests/transaction_serialization_tests
- export PYTEST_ARGS=(-m testnet)
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/transaction_serialization_tests/report.xml
tags:
- public-runner-docker
python_pattern_mainnet_tests:
stage: test
image: "$CI_REGISTRY_IMAGE/python$PYTHON_BULLSEYE_TAG"
image: "$CI_REGISTRY_IMAGE/ci-base-image$TEST_IMAGE_TAG"
interruptible: true
variables:
PYTEST_TIMEOUT_MINUTES: 27
......@@ -568,9 +532,9 @@ python_pattern_mainnet_tests:
before_script:
- echo "HIVED image name $HIVED_IMAGE_NAME"
- apt update && apt install -y bash git ca-certificates curl build-essential python3-dev
- python3 -m ensurepip
- pip3 install pytest pytest-xdist pytest-timeout==${PYTEST_TIMEOUT_VERSION} "$CI_PROJECT_DIR/tests/test_tools" pytest-timeout==${PYTEST_TIMEOUT_VERSION}
- python3 -m venv venv/
- . venv/bin/activate
- pip3 install pytest==${PYTEST_VERSION} pytest-xdist==${PYTEST_XDIST_VERSION} pytest-timeout==${PYTEST_TIMEOUT_VERSION} "$CI_PROJECT_DIR/tests/test_tools"
script:
# run pattern tests
......@@ -592,20 +556,17 @@ python_pattern_mainnet_tests:
- hived-for-tests
.message_format_tests:
image: "$CI_REGISTRY_IMAGE/python$PYTHON_BULLSEYE_TAG"
stage: test
extends: .pytest_based
variables:
PYTEST_TIMEOUT_MINUTES: 30
TEST_TOOLS_VALIDATE_RESPONSE_SCHEMAS: "TRUE"
JUNIT_REPORT: tests/api_tests/message_format_tests/report.xml
script:
- apt update && apt install -y bash git ca-certificates curl build-essential python3-dev
- python3 -m ensurepip
- pip3 install pytest pytest-xdist pytest-timeout==${PYTEST_TIMEOUT_VERSION} "$CI_PROJECT_DIR/tests/test_tools" "$CI_PROJECT_DIR/tests/schemas"
- cd tests/api_tests/message_format_tests
- export PYTEST_ARGS=(-m ${NODE_TYPE} --http-endpoint=${NODE_ADDRESS})
- *run-pytest
artifacts:
reports:
junit: tests/api_tests/message_format_tests/report.xml
tags:
- public-runner-docker
- hived-for-tests
......@@ -635,18 +596,14 @@ message_format_mainnet_64m_tests:
foundation_layer_tests:
stage: test
extends: .test_tools_based
needs:
- job: testnet_node_build
artifacts: true
image: "$CI_REGISTRY_IMAGE/test$TEST_IMAGE_TAG"
variables:
PYTEST_TIMEOUT_MINUTES: 3
JUNIT_REPORT: tests/functional/python_tests/foundation_layer_tests/report.xml
script:
- pip install pytest-rerunfailures==${PYTEST_RERUN_FAILURES_VERSION}
- cd tests/functional/python_tests/foundation_layer_tests
- *run-pytest
artifacts:
reports:
junit: tests/functional/python_tests/foundation_layer_tests/report.xml
tags:
- public-runner-docker
[submodule "secp256k1-zkp"]
path = libraries/fc/vendor/secp256k1-zkp
url = https://github.com/cryptonomex/secp256k1-zkp.git
url = https://github.com/ElementsProject/secp256k1-zkp.git
[submodule "tests_api"]
path = tests/tests_api
url = ../tests_api.git
......
......@@ -2,9 +2,7 @@
# Modify CI_IMAGE_TAG here and inside script hive/scripts/ci-helpers/build_ci_base_images.sh and run it. Then push images to registry
# To be started from cloned hive source directory.
ARG CI_REGISTRY_IMAGE=registry.gitlab.syncad.com/hive/hive/
ARG CI_IMAGE_TAG=:ubuntu20.04-4
ARG BLOCK_LOG_SUFFIX
ARG CI_IMAGE_TAG=:ubuntu20.04-5
ARG BUILD_IMAGE_TAG
FROM phusion/baseimage:focal-1.0.0 AS runtime
......@@ -37,6 +35,9 @@ RUN ./scripts/setup_ubuntu.sh --dev --hived-account="hived"
USER hived
WORKDIR /home/hived
# Install additionally packages located in user directory
RUN /usr/local/src/scripts/setup_ubuntu.sh --user
#docker build --target=ci-base-image-5m -t registry.gitlab.syncad.com/hive/hive/ci-base-image-5m:ubuntu20.04-xxx -f Dockerfile .
FROM ${CI_REGISTRY_IMAGE}ci-base-image$CI_IMAGE_TAG AS ci-base-image-5m
......@@ -75,9 +76,9 @@ RUN \
find . -name *.a -type f -delete
# Here we could use a smaller image without packages specific to build requirements
FROM ${CI_REGISTRY_IMAGE}ci-base-image$BLOCK_LOG_SUFFIX$CI_IMAGE_TAG as base_instance
FROM ${CI_REGISTRY_IMAGE}ci-base-image$CI_IMAGE_TAG as base_instance
ENV BUILD_IMAGE_TAG=${BUILD_IMAGE_TAG:-:ubuntu20.04-4}
ENV BUILD_IMAGE_TAG=${BUILD_IMAGE_TAG}
ARG P2P_PORT=2001
ENV P2P_PORT=${P2P_PORT}
......@@ -101,6 +102,7 @@ COPY --from=build \
/home/hived/build/programs/cli_wallet/cli_wallet \
/home/hived/build/programs/util/compress_block_log \
/home/hived/build/programs/util/truncate_block_log \
/home/hived/build/programs/util/get_dev_key \
/home/hived/build/programs/blockchain_converter/blockchain_converter* \
/home/hived/build/tests/unit/* /home/hived/bin/
......@@ -118,7 +120,7 @@ STOPSIGNAL SIGINT
ENTRYPOINT [ "/home/hived/docker_entrypoint.sh" ]
FROM ${CI_REGISTRY_IMAGE}base_instance$BLOCK_LOG_SUFFIX:base_instance-${BUILD_IMAGE_TAG} as instance
FROM ${CI_REGISTRY_IMAGE}base_instance:base_instance-${BUILD_IMAGE_TAG} as instance
#p2p service
EXPOSE ${P2P_PORT}
......@@ -129,8 +131,11 @@ EXPOSE ${HTTP_PORT}
# Port specific to HTTP cli_wallet server
EXPOSE ${CLI_WALLET_PORT}
FROM ${CI_REGISTRY_IMAGE}instance-5m:instance-${BUILD_IMAGE_TAG} as data
FROM ${CI_REGISTRY_IMAGE}ci-base-image-5m$CI_IMAGE_TAG AS block_log_5m_source
FROM ${CI_REGISTRY_IMAGE}base_instance:base_instance-$BUILD_IMAGE_TAG as data
COPY --from=block_log_5m_source /home/hived/datadir /home/hived/datadir
ADD --chown=hived:hived ./docker/config_5M.ini /home/hived/datadir/config.ini
RUN "/home/hived/docker_entrypoint.sh" --force-replay --stop-replay-at-block=5000000 --exit-before-sync
......
......@@ -33,6 +33,9 @@ then
fi
sudo -n chown -Rc hived:hived /home/hived/datadir
# Be sure this directory exists
mkdir --mode=777 -p /home/hived/datadir/blockchain
sudo -n chown -Rc hived:hived /home/hived/shm_dir
cd /home/hived/datadir
......
......@@ -53,9 +53,12 @@ class net_plugin : public appbase::plugin<net_plugin>
int main( int argc, char** argv ) {
try {
appbase::app().register_plugin<net_plugin>(); // implict registration of chain_plugin dependency
if( !appbase::app().initialize( argc, argv ) )
return -1;
appbase::app().register_plugin<net_plugin>(); // implicit registration of chain_plugin dependency
auto initResult = appbase::app().initialize( argc, argv );
if( !initResult.should_start_loop() )
return initResult.get_result_code();
appbase::app().startup();
appbase::app().exec();
} catch ( const boost::exception& e ) {
......
......@@ -243,7 +243,7 @@ void application::set_program_options()
}
}
bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*> autostart_plugins)
initialization_result application::initialize_impl(int argc, char** argv, vector<abstract_plugin*> autostart_plugins)
{
try
{
......@@ -252,13 +252,13 @@ bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*
if( my->_args.count( "help" ) ) {
cout << my->_app_options << "\n";
return false;
return { initialization_result::ok, false };
}
if( my->_args.count( "version" ) )
{
cout << version_info << "\n";
return false;
return { initialization_result::ok, false };
}
bfs::path data_dir;
......@@ -307,7 +307,7 @@ bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*
if(my->_args.count("generate-completions") > 0)
{
generate_completions();
return false;
return { initialization_result::ok, false };
}
#endif
......@@ -320,7 +320,7 @@ bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*
std::cout << "\t" << quoted("data-dir") << ": " << quoted(my->_data_dir.string().c_str()) << ",\n";
std::cout << "\t" << quoted("config") << ": " << quoted(config_file_name.string().c_str()) << "\n";
std::cout << "}\n";
return false;
return { initialization_result::ok, false };
}
if(my->_args.count("list-plugins") > 0)
......@@ -330,7 +330,7 @@ bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*
std::cout << plugin.first << "\n";
}
return false;
return { initialization_result::ok, false };
}
if(my->_args.count("plugin") > 0)
......@@ -350,12 +350,27 @@ bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*
bpo::notify(my->_args);
return true;
return { initialization_result::ok, true };
}
catch (const boost::program_options::validation_error& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return { initialization_result::validation_error, false };
}
catch (const boost::program_options::unknown_option& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return { initialization_result::unrecognised_option, false };
}
catch (const boost::program_options::error_with_option_name& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return { initialization_result::error_with_option, false };
}
catch (const boost::program_options::error& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return false;
return { initialization_result::unknown_command_line_error, false };
}
}
......
......@@ -91,8 +91,11 @@ class plugin_b : public appbase::plugin<plugin_b>
int main( int argc, char** argv ) {
try {
appbase::app().register_plugin<plugin_b>();
if( !appbase::app().initialize( argc, argv ) )
return -1;
auto initResult = appbase::app().initialize( argc, argv );
if( !initResult.should_start_loop() )
return initResult.get_result_code();
appbase::app().startup();
appbase::app().exec();
} catch ( const boost::exception& e ) {
......
......@@ -18,6 +18,38 @@ namespace appbase {
class application;
class initialization_result
{
public:
enum result {
ok,
unrecognised_option,
error_with_option,
validation_error,
unknown_command_line_error
};
initialization_result(result result, bool start_loop)
: init_result(result)
, start_loop_flag(start_loop)
{
}
int get_result_code() const
{
return init_result;
}
bool should_start_loop() const
{
return start_loop_flag;
}
private:
result init_result;
bool start_loop_flag;
};
class io_handler
{
public:
......@@ -72,7 +104,7 @@ namespace appbase {
* @return initialization_result; should_start_loop() reports whether the application and plugins were initialized, exception on error
*/
template< typename... Plugin >
bool initialize( int argc, char** argv )
initialization_result initialize( int argc, char** argv )
{
return initialize_impl( argc, argv, { find_plugin( Plugin::name() )... } );
}
......@@ -151,7 +183,7 @@ namespace appbase {
template< typename Impl >
friend class plugin;
bool initialize_impl( int argc, char** argv, vector< abstract_plugin* > autostart_plugins );
initialization_result initialize_impl( int argc, char** argv, vector< abstract_plugin* > autostart_plugins );
abstract_plugin* find_plugin( const string& name )const;
abstract_plugin& get_plugin( const string& name )const;
......
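Taken together, the appbase hunks above swap the old boolean initialize() for a small initialization_result. A minimal sketch of the resulting caller pattern, assembled from the changed call sites in this diff (some_plugin is a hypothetical stand-in for any registered plugin):

    #include <appbase/application.hpp>

    int main( int argc, char** argv )
    {
      appbase::app().register_plugin< some_plugin >();
      auto initResult = appbase::app().initialize< some_plugin >( argc, argv );
      if( !initResult.should_start_loop() )    // --help, --version, parse errors, etc.
        return initResult.get_result_code();   // initialization_result::ok maps to 0
      appbase::app().startup();
      appbase::app().exec();
      return 0;
    }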
......@@ -79,7 +79,7 @@ fc::variant_object block_flow_control::get_report( report_type rt ) const
return fc::variant_object();
const char* type = "";
if( !finished() )
if( !finished() || except )
type = "broken";
else if( forked() )
type = "forked";
......@@ -94,7 +94,7 @@ fc::variant_object block_flow_control::get_report( report_type rt ) const
fc::variant_object_builder report;
report
( "num", full_block->get_block_num() )
("lib", stats.get_last_irreversible_block_num())
( "lib", stats.get_last_irreversible_block_num() )
( "type", type );
if( rt != report_type::MINIMAL )
{
......@@ -180,10 +180,18 @@ void p2p_block_flow_control::on_failure( const fc::exception& e ) const
trigger_promise();
}
fc::time_point_sec p2p_block_flow_control::get_block_timestamp() const
{
return full_block->get_block_header().timestamp;
}
void sync_block_flow_control::on_worker_done() const
{
//do not generate report: many stats make no practical sense for sync blocks
//and the excess logging seems to be slowing down sync
//...with the exception of the last couple of blocks of syncing
if( ( fc::time_point::now() - get_block_timestamp() ) < HIVE_UP_TO_DATE_MARGIN__BLOCK_STATS )
block_flow_control::on_worker_done();
}
void existing_block_flow_control::on_end_of_apply_block() const
......@@ -192,4 +200,9 @@ void existing_block_flow_control::on_end_of_apply_block() const
stats.on_end_work();
}
fc::time_point_sec existing_block_flow_control::get_block_timestamp() const
{
return full_block->get_block_header().timestamp;
}
} } // hive::chain
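The new pure virtual get_block_timestamp() gives two distant hunks in this diff the same "near head?" test: the sync report suppression above and the pending-transaction gating further down. A sketch of the shared comparison (both margin macros already exist in the tree; their values are not shown in this diff):

    bool near_head = ( fc::time_point::now() - block_ctrl.get_block_timestamp() )
                     < HIVE_UP_TO_DATE_MARGIN__BLOCK_STATS;   // pending txs use ..__PENDING_TXS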
......@@ -1124,7 +1124,7 @@ void database::switch_forks(const item_ptr new_head)
{
BOOST_SCOPE_EXIT(this_) { this_->clear_tx_status(); } BOOST_SCOPE_EXIT_END
// we have to treat blocks from fork as not validated
set_tx_status(database::TX_STATUS_INC_BLOCK);
set_tx_status(database::TX_STATUS_P2P_BLOCK);
_fork_db.set_head(*ritr);
auto session = start_undo_session();
apply_block((*ritr)->full_block, skip);
......@@ -1176,7 +1176,7 @@ void database::switch_forks(const item_ptr new_head)
// even though those blocks were already processed before, it is safer to treat them as completely new,
// especially since alternative would be to treat them as replayed blocks, but that would be misleading
// since replayed blocks are already irreversible, while these are clearly reversible
set_tx_status(database::TX_STATUS_INC_BLOCK);
set_tx_status(database::TX_STATUS_P2P_BLOCK);
_fork_db.set_head(*ritr);
auto session = start_undo_session();
apply_block((*ritr)->full_block, skip);
......@@ -1242,7 +1242,7 @@ bool database::_push_block(const block_flow_control& block_ctrl)
try
{
BOOST_SCOPE_EXIT(this_) { this_->clear_tx_status(); } BOOST_SCOPE_EXIT_END;
set_tx_status(database::TX_STATUS_INC_BLOCK);
set_tx_status(database::TX_STATUS_P2P_BLOCK);
// if we've linked in a chain of multiple blocks, we need to keep the fork_db's head block in sync
// with what we're applying. If we're only appending a single block, the forkdb's head block
......@@ -3771,6 +3771,9 @@ void database::process_decline_voting_rights()
nullify_proxied_witness_votes( account );
clear_witness_votes( account );
if( account.has_proxy() )
push_virtual_operation( proxy_cleared_operation( account.get_name(), get_account( account.get_proxy() ).get_name()) );
modify( account, [&]( account_object& a )
{
a.can_vote = false;
......@@ -4311,6 +4314,13 @@ void database::_apply_block(const std::shared_ptr<full_block_type>& full_block)
{
ilog( "Processing ${n} genesis hardforks", ("n", n) );
set_hardfork( n, true );
#ifdef IS_TEST_NET
if( n < HIVE_NUM_HARDFORKS )
{
ilog( "Next hardfork scheduled for ${t} (current block timestamp ${c})",
( "t", _hardfork_versions.times[ n + 1 ] )( "c", block.timestamp ) );
}
#endif
const hardfork_property_object& hardfork_state = get_hardfork_property_object();
FC_ASSERT( hardfork_state.current_hardfork_version == _hardfork_versions.versions[n], "Unexpected genesis hardfork state" );
......@@ -5606,7 +5616,7 @@ uint32_t database::update_last_irreversible_block(const bool currently_applying_
try
{
detail::without_pending_transactions(*this, existing_block_flow_control(nullptr), std::move(_pending_tx), [&]() {
detail::without_pending_transactions(*this, existing_block_flow_control(new_head_block->full_block), std::move(_pending_tx), [&]() {
try
{
dlog("calling switch_forks() from update_last_irreversible_block()");
......@@ -7352,6 +7362,10 @@ void database::remove_expired_governance_votes()
{
nullify_proxied_witness_votes( account );
clear_witness_votes( account );
if( account.has_proxy() )
push_virtual_operation( proxy_cleared_operation( account.get_name(), get_account( account.get_proxy() ).get_name()) );
modify( account, [&]( account_object& a )
{
a.clear_proxy();
......
......@@ -534,6 +534,7 @@ full_transaction_ptr full_transaction_cache::add_to_cache(const full_transaction
if (existing_transaction)
return existing_transaction;
result.first->second = transaction;
transaction->set_is_in_cache();
return transaction;
}
......
......@@ -1338,6 +1338,11 @@ void account_witness_proxy_evaluator::do_apply( const account_witness_proxy_oper
_db.clear_witness_votes( account );
_db.modify( account, [&]( account_object& a ) {
if( account.has_proxy() )
{
_db.push_virtual_operation( proxy_cleared_operation( account.get_name(), _db.get_account( account.get_proxy() ).get_name()) );
}
a.set_proxy( new_proxy );
});
......@@ -1349,6 +1354,9 @@ void account_witness_proxy_evaluator::do_apply( const account_witness_proxy_oper
_db.adjust_proxied_witness_votes( account, delta );
} else { /// we are clearing the proxy which means we simply update the account
FC_ASSERT( account.has_proxy(), "Proxy must change." );
_db.push_virtual_operation( proxy_cleared_operation( account.get_name(), _db.get_account( account.get_proxy() ).get_name()) );
_db.modify( account, [&]( account_object& a ) {
a.clear_proxy();
});
......@@ -2023,7 +2031,7 @@ void custom_json_evaluator::do_apply( const custom_json_operation& o )
{
if( _db.is_in_control() )
throw;
//note: it is up to evaluator to unconditionally (regardless of is_producing, working even during
//note: it is up to evaluator to unconditionally (regardless of is_in_control, working even during
//replay) undo changes made during custom operation in case of exception;
//generic_custom_operation_interpreter::apply_operations provides such protection (see issue #256)
}
......
......@@ -161,7 +161,7 @@ public:
void on_end_of_processing( uint32_t _exp_txs, uint32_t _fail_txs, uint32_t _ok_txs, uint32_t _post_txs, uint32_t _lib ) const
{
stats.on_cleanup( _exp_txs, _fail_txs, _ok_txs, _post_txs, _lib );
if( !except )
if( !except && current_phase == phase::APPLIED )
current_phase = phase::END;
}
......@@ -170,6 +170,7 @@ public:
// at the end of processing in worker thread (called even if there was exception earlier)
virtual void on_worker_done() const;
virtual fc::time_point_sec get_block_timestamp() const = 0;
const std::shared_ptr<full_block_type>& get_full_block() const { return full_block; }
const block_stats& get_stats() const { return stats; }
......@@ -217,7 +218,6 @@ public:
void attach_promise( const std::shared_ptr<boost::promise<void>>& _p ) { prom = _p; }
void store_produced_block( const std::shared_ptr<full_block_type>& _block ) { full_block = _block; }
const fc::time_point_sec& get_block_timestamp() const { return block_ts; }
const protocol::account_name_type& get_witness_owner() const { return witness_owner; }
const fc::ecc::private_key& get_block_signing_private_key() const { return block_signing_private_key; }
uint32_t get_skip_flags() const { return skip; }
......@@ -226,6 +226,8 @@ public:
virtual void on_end_of_apply_block() const override final;
virtual void on_failure( const fc::exception& e ) const override final;
virtual fc::time_point_sec get_block_timestamp() const override final { return block_ts; }
protected:
virtual const char* buffer_type() const override final { return "gen"; }
......@@ -249,7 +251,7 @@ class p2p_block_flow_control : public block_flow_control
{
public:
p2p_block_flow_control( const std::shared_ptr<full_block_type>& _block, uint32_t _skip )
: block_flow_control( _block ), skip( _skip ) {}
: block_flow_control( _block ), skip( _skip ) { FC_ASSERT( _block ); }
virtual ~p2p_block_flow_control() = default;
void attach_promise( const fc::promise<void>::ptr& _p ) { prom = _p; }
......@@ -259,6 +261,8 @@ public:
virtual void on_end_of_apply_block() const override final;
virtual void on_failure( const fc::exception& e ) const override final;
virtual fc::time_point_sec get_block_timestamp() const override final;
private:
virtual const char* buffer_type() const override { return "p2p"; }
......@@ -294,11 +298,13 @@ class existing_block_flow_control : public block_flow_control
{
public:
existing_block_flow_control( const std::shared_ptr<full_block_type>& _block )
: block_flow_control( _block ) {}
: block_flow_control( _block ) { FC_ASSERT( _block ); }
virtual ~existing_block_flow_control() = default;
virtual void on_end_of_apply_block() const override final;
virtual fc::time_point_sec get_block_timestamp() const override final;
protected:
virtual const char* buffer_type() const override final { return "old"; }
};
......
......@@ -116,12 +116,12 @@ namespace chain {
TX_STATUS_UNVERIFIED = 0x01, //new transaction from API or P2P
TX_STATUS_PENDING = 0x02, //transaction that was verified by the node and is now pending (or popped)
TX_STATUS_BLOCK = 0x08, //during block processing
TX_STATUS_INC_BLOCK = TX_STATUS_BLOCK | TX_STATUS_UNVERIFIED, //while processing new block from API or P2P
TX_STATUS_GEN_BLOCK = TX_STATUS_BLOCK | TX_STATUS_PENDING //while producing new block
TX_STATUS_P2P_BLOCK = TX_STATUS_BLOCK | TX_STATUS_UNVERIFIED, //while processing block from P2P
TX_STATUS_GEN_BLOCK = TX_STATUS_BLOCK | TX_STATUS_PENDING //while generating new block
};
// block coming from API or P2P is validated for the first time, also newly produced or even reapplied but after switching fork
bool is_validating_block() const { return _current_tx_status == TX_STATUS_INC_BLOCK; }
// block coming from P2P is validated for the first time, also newly generated or even reapplied but after switching fork
bool is_validating_block() const { return _current_tx_status == TX_STATUS_P2P_BLOCK; }
// this node is a block producer and it creates new block out of pending transactions
// (note that new block is not actually a block, that is, there are no pre/post block notifications)
bool is_producing_block() const { return _current_tx_status == TX_STATUS_GEN_BLOCK; }
......
......@@ -75,10 +75,13 @@ struct pending_transactions_restorer
auto head_block_time = _db.head_block_time();
_db._pending_tx.reserve( _db._popped_tx.size() + _pending_transactions.size() );
#if !defined(IS_TEST_NET) || defined NDEBUG //especially during debugging that limit is highly problematic
auto start = fc::time_point::now();
#if !defined IS_TEST_NET
bool in_sync = ( start - _block_ctrl.get_block_timestamp() ) < HIVE_UP_TO_DATE_MARGIN__PENDING_TXS;
#else
bool in_sync = true;
#endif
bool apply_trxs = true;
bool apply_trxs = in_sync;
uint32_t applied_txs = 0;
uint32_t postponed_txs = 0;
uint32_t expired_txs = 0;
......@@ -96,7 +99,7 @@ struct pending_transactions_restorer
const signed_transaction& tx = full_transaction->get_transaction();
try
{
if( tx.expiration < head_block_time )
if( tx.expiration <= head_block_time )
{
++expired_txs;
}
......@@ -161,7 +164,7 @@ struct pending_transactions_restorer
} );
_block_ctrl.on_end_of_processing( expired_txs, failed_txs, applied_txs, postponed_txs, _db.get_last_irreversible_block_num() );
if (postponed_txs || expired_txs)
if( in_sync && ( postponed_txs || expired_txs ) )
{
wlog("Postponed ${postponed_txs} pending transactions. ${applied_txs} were applied. ${expired_txs} expired.",
(postponed_txs)(applied_txs)(expired_txs));
......
......@@ -33,41 +33,26 @@ struct full_transaction_type
private:
mutable std::mutex results_mutex; // single mutex used to guard writes to any data
mutable std::atomic<bool> has_merkle_digest = { false };
mutable hive::protocol::digest_type merkle_digest; // transaction hash used for calculating block's merkle root
mutable std::atomic<bool> has_legacy_transaction_message_hash = { false };
mutable fc::ripemd160 legacy_transaction_message_hash; // hash of p2p transaction message generated from this transaction
mutable std::atomic<bool> has_digest_and_transaction_id = { false };
mutable hive::protocol::digest_type digest; // hash used for generating transaction id
mutable hive::protocol::transaction_id_type transaction_id; // transaction id itself (truncated digest)
mutable std::atomic<bool> validation_attempted = { false }; // true if validate() has been called & cached
mutable fc::exception_ptr validation_exception; // if validate() threw, this is what it threw
mutable fc::microseconds validation_computation_time;
mutable std::atomic<bool> validation_accessed = false;
mutable std::atomic<bool> has_is_packed_in_legacy_format = { false };
mutable bool is_packed_in_legacy_format = false;
struct signature_info_type
{
hive::protocol::digest_type sig_digest;
flat_set<hive::protocol::public_key_type> signature_keys;
std::shared_ptr<fc::exception> signature_keys_exception;
fc::exception_ptr signature_keys_exception; // ABW: do we need separate exception? - it should be merged with validation_exception
fc::microseconds computation_time;
};
mutable std::atomic<bool> has_signature_info = { false };
mutable signature_info_type signature_info; // if we've computed the public keys that signed the transaction, it's stored here
mutable std::atomic<bool> signature_keys_accessed = { false };
mutable std::atomic<bool> has_required_authorities = { false };
// ABW: it takes 128 bytes of space (plus actual account names) for 1us CPU time on average
mutable hive::protocol::required_authorities_type required_authorities; // if we've figured out who is supposed to sign this transaction, it's here
mutable std::chrono::nanoseconds required_authorities_computation_time;
mutable std::atomic<bool> required_authorities_accessed;
/// all data below here isn't accessed across multiple threads, it's set at construction time and left alone
/// immutable data below here isn't accessed across multiple threads, it's set at construction time and left alone
// if this full_transaction was created while deserializing a block, we store
// containing_block_info, and our signed_transaction and serialized data point
......@@ -89,8 +74,25 @@ struct full_transaction_type
storage_type storage;
serialized_transaction_data serialized_transaction; // pointers to the beginning, middle, and end of the transaction in the storage
mutable int64_t rc_cost = -1; // RC cost of transaction - set when transaction is processed first, can be overwritten when it becomes part of block
mutable fc::ripemd160 legacy_transaction_message_hash; // hash of p2p transaction message generated from this transaction
mutable hive::protocol::transaction_id_type transaction_id; // transaction id itself (truncated digest)
mutable bool is_packed_in_legacy_format = false;
mutable bool is_in_cache = false; // true if this is tracked in the global transaction cache; if so, we need to remove ourselves upon garbage collection
mutable std::atomic<bool> has_merkle_digest = { false };
mutable std::atomic<bool> has_legacy_transaction_message_hash = { false };
mutable std::atomic<bool> has_digest_and_transaction_id = { false };
mutable std::atomic<bool> validation_attempted = { false }; // true if validate() has been called & cached
mutable std::atomic<bool> validation_accessed = false;
mutable std::atomic<bool> has_is_packed_in_legacy_format = { false };
mutable std::atomic<bool> has_signature_info = { false };
mutable std::atomic<bool> signature_keys_accessed = { false };
mutable std::atomic<bool> has_required_authorities = { false };
mutable std::atomic<bool> required_authorities_accessed = { false };
static std::atomic<uint32_t> number_of_instances_created;
static std::atomic<uint32_t> number_of_instances_destroyed;
......@@ -118,9 +120,11 @@ struct full_transaction_type
bool is_legacy_pack() const;
void precompute_validation(std::function<void(const hive::protocol::operation& op, bool post)> notify = std::function<void(const hive::protocol::operation&, bool)>()) const;
void validate(std::function<void(const hive::protocol::operation& op, bool post)> notify = std::function<void(const hive::protocol::operation&, bool)>()) const;
void set_rc_cost( int64_t cost ) const { rc_cost = cost; } // can only be called under main lock of write thread
const serialized_transaction_data& get_serialized_transaction() const;
size_t get_transaction_size() const;
int64_t get_rc_cost() const { return rc_cost; }
template <class DataStream>
void dump_serialized_transaction(DataStream& datastream) const
......
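The reshuffled header keeps the established publish-once idiom: each lazily computed field is paired with an atomic has_* flag, and writes funnel through the single results_mutex. A simplified sketch of that idiom, with a hypothetical compute() standing in for the real digest work:

    mutable std::mutex results_mutex;
    mutable std::atomic<bool> has_value = { false };
    mutable int value;                                  // stand-in for merkle_digest etc.

    int get_value() const
    {
      if( !has_value.load( std::memory_order_acquire ) )
      {
        std::lock_guard< std::mutex > guard( results_mutex );
        if( !has_value.load( std::memory_order_relaxed ) )
        {
          value = compute();                            // hypothetical expensive step
          has_value.store( true, std::memory_order_release );
        }
      }
      return value;
    }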
......@@ -57,7 +57,7 @@ struct get_custom_operation_name
};
template< typename CustomOperationType >
void custom_op_from_variant( const fc::variant& var, CustomOperationType& vo )
void custom_op_from_variant( const fc::variant& var, CustomOperationType& vo, const database& db )
{
static std::map<string,int64_t> to_legacy_tag = []()
{
......@@ -90,9 +90,20 @@ void custom_op_from_variant( const fc::variant& var, CustomOperationType& vo )
if( var.is_array() ) // legacy serialization
{
auto ar = var.get_array();
if( ar.size() < 2 ) return;
if( ar.size() != 2 )
{
FC_ASSERT( !db.is_in_control(), "Expected pair of values: [ operation_name_or_id, operation ]" );
if( ar.size() < 2 )
{
dlog( "Incomplete custom operation ignored @${b} ${var}", ( "b", db.head_block_num() + 1 )( var ) );
return;
}
dlog( "Dangling elements of custom operation ignored @${b} ${var}", ( "b", db.head_block_num() + 1 )( var ) );
}
if( ar[0].is_uint64() )
{
vo.set_which( ar[0].as_uint64() );
}
else
{
auto itr = to_legacy_tag.find(ar[0].as_string());
......@@ -108,6 +119,11 @@ void custom_op_from_variant( const fc::variant& var, CustomOperationType& vo )
FC_ASSERT( v_object.contains( "type" ), "Type field doesn't exist." );
FC_ASSERT( v_object.contains( "value" ), "Value field doesn't exist." );
if( v_object.size() != 2 )
{
FC_ASSERT( !db.is_in_control(), "Expected pair of values: { \"type\":operation_name_or_id, \"value\":operation }" );
dlog( "Dangling elements of custom operation ignored @${b} ${var}", ( "b", db.head_block_num() + 1 )( var ) );
}
int64_t which = -1;
......@@ -213,7 +229,6 @@ class generic_custom_operation_interpreter
{
try
{
FC_TODO( "Should we hardfork out old serialization?" )
fc::variant v = fc::json::from_string( outer_o.json );
std::vector< CustomOperationType > custom_operations;
......@@ -223,13 +238,13 @@ class generic_custom_operation_interpreter
for( auto& o : v.get_array() )
{
custom_operations.emplace_back();
custom_op_from_variant( o, custom_operations.back() );
custom_op_from_variant( o, custom_operations.back(), this->_db );
}
}
else
{
custom_operations.emplace_back();
custom_op_from_variant( v, custom_operations[0] );
custom_op_from_variant( v, custom_operations[0], this->_db );
}
apply_operations( custom_operations, operation( outer_o ) );
......
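For reference, the two JSON shapes custom_op_from_variant now enforces (the operation name and fields below are hypothetical; only the outer structure comes from the checks above). A node in control rejects anything beyond the expected pair via FC_ASSERT, while replay logs and tolerates it:

    // legacy array form: exactly [ operation_name_or_id, operation ]
    fc::variant legacy = fc::json::from_string(
      R"([ "example_op", { "account": "alice" } ])" );

    // object form: exactly { "type": ..., "value": ... }
    fc::variant modern = fc::json::from_string(
      R"({ "type": "example_op", "value": { "account": "alice" } })" );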
......@@ -83,7 +83,7 @@ else ( MSVC )
ExternalProject_Add( project_secp256k1
PREFIX ${CMAKE_CURRENT_BINARY_DIR}/vendor/secp256k1-zkp
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vendor/secp256k1-zkp
CONFIGURE_COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/vendor/secp256k1-zkp/configure CPPFLAGS=${FPIC_FLAG} --prefix=${CMAKE_CURRENT_BINARY_DIR}/vendor/secp256k1-zkp --with-bignum=no --enable-tests=no --enable-benchmark=no
CONFIGURE_COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/vendor/secp256k1-zkp/configure CPPFLAGS=${FPIC_FLAG} --prefix=${CMAKE_CURRENT_BINARY_DIR}/vendor/secp256k1-zkp --with-bignum=no --enable-tests=no --enable-benchmark=no --enable-module-recovery --enable-experimental --enable-module-generator --enable-module-rangeproof
BUILD_COMMAND make
INSTALL_COMMAND true
BUILD_BYPRODUCTS ${CMAKE_CURRENT_BINARY_DIR}/vendor/secp256k1-zkp/src/project_secp256k1-build/.libs/libsecp256k1.a ${CMAKE_CURRENT_BINARY_DIR}/vendor/secp256k1-zkp/src/project_secp256k1-build/.libs/libsecp256k1.so
......
......@@ -47,6 +47,7 @@ namespace fc {
~public_key();
// bool verify( const fc::sha256& digest, const signature& sig );
public_key_data serialize()const;
public_key_point_data serialize_ecc_point()const;
operator public_key_data()const { return serialize(); }
......@@ -235,8 +236,9 @@ namespace fc {
commitment_type blind( const blind_factor_type& blind, uint64_t value );
blind_factor_type blind_sum( const std::vector<blind_factor_type>& blinds, uint32_t non_neg );
/** verifies taht commnits + neg_commits + excess == 0 */
bool verify_sum( const std::vector<commitment_type>& commits, const std::vector<commitment_type>& neg_commits, int64_t excess );
/** verifies that commits + neg_commits == 0 */
/* note: verify_sum used to take an `excess` parameter, but the underlying implementation removed it, and in practice it was always set to `0` */
bool verify_sum( const std::vector<commitment_type>& commits, const std::vector<commitment_type>& neg_commits );
bool verify_range( uint64_t& min_val, uint64_t& max_val, const commitment_type& commit, const range_proof_type& proof );
range_proof_type range_proof_sign( uint64_t min_value,
......@@ -259,7 +261,6 @@ namespace fc {
range_proof_info range_get_info( const range_proof_type& proof );
} // namespace ecc
void to_variant( const ecc::private_key& var, variant& vo );
void from_variant( const variant& var, ecc::private_key& vo );
......
......@@ -11,12 +11,10 @@ namespace fc
public:
enum_type( EnumType t )
:value(t){}
enum_type( IntType t )
enum_type( IntType t = 0 ) // 0 is better than uninitialized (and fits most enums)
:value( (EnumType)t ){}
enum_type(){}
explicit operator IntType()const { return static_cast<IntType>(value); }
operator EnumType()const { return value; }
operator std::string()const { return fc::reflector<EnumType>::to_string(value); }
......
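A small sketch of what the defaulted constructor argument means in practice (the wrapper comes from the hunk above; the sample enum is hypothetical):

    enum sample_flag : uint8_t { none = 0, active = 1 };   // hypothetical enum

    fc::enum_type< uint8_t, sample_flag > f;   // previously default-constructed uninitialized;
                                               // now starts at 0, i.e. sample_flag::none
    assert( f == sample_flag::none );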