Commit eeaa80e8 authored by Bartek Wrona's avatar Bartek Wrona

'g-maintenance' rebased onto master

parents d92b7005 abfb8e98
FROM phusion/baseimage:0.9.19

# Build/test image for hive (testnet configuration).
# Install the compiler toolchain and every library dependency in a single
# layer, and purge the apt package lists in that same layer so the cache does
# not persist in the image. Packages are sorted alphabetically for diffability.
RUN \
    apt-get update && \
    apt-get install -y \
        autoconf \
        automake \
        autotools-dev \
        build-essential \
        cmake \
        doxygen \
        git \
        libboost-all-dev \
        libbz2-dev \
        libgflags-dev \
        liblz4-dev \
        liblz4-tool \
        libreadline-dev \
        libsnappy-dev \
        libssl-dev \
        libtool \
        libyajl-dev \
        libzstd-dev \
        ncurses-dev \
        python3 \
        python3-dev \
        python3-jinja2 \
        python3-pip \
        zlib1g-dev && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /usr/local/src/hive

# COPY, not ADD: a plain local file copy needs none of ADD's tar/URL magic.
COPY . /usr/local/src/hive

# WORKDIR replaces the `cd` chain and creates the build directory implicitly.
WORKDIR /usr/local/src/hive/build

# Configure a Release testnet build, compile, and run the unit-test binaries
# so a broken build fails the image build itself.
RUN \
    cmake \
        -DCMAKE_BUILD_TYPE=Release \
        -DENABLE_STD_ALLOCATOR_SUPPORT=ON \
        -DBUILD_HIVE_TESTNET=ON \
        -DLOW_MEMORY_NODE=OFF \
        -DCLEAR_VOTES=ON \
        -DSKIP_BY_TX_ID=ON \
        .. && \
    make -j16 && \
    ./tests/chain_test && \
    ./tests/plugin_test && \
    ./programs/util/test_fixed_string
#!groovy
// Jenkins pipeline: runs the test suite and the docker-image build in
// parallel, then reports build status via the ciscripts helpers.
pipeline {
agent any
stages {
stage('Build') {
steps {
// NOTE(review): `parallel` with a branch map inside declarative `steps` is
// the legacy scripted form; newer Jenkins prefers a declarative
// `parallel { stage … }` block. Left as-is to avoid changing CI semantics.
parallel ( "Build tests":
{
sh 'ciscripts/triggertests.sh'
// Publish code-coverage results produced by the test run.
step([$class: 'CoberturaPublisher', autoUpdateHealth: false, autoUpdateStability: false, coberturaReportFile: '**/cobertura/coverage.xml', failUnhealthy: false, failUnstable: false, maxNumberOfBuilds: 0, onlyStable: false, sourceEncoding: 'ASCII', zoomCoverageChart: false])
},
"Build docker image": {
sh 'ciscripts/triggerbuild.sh'
}, failFast: true ) // failFast: abort the other branch on first failure
}
}
}
post {
success {
sh 'ciscripts/buildsuccess.sh'
}
failure {
sh 'ciscripts/buildfailure.sh'
// Notify the team Slack channel with a link to the failed build.
slackSend (color: '#ff0000', message: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})")
}
}
}
\ No newline at end of file
# CircleCI 1.0 configuration: build the CI docker image, then run the test
# suite by building Dockerfile.test (which executes the tests during the
# image build).
machine:
  services:
    - docker

dependencies:
  # Cache the docker image directory between builds to speed up CI.
  cache_directories:
    - "~/docker"
  override:
    - docker info
    - time bash tests/scripts/create-ci-docker-image.sh

test:
  override:
    # `timeout` is a CircleCI 1.0 command modifier: it must nest under the
    # command it applies to (note the trailing colon on the command line).
    - time docker build --rm=false -t steemitinc/hive-test -f Dockerfile.test .:
        timeout: "1800s"
# Google Cloud Build pipeline: clone the requested branch (with submodules),
# show its status, and build it with the Builder.DockerFile recipe on a
# 32-vCPU worker.
steps:
- name: gcr.io/cloud-builders/git
  args: ['clone', '-b', '$BRANCH_NAME', '--recurse-submodules', '-v', 'https://github.com/blocktradesdevs/hive.git']
  id: "Git clone"
- name: gcr.io/cloud-builders/git
  dir: "hive"
  args: ['status']
  id: "Git status"
- name: 'gcr.io/cloud-builders/docker'
  dir: "hive"
  args: [
    'build',
    '-f', '../Builder.DockerFile',
    '.'
  ]
options:
  # Large machine type: the C++ build is CPU-bound.
  machineType: 'N1_HIGHCPU_32'
# Console appender definition json: {"appender", "stream"}
log-console-appender = {"appender":"stderr","stream":"std_error"}
log-appender = {"appender":"stderr","stream":"std_error"}
# {"appender":"p2p","file":"logs/p2p/p2p.log"}
log-logger = {"name":"default","level":"info","appender":"stderr"}
# {"name":"p2p","level":"warn","appender":"p2p"}
# File appender definition json: {"appender", "file"}
log-file-appender = {"appender":"p2p","file":"logs/p2p/p2p.log"}
backtrace = yes
# Logger definition json: {"name", "level", "appender"}
log-logger = {"name":"default","level":"debug","appender":"stderr"}
log-logger = {"name":"p2p","level":"info","appender":"stderr"}
plugin = webserver p2p json_rpc
plugin = database_api condenser_api
# Plugin(s) to enable, may be specified multiple times
plugin = webserver p2p json_rpc account_history_rocksdb
plugin = witness
plugin = rc
plugin = database_api account_history_api condenser_api
plugin = market_history
plugin = market_history_api
# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times
# account-history-track-account-range =
plugin = account_history_rocksdb
plugin = account_history_api
# Defines a list of operations which will be explicitly logged.
# account-history-whitelist-ops = transfer_operation transfer_to_vesting_operation withdraw_vesting_operation interest_operation transfer_to_savings_operation transfer_from_savings_operation cancel_transfer_from_savings_operation escrow_transfer_operation escrow_approve_operation escrow_dispute_operation escrow_release_operation fill_convert_request_operation fill_order_operation claim_reward_balance_operation author_reward_operation curation_reward_operation fill_vesting_withdraw_operation fill_transfer_from_savings_operation delegate_vesting_shares_operation return_vesting_delegation_operation comment_benefactor_reward_operation
plugin = transaction_status
plugin = transaction_status_api
# Defines a list of operations which will be explicitly ignored.
# account-history-blacklist-ops =
plugin = account_by_key
plugin = account_by_key_api
# the location of the chain shared memory files (absolute path or relative to application data dir)
# shared-file-dir = "blockchain"
plugin = reputation
plugin = reputation_api
# Size of the shared memory file. Default: 54G
shared-file-size = 70G
plugin = block_api network_broadcast_api rc_api
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
# flush shared memory changes to disk every N blocks
# flush-state-interval = 0
shared-file-size = 24G
# Database edits to apply on startup (may specify multiple times)
# edit-script =
shared-file-full-threshold = 9500
shared-file-scale-rate = 1000
# Set the maximum size of cached feed for an account
follow-max-feed-size = 500
p2p-endpoint = 0.0.0.0:2001
# Block time (in epoch seconds) when to start calculating feeds
# follow-start-feeds = 0
transaction-status-block-depth = 64000
transaction-status-track-after-block = 47000000
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
market-history-bucket-size = [15,60,300,3600,86400]
webserver-http-endpoint = 0.0.0.0:8091
webserver-ws-endpoint = 0.0.0.0:8090
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 5760)
market-history-buckets-per-size = 5760
# The local IP address and port to listen for incoming connections.
# p2p-endpoint =
# Maximum number of incoming connections on P2P endpoint
# p2p-max-connections =
# The IP address and port of a remote peer to sync with. Deprecated in favor of p2p-seed-node.
# seed-node =
# The IP address and port of a remote peer to sync with.
# p2p-seed-node =
# User agent to advertise to peers
p2p-user-agent = Graphene Reference Implementation
# The local IP and port to listen for incoming http connections.
# webserver-http-endpoint =
# The local IP and port to listen for incoming websocket connections.
# webserver-ws-endpoint =
# Number of threads used to handle queries. Default: 32.
webserver-thread-pool-size = 32
# Enable block production, even if the chain is stale.
enable-stale-production = false
# Percent of witnesses (0-99) that must be participating in order to produce blocks
# required-participation =
# name of witness controlled by this node (e.g. initwitness )
# witness =
# WIF PRIVATE KEY to be used by one or more witnesses or miners
# private-key =
webserver-thread-pool-size = 256
# Console appender definition json: {"appender", "stream"}
log-console-appender = {"appender":"stderr","stream":"std_error"}
# File appender definition json: {"appender", "file"}
log-file-appender = {"appender":"p2p","file":"logs/p2p/p2p.log"}
# Logger definition json: {"name", "level", "appender"}
log-logger = {"name":"default","level":"info","appender":"stderr"}
log-logger = {"name":"p2p","level":"info","appender":"stderr"}
# Plugin(s) to enable, may be specified multiple times
plugin = webserver p2p json_rpc witness
plugin = database_api condenser_api network_broadcast_api rc_api
# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times
# account-history-track-account-range =
# Defines a list of operations which will be explicitly logged.
# account-history-whitelist-ops =
# Defines a list of operations which will be explicitly ignored.
# account-history-blacklist-ops =
# the location of the chain shared memory files (absolute path or relative to application data dir)
# shared-file-dir = "blockchain"
# Size of the shared memory file. Default: 54G
shared-file-size = 70G
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
# flush shared memory changes to disk every N blocks
flush-state-interval = 0
# Database edits to apply on startup (may specify multiple times)
# edit-script =
# Set the maximum size of cached feed for an account
follow-max-feed-size = 500
# Block time (in epoch seconds) when to start calculating feeds
# follow-start-feeds = 0
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
market-history-bucket-size = [15,60,300,3600,86400]
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 5760)
market-history-buckets-per-size = 5760
# The local IP address and port to listen for incoming connections.
# p2p-endpoint =
# Maximum number of incoming connections on P2P endpoint
# p2p-max-connections =
# The IP address and port of a remote peer to sync with. Deprecated in favor of p2p-seed-node.
# seed-node =
# The IP address and port of a remote peer to sync with.
# p2p-seed-node =
# User agent to advertise to peers
p2p-user-agent = Graphene Reference Implementation
# The local IP and port to listen for incoming http connections.
# webserver-http-endpoint =
# The local IP and port to listen for incoming websocket connections.
# webserver-ws-endpoint =
# Number of threads used to handle queries. Default: 32.
webserver-thread-pool-size = 256
# Enable block production, even if the chain is stale.
enable-stale-production = false
# Percent of witnesses (0-99) that must be participating in order to produce blocks
# required-participation =
# name of witness controlled by this node (e.g. initwitness )
# witness =
# WIF PRIVATE KEY to be used by one or more witnesses or miners
# private-key =
# Console appender definition json: {"appender", "stream"}
log-console-appender = {"appender":"stderr","stream":"std_error"}
# File appender definition json: {"appender", "file"}
log-file-appender = {"appender":"p2p","file":"logs/p2p/p2p.log"}
# Logger definition json: {"name", "level", "appender"}
log-appender = {"appender":"stderr","stream":"std_error"}
log-logger = {"name":"default","level":"info","appender":"stderr"}
log-logger = {"name":"p2p","level":"info","appender":"stderr"}
# Plugin(s) to enable, may be specified multiple times
plugin = webserver p2p json_rpc witness transaction_status
plugin = database_api condenser_api rc_api block_api network_broadcast_api transaction_status_api
# Note: If using the docker image, account_history and account_history_api plugins
# are added automatically to track single accounts when the `TRACK_ACCOUNT` environment variable is set
# to an account name.
backtrace = yes
# If using the docker image, this is set with the environment variable `TRACK_ACCOUNT`
# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times
# account-history-track-account-range = ["accountname","accountname"]
plugin = webserver p2p json_rpc
plugin = database_api condenser_api
# Defines a list of operations which will be explicitly logged.
# account-history-whitelist-ops =
plugin = witness
plugin = rc
# Defines a list of operations which will be explicitly ignored.
# account-history-blacklist-ops =
# Note: If using the docker image, account_history_rocksdb and account_history_api plugins
# are added automatically to track single account when the `TRACK_ACCOUNT` environment variable is set
# to an account name.
#
#plugin = account_history_rocksdb
#plugin = account_history_api
# the location of the chain shared memory files (absolute path or relative to application data dir)
shared-file-dir = "blockchain"
plugin = transaction_status
plugin = transaction_status_api
# Size of the shared memory file. Default: 100G
shared-file-size = 64G
plugin = block_api network_broadcast_api rc_api
# Set autoscaling of shared memory file
# If using the docker image, this is set with a default command line argument
# shared-file-full-threshold = 9500
# shared-file-scale-rate = 1000
account-history-rocksdb-path = "blockchain/account-history-rocksdb-storage"
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
shared-file-size = 24G
# flush shared memory changes to disk every N blocks
flush-state-interval = 0
# Database edits to apply on startup (may specify multiple times)
# edit-script =
# Set the maximum size of cached feed for an account
follow-max-feed-size = 500
# Block time (in epoch seconds) when to start calculating feeds
# follow-start-feeds = 0
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
market-history-bucket-size = [15,60,300,3600,86400]
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 5760)
market-history-buckets-per-size = 5760
# The local IP address and port to listen for incoming connections.
# p2p-endpoint =
# Maximum number of incoming connections on P2P endpoint
# p2p-max-connections =
# The IP address and port of a remote peer to sync with. Deprecated in favor of p2p-seed-node.
# seed-node =
# The IP address and port of a remote peer to sync with.
# p2p-seed-node =
# User agent to advertise to peers
p2p-user-agent = Graphene Reference Implementation
# The local IP and port to listen for incoming http connections.
# webserver-http-endpoint =
# The local IP and port to listen for incoming websocket connections.
# webserver-ws-endpoint =
# Number of threads used to handle queries. Default: 32.
webserver-thread-pool-size = 256
# Defines a range of accounts to track as a json pair ["from","to"]
# Uncomment lines to track only your own exchange account
# tracking more accounts will have a significant impact on resources needed
# Note: If using the docker image, this is set with the environment variable `TRACK_ACCOUNT`
# Use custom `config.ini` if you need to track more than one.
#
#account-history-rocksdb-track-account-range = ["binance-hot","binance-hot"]
#account-history-rocksdb-track-account-range = ["bittrex","bittrex"]
#account-history-rocksdb-track-account-range = ["blocktrades","blocktrades"]
#account-history-rocksdb-track-account-range = ["deepcrypto8","deepcrypto8"]
#account-history-rocksdb-track-account-range = ["huobi-pro","huobi-pro"]
# Enable block production, even if the chain is stale.
enable-stale-production = false
p2p-endpoint = 0.0.0.0:2001
# Percent of witnesses (0-99) that must be participating in order to produce blocks
# required-participation =
transaction-status-block-depth = 64000
transaction-status-track-after-block = 47000000
# name of witness controlled by this node (e.g. initwitness )
# witness =
webserver-http-endpoint = 0.0.0.0:8091
webserver-ws-endpoint = 0.0.0.0:8090
# WIF PRIVATE KEY to be used by one or more witnesses or miners
# private-key =
\ No newline at end of file
webserver-thread-pool-size = 32
# Console appender definition json: {"appender", "stream"}
log-console-appender = {"appender":"stderr","stream":"std_error"}
# File appender definition json: {"appender", "file"}
log-file-appender = {"appender":"p2p","file":"logs/p2p/p2p.log"}
# Logger definition json: {"name", "level", "appender"}
log-logger = {"name":"default","level":"debug","appender":"stderr"}
log-logger = {"name":"p2p","level":"info","appender":"stderr"}
# Plugin(s) to enable, may be specified multiple times
plugin = webserver p2p json_rpc witness account_by_key reputation market_history
plugin = database_api account_by_key_api network_broadcast_api reputation_api market_history_api condenser_api block_api rc_api
# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times
# account-history-track-account-range =
# Defines a list of operations which will be explicitly logged.
# account-history-whitelist-ops = transfer_operation transfer_to_vesting_operation withdraw_vesting_operation interest_operation transfer_to_savings_operation transfer_from_savings_operation cancel_transfer_from_savings_operation escrow_transfer_operation escrow_approve_operation escrow_dispute_operation escrow_release_operation fill_convert_request_operation fill_order_operation claim_reward_balance_operation author_reward_operation curation_reward_operation fill_vesting_withdraw_operation fill_transfer_from_savings_operation delegate_vesting_shares_operation return_vesting_delegation_operation comment_benefactor_reward_operation
# Defines a list of operations which will be explicitly ignored.
# account-history-blacklist-ops =
# the location of the chain shared memory files (absolute path or relative to application data dir)
# shared-file-dir = "blockchain"
# Size of the shared memory file. Default: 54G
shared-file-size = 300G
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
# flush shared memory changes to disk every N blocks
# flush-state-interval = 0
# Database edits to apply on startup (may specify multiple times)
# edit-script =
# Set the maximum size of cached feed for an account
follow-max-feed-size = 500
# Block time (in epoch seconds) when to start calculating feeds
# follow-start-feeds = 0
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
market-history-bucket-size = [15,60,300,3600,86400]
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 5760)
market-history-buckets-per-size = 5760
# The local IP address and port to listen for incoming connections.
# p2p-endpoint =
# Maximum number of incoming connections on P2P endpoint
# p2p-max-connections =
# The IP address and port of a remote peer to sync with. Deprecated in favor of p2p-seed-node.
# seed-node =
# The IP address and port of a remote peer to sync with.
# p2p-seed-node =
# User agent to advertise to peers
p2p-user-agent = Graphene Reference Implementation
# The local IP and port to listen for incoming http connections.
# webserver-http-endpoint =
# The local IP and port to listen for incoming websocket connections.
# webserver-ws-endpoint =
# Number of threads used to handle queries. Default: 32.
webserver-thread-pool-size = 256
# Enable block production, even if the chain is stale.
enable-stale-production = false
# Percent of witnesses (0-99) that must be participating in order to produce blocks
# required-participation =
# name of witness controlled by this node (e.g. initwitness )
# witness =
# WIF PRIVATE KEY to be used by one or more witnesses or miners
# private-key =
# Console appender definition json: {"appender", "stream"}
log-console-appender = {"appender":"stderr","stream":"std_error"}
# File appender definition json: {"appender", "file"}
log-file-appender = {"appender":"p2p","file":"logs/p2p/p2p.log"}
# Logger definition json: {"name", "level", "appender"}
log-logger = {"name":"default","level":"debug","appender":"stderr"}
log-logger = {"name":"p2p","level":"info","appender":"stderr"}
# Plugin(s) to enable, may be specified multiple times
plugin = webserver p2p json_rpc witness account_by_key tags follow market_history account_history
plugin = database_api account_by_key_api network_broadcast_api tags_api follow_api market_history_api condenser_api account_history_api rc_api
# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times
# account-history-track-account-range =
# Defines a list of operations which will be explicitly logged.
account-history-whitelist-ops = transfer_operation transfer_to_vesting_operation withdraw_vesting_operation interest_operation transfer_to_savings_operation transfer_from_savings_operation cancel_transfer_from_savings_operation escrow_transfer_operation escrow_approve_operation escrow_dispute_operation escrow_release_operation fill_convert_request_operation fill_order_operation claim_reward_balance_operation author_reward_operation curation_reward_operation fill_vesting_withdraw_operation fill_transfer_from_savings_operation delegate_vesting_shares_operation return_vesting_delegation_operation comment_benefactor_reward_operation
# Defines a list of operations which will be explicitly ignored.
# account-history-blacklist-ops =
# the location of the chain shared memory files (absolute path or relative to application data dir)
# shared-file-dir = "blockchain"
# Size of the shared memory file. Default: 54G
shared-file-size = 215G
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
# flush shared memory changes to disk every N blocks
# flush-state-interval = 0
# Database edits to apply on startup (may specify multiple times)
# edit-script =
# Set the maximum size of cached feed for an account
follow-max-feed-size = 500
# Block time (in epoch seconds) when to start calculating feeds
# follow-start-feeds = 0
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
market-history-bucket-size = [15,60,300,3600,86400]
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 5760)
market-history-buckets-per-size = 5760
# The local IP address and port to listen for incoming connections.
# p2p-endpoint =
# Maximum number of incoming connections on P2P endpoint
# p2p-max-connections =
# The IP address and port of a remote peer to sync with. Deprecated in favor of p2p-seed-node.
# seed-node =
# The IP address and port of a remote peer to sync with.
# p2p-seed-node =
# User agent to advertise to peers
p2p-user-agent = Graphene Reference Implementation
# The local IP and port to listen for incoming http connections.
# webserver-http-endpoint =
# The local IP and port to listen for incoming websocket connections.
# webserver-ws-endpoint =
# Number of threads used to handle queries. Default: 32.
# webserver-thread-pool-size = 1024
# Enable block production, even if the chain is stale.
enable-stale-production = false
# Percent of witnesses (0-99) that must be participating in order to produce blocks
# required-participation =
# name of witness controlled by this node (e.g. initwitness )
# witness =
# WIF PRIVATE KEY to be used by one or more witnesses or miners
# private-key =
......@@ -9,9 +9,28 @@ HIVED="${SCRIPTPATH}/bin/hived"
ARGS=""
# Maintenance hooks: marker files under ${DATADIR}/blockchain change startup
# behaviour without rebuilding the image.
# If blockchain/stop exists, refuse to start; sleep first so a container
# restart policy does not spin in a fast crash loop.
if [[ -e ${DATADIR}/blockchain/stop ]]; then
echo "blockchain/stop exists so refusing to start"
sleep 60
exit 1
fi
# One-shot marker: consume blockchain/force_replay and replay the chain from
# the block log on this start only.
if [[ -e ${DATADIR}/blockchain/force_replay ]]; then
echo "We will force replay this time"
rm -fv ${DATADIR}/blockchain/force_replay
ARGS+=" --replay-blockchain --force-replay --set-benchmark-interval 100000"
fi
# One-shot marker: consume blockchain/force_open and pass --force-open on
# this start only.
if [[ -e ${DATADIR}/blockchain/force_open ]]; then
echo "We will force open this time"
rm -fv ${DATADIR}/blockchain/force_open
ARGS+=" --force-open"
fi
if [[ ! -z "$TRACK_ACCOUNT" ]]; then
ARGS+=" --plugin=account_history --plugin=account_history_api"
ARGS+=" --account-history-track-account-range=[\"$TRACK_ACCOUNT\",\"$TRACK_ACCOUNT\"]"
<