Compare revisions: hive/reputation_tracker
Commits on Source (5)
FROM openresty/openresty:alpine AS without_tag
COPY docker/reputation_tracker_nginx.conf.template /usr/local/openresty/nginx/conf/nginx.conf
COPY docker/reputation_tracker_nginx.conf.template /usr/local/openresty/nginx/conf/nginx.conf.template
COPY rewrite_rules.conf /usr/local/openresty/nginx/conf/rewrite_rules.conf
COPY docker/rewriter_entrypoint.sh /entrypoint.sh
......
-- WARNING
--
-- this file is only executed at startup if the function do_rep_indexes_exist()
-- returns false. That function has a list of the indexes created in this file, and returns
-- true only if they all already exist. If you add, remove, or rename an index created in this
-- file, you must make a corresponding change in that function.
--
-- We do this because the ANALYZE at the end of this file is slow, and only needs to be run
-- if we actually created any indexes.
-- Note: for each index below, we first check whether it exists but is invalid; if so, we drop
-- it, so that it gets recreated by the subsequent CREATE INDEX IF NOT EXISTS.
-- We could check/drop all of the indexes in a single DO block at the top of the file, which
-- might look cleaner, but this way someone doing cut & paste is more likely to grab both the
-- drop and the create together.
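--
-- For illustration only (a sketch, not the actual definition used by this app): the existence
-- check referenced above can be a simple catalog lookup over the index names created below,
-- roughly:
--
--   CREATE OR REPLACE FUNCTION do_rep_indexes_exist()
--   RETURNS BOOLEAN LANGUAGE sql STABLE AS $fn$
--     SELECT COUNT(*) = 2
--     FROM pg_indexes
--     WHERE schemaname = 'hafd'
--       AND indexname IN ('effective_comment_vote_idx', 'delete_comment_op_idx');
--   $fn$;
--
-- The real function lives in the app's schema (the install script calls it as
-- <schema>.do_rep_indexes_exist()); the body above is only a sketch of that check.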
DO $$
BEGIN
IF EXISTS(SELECT 1 FROM pg_index WHERE NOT indisvalid AND indexrelid = (SELECT oid FROM pg_class WHERE relname = 'effective_comment_vote_idx')) THEN
RAISE NOTICE 'Dropping invalid index effective_comment_vote_idx, it will be recreated';
DROP INDEX hafd.effective_comment_vote_idx;
END IF;
END
$$;
DECLARE
__schema_name VARCHAR;
BEGIN
SHOW SEARCH_PATH INTO __schema_name;
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS effective_comment_vote_idx ON hafd.operations USING btree
(
(body_binary::jsonb -> 'value' ->> 'author'),
(body_binary::jsonb -> 'value' ->> 'voter'),
(body_binary::jsonb -> 'value' ->> 'permlink'),
id desc
)
WHERE hive.operation_id_to_type_id(id) = 72;
-- FIXME: indexes must be created concurrently
PERFORM hive.register_index_dependency(
__schema_name,
'CREATE UNIQUE INDEX IF NOT EXISTS delete_comment_op_idx ON hafd.operations USING btree
(
(body_binary::jsonb -> ''value'' ->> ''author''),
(body_binary::jsonb -> ''value'' ->> ''permlink''),
id desc
)
WHERE hive.operation_id_to_type_id(id) in (17, 61)'
);
DO $$
BEGIN
IF EXISTS(SELECT 1 FROM pg_index WHERE NOT indisvalid AND indexrelid = (SELECT oid FROM pg_class WHERE relname = 'delete_comment_op_idx')) THEN
RAISE NOTICE 'Dropping invalid index delete_comment_op_idx, it will be recreated';
DROP INDEX hafd.delete_comment_op_idx;
END IF;
END
$$;
PERFORM hive.register_index_dependency(
__schema_name,
'CREATE UNIQUE INDEX IF NOT EXISTS effective_comment_vote_idx ON hafd.operations USING btree
(
(body_binary::jsonb -> ''value'' ->> ''author''),
(body_binary::jsonb -> ''value'' ->> ''voter''),
(body_binary::jsonb -> ''value'' ->> ''permlink''),
id desc
)
WHERE hive.operation_id_to_type_id(id) = 72'
);
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS delete_comment_op_idx ON hafd.operations USING btree
(
(body_binary::jsonb -> 'value' ->> 'author'),
(body_binary::jsonb -> 'value' ->> 'permlink'),
id desc
)
WHERE hive.operation_id_to_type_id(id) in (17, 61);
ANALYZE hafd.operations;
END
$$;
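--
-- For reference: this file is applied by the install script (shown later in this diff) via
-- psql with the app schema first on the search path, roughly:
--
--   psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on \
--        -c "SET SEARCH_PATH TO reptracker_app;" -f "$SRCPATH/db/rep_indexes.sql"
--
-- The default schema name reptracker_app and the $SRCPATH layout are taken from the install
-- script below; substitute your own values if they differ.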
@@ -8,6 +8,31 @@ trap 'trap - 2 15 && kill -- -$$' 2 15
postgres_user=${POSTGRES_USER:-"haf_admin"}
postgres_host=${POSTGRES_HOST:-"localhost"}
postgres_port=${POSTGRES_PORT:-5432}
POSTGRES_ACCESS=${POSTGRES_URL:-"postgresql://$postgres_user@$postgres_host:$postgres_port/haf_block_log"}
POSTGRES_ACCESS=${POSTGRES_URL:-"postgresql://$postgres_user@$postgres_host:$postgres_port/haf_block_log?application_name=reptracker_health_check"}
exec [ "$(psql "$POSTGRES_ACCESS" --quiet --no-align --tuples-only --command="SELECT hive.is_app_in_sync('reptracker_app');")" = t ]
# this health check will return healthy if:
# - reputation_tracker has processed a block in the last 60 seconds
# (as long as that block was also processed after the container started; we don't want
# to report healthy immediately after a restart)
# or
# - reputation_tracker's head block has caught up to haf's irreversible block
# (so we don't mark reputation_tracker as unhealthy if HAF stops getting blocks)
#
# This check needs to know when the block processing started, so the docker entrypoint
# must write this to a file like:
# date --utc --iso-8601=seconds > /tmp/block_processing_startup_time.txt
if [ ! -f "/tmp/block_processing_startup_time.txt" ]; then
echo "file /tmp/block_processing_startup_time.txt does not exist, which means block"
echo "processing hasn't started yet"
exit 1
fi
STARTUP_TIME="$(cat /tmp/block_processing_startup_time.txt)"
CHECK="SET TIME ZONE 'UTC'; \
SELECT ((now() - (SELECT last_active_at FROM hafd.contexts WHERE name = 'reptracker_app')) < interval '1 minute' \
AND (SELECT last_active_at FROM hafd.contexts WHERE name = 'reptracker_app') > '${STARTUP_TIME}'::timestamp) OR \
hive.is_app_in_sync('reptracker_app');"
# the docker container probably won't have a locale set, so set LC_ALL to suppress the locale warning
export LC_ALL=C
exec [ "$(psql "$POSTGRES_ACCESS" --quiet --no-align --tuples-only --command="${CHECK}")" = t ]
Subproject commit 92c46150a8f9a3a279ccdb0be0644bcefe430cb2
Subproject commit 5c8a432a43b799fbeeebd142b5f24cdea10f59ae
@@ -17,10 +17,10 @@ EOF
function wait-for-rt-startup() {
if command -v psql &> /dev/null
then
until psql "$POSTGRES_ACCESS" --quiet --tuples-only --command="$COMMAND" | grep 0 &>/dev/null
until psql "$POSTGRES_ACCESS" --quiet --tuples-only --command="$COMMAND" | grep 1 &>/dev/null
do
echo "$MESSAGE"
sleep 3
sleep 20
done
else
echo "Please install psql before running this script."
@@ -29,8 +29,8 @@ function wait-for-rt-startup() {
}
#shellcheck disable=SC2089
COMMAND="SELECT CASE WHEN irreversible_block = 5000000 THEN 0 ELSE 1 END FROM hafd.contexts WHERE name = 'reptracker_app';"
MESSAGE="Waiting for Balance Tracker to finish processing blocks..."
COMMAND="SELECT hive.is_app_in_sync('reptracker_app')::INT;"
MESSAGE="Waiting for Reputation Tracker to finish processing blocks..."
while [ $# -gt 0 ]; do
case "$1" in
......
@@ -32,8 +32,6 @@ POSTGRES_URL=${POSTGRES_URL:-""}
REPTRACKER_SCHEMA=${REPTRACKER_SCHEMA:-"reptracker_app"}
IS_FORKING=${IS_FORKING:-"true"}
SWAGGER_URL=${SWAGGER_URL:-"{reptracker-host}"}
CREATE_SCHEMA=1
CREATE_INDEXES=1
POSTGRES_APP_NAME=reptracker_install
@@ -57,14 +55,6 @@ while [ $# -gt 0 ]; do
--is_forking=*)
IS_FORKING="${1#*=}"
;;
--indexes-only)
CREATE_SCHEMA=0
POSTGRES_APP_NAME=reptracker_install_indexes
;;
--schema-only)
CREATE_INDEXES=0
POSTGRES_APP_NAME=reptracker_install_schema
;;
--help)
print_help
exit 0
@@ -87,28 +77,11 @@ done
POSTGRES_ACCESS=${POSTGRES_URL:-"postgresql://$POSTGRES_USER@$POSTGRES_HOST:$POSTGRES_PORT/haf_block_log?application_name=${POSTGRES_APP_NAME}"}
create_haf_indexes() {
if [ "$(psql "$POSTGRES_ACCESS" --quiet --no-align --tuples-only --command="SELECT ${REPTRACKER_SCHEMA}.do_rep_indexes_exist();")" = f ]; then
# if HAF is in massive sync, most indexes on HAF tables have been dropped. We don't want to
# add our own indexes during that time, since it would slow massive sync down, so we wait.
echo "Waiting for HAF to be out of massive sync"
psql "$POSTGRES_ACCESS" -v "ON_ERROR_STOP=on" -c "SELECT hive.wait_for_ready_instance(ARRAY['${REPTRACKER_SCHEMA}'], interval '3 days');"
echo "Creating indexes, this might take a while."
# There's an unsolved bug that happens any time an app like hafbe adds/drops indexes at the same
# time HAF is entering/leaving massive sync. We need to prevent this, probably by having the app
# set a flag that prevents HAF from re-entering massive sync while the app is creating indexes.
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -c "SET SEARCH_PATH TO ${REPTRACKER_SCHEMA};" -c "\timing" -f "$SRCPATH/db/rep_indexes.sql"
else
echo "HAF indexes already exist, skipping creation"
fi
}
#pushd "$reptracker_dir"
#./scripts/generate_version_sql.sh "$reptracker_dir"
#popd
if [ "$CREATE_SCHEMA" = 1 ]; then
echo "Installing app..."
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -f "$SRCPATH/db/builtin_roles.sql"
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -c "SET ROLE reptracker_owner;CREATE SCHEMA IF NOT EXISTS ${REPTRACKER_SCHEMA} AUTHORIZATION reptracker_owner;"
@@ -127,8 +100,6 @@ if [ "$CREATE_SCHEMA" = 1 ]; then
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -c "SET ROLE reptracker_owner;GRANT USAGE ON SCHEMA reptracker_endpoints to reptracker_user;"
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -c "SET ROLE reptracker_owner;GRANT SELECT ON ALL TABLES IN SCHEMA ${REPTRACKER_SCHEMA} TO reptracker_user;"
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -c "SET ROLE reptracker_owner;GRANT SELECT ON ALL TABLES IN SCHEMA reptracker_endpoints TO reptracker_user;"
fi
#register indexes
psql "$POSTGRES_ACCESS" -v ON_ERROR_STOP=on -c "SET SEARCH_PATH TO ${REPTRACKER_SCHEMA};" -f "$SRCPATH/db/rep_indexes.sql"
if [ "$CREATE_INDEXES" = 1 ]; then
create_haf_indexes
fi
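For orientation, with the --schema-only and --indexes-only flags removed, a single run of the
install script now installs the schema and registers the indexes. A minimal usage sketch; the
script name scripts/install_app.sh is an assumption (only its flags and environment variables
appear in this diff):

    # Sketch only: connection settings come from POSTGRES_URL or the POSTGRES_* variables
    # read at the top of the script; the script path here is assumed.
    POSTGRES_HOST=localhost POSTGRES_PORT=5432 \
        ./scripts/install_app.sh --is_forking=true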
@@ -68,6 +68,10 @@ POSTGRES_ACCESS=${POSTGRES_URL:-"postgresql://$POSTGRES_USER@$POSTGRES_HOST:$POS
process_blocks() {
local n_blocks="${1:-null}"
log_file="reptracker_sync.log"
# record the startup time for use in health checks
date -uIseconds > /tmp/block_processing_startup_time.txt
# wait until the reptracker indexes have been created
psql "$POSTGRES_ACCESS" -v "ON_ERROR_STOP=on" -c "\timing" -c "SELECT hive.wait_till_registered_indexes_created('${REPTRACKER_SCHEMA}')"
psql "$POSTGRES_ACCESS" -v "ON_ERROR_STOP=on" -v REPTRACKER_SCHEMA="${REPTRACKER_SCHEMA}" -c "\timing" -c "SET SEARCH_PATH TO ${REPTRACKER_SCHEMA};" -c "CALL ${REPTRACKER_SCHEMA}.main('${REPTRACKER_SCHEMA}', $n_blocks);" 2>&1 | tee -i $log_file
}
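To see what these checks observe, the same calls can be run by hand against the HAF database;
a minimal sketch using the defaults from the scripts above (the connection string is an
assumption if you have overridden POSTGRES_URL):

    # Sketch only: mirrors the sync and liveness queries used by the health check.
    PSQL_URL="postgresql://haf_admin@localhost:5432/haf_block_log"
    psql "$PSQL_URL" --no-align --tuples-only \
         --command="SELECT hive.is_app_in_sync('reptracker_app');"
    psql "$PSQL_URL" --no-align --tuples-only \
         --command="SELECT last_active_at FROM hafd.contexts WHERE name = 'reptracker_app';"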
......